diff --git a/.github/workflows/linux_riscv.yml b/.github/workflows/linux_riscv.yml index 8966a63f611d36..389579ee5c5808 100644 --- a/.github/workflows/linux_riscv.yml +++ b/.github/workflows/linux_riscv.yml @@ -139,7 +139,6 @@ jobs: git submodule update --init -- ${OPENVINO_REPO}/thirdparty/gflags git submodule update --init -- ${OPENVINO_REPO}/thirdparty/telemetry git submodule update --init -- ${OPENVINO_REPO}/src/plugins/intel_cpu - git submodule update --init -- ${OPENVINO_REPO}/thirdparty/open_model_zoo git submodule update --init -- ${OPENVINO_REPO}/thirdparty/flatbuffers/flatbuffers popd diff --git a/.gitmodules b/.gitmodules index 5feb7458da1801..884ba551c9b269 100644 --- a/.gitmodules +++ b/.gitmodules @@ -52,9 +52,6 @@ [submodule "thirdparty/onednn_gpu"] path = src/plugins/intel_gpu/thirdparty/onednn_gpu url = https://github.com/oneapi-src/oneDNN.git -[submodule "tools/pot/thirdparty/open_model_zoo"] - path = thirdparty/open_model_zoo - url = https://github.com/openvinotoolkit/open_model_zoo.git [submodule "thirdparty/json/nlohmann_json"] path = thirdparty/json/nlohmann_json url = https://github.com/nlohmann/json.git diff --git a/thirdparty/open_model_zoo b/thirdparty/open_model_zoo deleted file mode 160000 index e7df86da686d2e..00000000000000 --- a/thirdparty/open_model_zoo +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e7df86da686d2e1600282422e54f66c2fecea160 diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index c0e8467e8b13a5..2c8854505abdb9 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -5,10 +5,6 @@ # Python tools # -# MO - -add_subdirectory(mo) - if(ENABLE_PYTHON) # Benchmark Tool add_subdirectory(benchmark_tool) @@ -16,7 +12,3 @@ if(ENABLE_PYTHON) # OpenVino Conversion Tool add_subdirectory(ovc) endif() - -# wheel openvino-dev - -add_subdirectory(openvino_dev) diff --git a/tools/mo/.coveragerc b/tools/mo/.coveragerc deleted file mode 100644 index 66d591bc9e5b59..00000000000000 --- a/tools/mo/.coveragerc +++ /dev/null @@ -1,41 +0,0 @@ -# .coveragerc to control coverage.py -[run] -branch = True - -source = - extensions/ - mo/ - mo.py - -omit = - # omit anything in a .local directory anywhere - */.local/* - # omit everything in /usr - /usr/* - # omit tests - */test_*.py - *_test.py - # init scripts - */__init__.py - -[report] -# Regexes for lines to exclude from consideration -exclude_lines = - # Have to re-enable the standard pragma - pragma: no cover - - # Don't complain about missing debug-only code: - def __repr__ - - # Don't complain if tests don't hit defensive assertion code: - raise AssertionError - raise NotImplementedError - - # Don't complain if non-runnable code isn't run: - if 0: - if __name__ == .__main__.: - -ignore_errors = True - -[html] -directory = htmlcov diff --git a/tools/mo/.pylintdict b/tools/mo/.pylintdict deleted file mode 100644 index 637082afa1d0a6..00000000000000 --- a/tools/mo/.pylintdict +++ /dev/null @@ -1,68 +0,0 @@ -attrs -arg -args -bfs -bool -caffe -caffemodel -ceil -chw -cli -cls -co-location -concat -config -const -conv -dfs -dict -dsu -eltwise -enum -env -eq -fallback -fc -fcn -fw -hardcoded -http -https -hwc -indices -inplace -inv -io -len -lrn -mul -n-ary -networkx -ndarray -nx -np -numpy -org -oi -params -pb -pbs -priorbox -priorboxes -proto -protobuf -protobufs -prototxt -regex -reorgyolo -regionyolo -replacers -resnet -rcnn -split -splitv -ssd -usr -undead -xml -www diff --git a/tools/mo/.pylintrc b/tools/mo/.pylintrc deleted file mode 100644 index bf5a62656b1bc1..00000000000000 --- a/tools/mo/.pylintrc +++ 
/dev/null @@ -1,410 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -init-hook='import os; import sys; sys.path.append(os.path.abspath(os.path.curdir))' - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=proto, tests, docs, automation - -# Add files or directories matching the regex patterns to the blacklist. The -# regex matches against base names, not paths. -ignore-patterns=.env3/*, python3.5, .*_test.py - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins= - -# Use multiple processes to speed up Pylint. -# jobs=4 - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= - -# Allow optimization of some AST trees. This will activate a peephole AST -# optimizer, which will apply various small optimizations. For instance, it can -# be used to obtain the result of joining multiple strings with the addition -# operator. Joining a lot of strings can lead to a maximum recursion error in -# Pylint and this flag can prevent that. It has one side effect, the resulting -# AST will be different than the one from reality. This option is deprecated -# and it will be removed in Pylint 2.0. -optimize-ast=no - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -#enable= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=locally-disabled,too-few-public-methods,no-self-use,too-many-ancestors, - missing-docstring,old-style-class,consider-iterating-dictionary,consider-using-enumerate, - superfluous-parens,no-else-return,duplicate-code,wrong-import-order, - too-many-locals,logging-not-lazy,unnecessary-lambda,super-on-old-class,ungrouped-imports,too-many-format-args, - protected-access,too-many-statements,too-many-branches,too-many-return-statements,too-many-public-methods, - super-init-not-called,singleton-comparison,pointless-string-statement,broad-except, invalid-name - -[REPORTS] - -# Set the output format. 
Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Put messages in a separate file for each module / package specified on the -# command line instead of printing them on stdout. Reports (if any) will be -# written in a file name "pylint_global.[txt|html]". This option is deprecated -# and it will be removed in Pylint 2.0. -files-output=no - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=8 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=120 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma,dict-separator - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict=en_US - -# List of comma separated words that should not be checked. -spelling-ignore-words=TF, MO, IR, IE - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file=.pylintdict - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. 
-additional-builtins= - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,future.builtins - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules=flask_sqlalchemy,app.extensions.flask_sqlalchemy,openvino,torch,paddle - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members=fget,query,begin,add,merge,delete,commit,rollback - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[BASIC] - -# Good variable names which should always be accepted, separated by a comma -good-names=i,j,k,ex,Run,_,log,api,a,c,d,e,ei,f,hp,id,l,l2,ml,mn,n,N,op,p,pb,pb,ph,q,rt,s,s1,s2,si,u,v,wp,x,y - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,baz,toto,tutu,tata - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. 
-property-classes=abc.abstractproperty - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct argument names -argument-rgx=([a-z_][a-z0-9_]{2,40}$)|(fileName)|(pl)|(t) - -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{2,40}$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,40}$ - -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,40}$ - -# Regular expression matching correct function names -function-rgx=([a-z_][a-z0-9_]{2,40}$) - -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,40}$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,40}|(__.*__))$ - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,40}|(__.*__))$ - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,40}$ - -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{2,40}$ - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,40}$ - -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{2,40}$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=5 - - -[ELIF] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=5 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=10 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=optparse - -# Create a graph of every (i.e. 
internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=flask_restplus_patched - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=Exception diff --git a/tools/mo/CMakeLists.txt b/tools/mo/CMakeLists.txt deleted file mode 100644 index daec2b6c41781b..00000000000000 --- a/tools/mo/CMakeLists.txt +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -if(NOT ENABLE_PYTHON) - message(WARNING "Please enable OpenVINO Runtime Python API (set -DENABLE_PYTHON=ON) targets to enable Model Optimizer target") -else() - add_custom_target(model_optimizer DEPENDS ie_api openvino_ir_frontend pyopenvino) - - if(ENABLE_TESTS) - add_subdirectory(unit_tests/mock_mo_frontend/mock_mo_frontend) - add_dependencies(model_optimizer openvino_mock_mo_frontend) - - add_subdirectory(unit_tests/mock_mo_frontend/mock_mo_python_api) - add_dependencies(model_optimizer mock_mo_python_api) - endif() -endif() - -# install - -configure_file("${CMAKE_CURRENT_SOURCE_DIR}/automation/version.txt.in" - "${CMAKE_CURRENT_SOURCE_DIR}/version.txt" @ONLY) - -if(ENABLE_TESTS) - install(FILES requirements_dev.txt - DESTINATION tests/mo - COMPONENT tests - EXCLUDE_FROM_ALL) - - install(DIRECTORY unit_tests - DESTINATION tests/mo - COMPONENT tests - EXCLUDE_FROM_ALL) - - install(DIRECTORY automation - DESTINATION tests/mo - COMPONENT tests - EXCLUDE_FROM_ALL) - - install(FILES .coveragerc - DESTINATION tests/mo - COMPONENT tests - EXCLUDE_FROM_ALL) -endif() diff --git a/tools/mo/README.md b/tools/mo/README.md deleted file mode 100644 index 4eeeb6886b2e66..00000000000000 --- a/tools/mo/README.md +++ /dev/null @@ -1,89 +0,0 @@ -## Installation - -### Installing from PyPi -1. Create a virtual environment and activate it, e.g.: -``` -virtualenv -p /usr/bin/python3.7 .env3 -source .env3/bin/activate -``` - -2. Install openvino-dev package, it contains model conversion API: -``` -pip install openvino-dev -``` - -This will download all requirements and will install MO in your current virtual environment. -If you need only particular frameworks you can specify them manually as optional dependencies in square brackets. -E.g. 
the command below will install dependencies to support ONNX\* and TensorFlow2\* models: -``` -pip install openvino-dev[onnx,tensorflow2] -``` -To enable support of all frameworks: -``` -pip install openvino-dev[all] -``` -By default, if no frameworks are specified, dependencies to support ONNX\* and TensorFlow2\* are installed. - -[//]: <> (### Installing wheel package from provided OpenVINO™ offline distribution) -[//]: <> (To be done) - -## Converting models -* [Converting Model](../../docs/MO_DG/prepare_model/convert_model/Converting_Model.md) - -## Setup development environment -If you want to contribute to model conversion API you will need to deploy developer environment. -You can do that by following the steps below: - -1. Create virtual environment and activate it, e.g.: -``` -virtualenv -p /usr/bin/python3.7 .env3 -source .env3/bin/activate -``` - -2. Clone the OpenVINO™ repository and change dir to model-optimizer -``` -git clone https://github.com/openvinotoolkit/openvino -cd openvino/tools/mo/ -``` - -3. Install openvino-mo package for development: -``` -pip install -e . -``` -or run `setup.py develop`, result will be the same: -``` -python setup.py develop -``` - -This will download all requirements and deploy model conversion API for development in your virtual environment: -specifically will create *.egg-link into the current directory in your site-packages. -As previously noted, you can also manually specify to support only selected frameworks : -``` -pip install -e ".[onnx,tensorflow2]" -``` - -### How to run unit-tests - -1. Run tests with: -
-<pre>
-    python -m unittest discover -p "*_test.py" [-s PATH_TO_DIR]
-</pre>
-
-### How to capture unit-tests coverage
-
-1. Run tests with:
-<pre>
-    coverage run -m unittest discover -p "*_test.py" [-s PATH_TO_DIR]
-</pre>
-
-2. Build html report:
-<pre>
-    coverage html
-</pre>
-
-### How to run code linting
-
-1. Run the following command:
-<pre>
-    pylint openvino/tools/mo/ mo.py
-</pre>
diff --git a/tools/mo/automation/create_package.py b/tools/mo/automation/create_package.py deleted file mode 100644 index e957cf4d6aaa5e..00000000000000 --- a/tools/mo/automation/create_package.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import os -from shutil import rmtree - -from utils import Automation - -parser = argparse.ArgumentParser() -parser.add_argument("--build_number", type=str, help="Build number to be added to package version", default="0", ) -args = parser.parse_args() - -auto = Automation() -base_dir = os.path.dirname(__file__) -bom_path = os.path.join(base_dir, "package_BOM.txt") -bom = auto.parse_bom(bom_path=bom_path) -dir_to_tar = auto.copy_files_from_bom(root_path=os.path.join(os.path.dirname(__file__), ".."), bom=bom) -auto.add_version_txt(dst_path=dir_to_tar, build_number=args.build_number) - -auto.make_tarfile(out_file_name="mo_for_tf_{0}.tar.gz".format(args.build_number), source_dir=dir_to_tar) -rmtree(dir_to_tar) diff --git a/tools/mo/automation/package_BOM.txt b/tools/mo/automation/package_BOM.txt deleted file mode 100644 index 1f09faefde1f39..00000000000000 --- a/tools/mo/automation/package_BOM.txt +++ /dev/null @@ -1,999 +0,0 @@ -openvino/tools/__init__.py -openvino/tools/mo/__init__.py -openvino/tools/mo/__main__.py -openvino/tools/mo/analysis/__init__.py -openvino/tools/mo/analysis/boolean_input.py -openvino/tools/mo/analysis/inputs.py -openvino/tools/mo/analysis/json_print.py -openvino/tools/mo/analysis/nodes.py -openvino/tools/mo/analysis/tf_od_api.py -openvino/tools/mo/analysis/tf_retinanet.py -openvino/tools/mo/analysis/tf_yolo.py -openvino/tools/mo/back/__init__.py -openvino/tools/mo/back/add_outputs_recursive.py -openvino/tools/mo/back/AvgPool.py -openvino/tools/mo/back/blob_normalizer.py -openvino/tools/mo/back/CellNormalizer.py -openvino/tools/mo/back/ChangeOutputTypeAttributes.py -openvino/tools/mo/back/ChangeRandomUniformOutputType.py -openvino/tools/mo/back/ClampNormalizer.py -openvino/tools/mo/back/compress_quantized_weights.py -openvino/tools/mo/back/ConvolutionNormalizer.py -openvino/tools/mo/back/CorrectName.py -openvino/tools/mo/back/CropToStridedSlice.py -openvino/tools/mo/back/CutMemory.py -openvino/tools/mo/back/EnableConstantStridedSlice.py -openvino/tools/mo/back/FakeOutputResolver.py -openvino/tools/mo/back/ForceStrictPrecision.py -openvino/tools/mo/back/FuseTransposesSequence.py -openvino/tools/mo/back/GatherNormalizer.py -openvino/tools/mo/back/ie_ir_ver_2/__init__.py -openvino/tools/mo/back/ie_ir_ver_2/emitter.py -openvino/tools/mo/back/insert_compatibility_l2normalization.py -openvino/tools/mo/back/InterpolateReshape.py -openvino/tools/mo/back/kaldi_remove_memory_output.py -openvino/tools/mo/back/LayoutChangeForGatherND.py -openvino/tools/mo/back/LeakyReLUMutation.py -openvino/tools/mo/back/LinearToLinearONNXReplacer.py -openvino/tools/mo/back/LRNToNorm.py -openvino/tools/mo/back/MarkNodesWithShapeValues.py -openvino/tools/mo/back/MatMulNormalizer.py -openvino/tools/mo/back/MaxPool.py -openvino/tools/mo/back/names_uniqueness_check.py -openvino/tools/mo/back/NormalizeToNormalizeL2.py -openvino/tools/mo/back/offline_transformations.py -openvino/tools/mo/back/op_versioning.py -openvino/tools/mo/back/OptimizeTransposeReshapeSequence.py -openvino/tools/mo/back/PackBinaryWeights.py -openvino/tools/mo/back/pass_separator.py -openvino/tools/mo/back/preprocessing.py -openvino/tools/mo/back/priorbox_mutation.py 
-openvino/tools/mo/back/ProposalMutation.py -openvino/tools/mo/back/ReduceMerge.py -openvino/tools/mo/back/ReduceTransposeDimensions.py -openvino/tools/mo/back/remove_last_softmax_pattern.py -openvino/tools/mo/back/RemoveUselessConvert.py -openvino/tools/mo/back/replacement.py -openvino/tools/mo/back/ReshapeMutation.py -openvino/tools/mo/back/ResultNormalizer.py -openvino/tools/mo/back/ResultRename.py -openvino/tools/mo/back/ReverseInputChannels.py -openvino/tools/mo/back/RNNSequenceTypeRename.py -openvino/tools/mo/back/SelectBroadcast.py -openvino/tools/mo/back/ShapeOfConstFolding.py -openvino/tools/mo/back/ShuffleChannelPatternOptimization.py -openvino/tools/mo/back/SpecialNodesFinalization.py -openvino/tools/mo/back/StridedSliceMasksNormalizer.py -openvino/tools/mo/back/TopKNormalizer.py -openvino/tools/mo/back/TransposeDFT.py -openvino/tools/mo/back/TransposeReduceFusing.py -openvino/tools/mo/back/UselessConcatRemoval.py -openvino/tools/mo/convert.py -openvino/tools/mo/convert_impl.py -openvino/tools/mo/front/__init__.py -openvino/tools/mo/front/ArgOpsSqueeze.py -openvino/tools/mo/front/ATenToEmbeddingBag.py -openvino/tools/mo/front/AttributedClampNormalizer.py -openvino/tools/mo/front/AttributedGatherNormalizer.py -openvino/tools/mo/front/AttributedPadToPad.py -openvino/tools/mo/front/AttributedRandomUniformToRandomUniform.py -openvino/tools/mo/front/AttributedRollToRoll.py -openvino/tools/mo/front/binary_quantize_normalization.py -openvino/tools/mo/front/broadcast_with_range.py -openvino/tools/mo/front/caffe/__init__.py -openvino/tools/mo/front/caffe/argmax_ext.py -openvino/tools/mo/front/caffe/ArgMaxFlatten.py -openvino/tools/mo/front/caffe/axpy.py -openvino/tools/mo/front/caffe/batchnorm_ext.py -openvino/tools/mo/front/caffe/binarization.py -openvino/tools/mo/front/caffe/binary_conv_ext.py -openvino/tools/mo/front/caffe/bn.py -openvino/tools/mo/front/caffe/bn_ext.py -openvino/tools/mo/front/caffe/collect_attributes.py -openvino/tools/mo/front/caffe/concat_ext.py -openvino/tools/mo/front/caffe/conv_ext.py -openvino/tools/mo/front/caffe/crop_ext.py -openvino/tools/mo/front/caffe/ctcgreedydecoder_ext.py -openvino/tools/mo/front/caffe/custom_layers_mapping.py -openvino/tools/mo/front/caffe/CustomLayersMapping.xml.example -openvino/tools/mo/front/caffe/detection_output_ext.py -openvino/tools/mo/front/caffe/dropout_ext.py -openvino/tools/mo/front/caffe/elementwise_ext.py -openvino/tools/mo/front/caffe/eltwise_add_normalize.py -openvino/tools/mo/front/caffe/elu.py -openvino/tools/mo/front/caffe/extractor.py -openvino/tools/mo/front/caffe/extractors/__init__.py -openvino/tools/mo/front/caffe/extractors/native_caffe.py -openvino/tools/mo/front/caffe/extractors/tile.py -openvino/tools/mo/front/caffe/extractors/utils.py -openvino/tools/mo/front/caffe/flatten_ext.py -openvino/tools/mo/front/caffe/grn_ext.py -openvino/tools/mo/front/caffe/inner_product_ext.py -openvino/tools/mo/front/caffe/input_ext.py -openvino/tools/mo/front/caffe/interp_ext.py -openvino/tools/mo/front/caffe/loader.py -openvino/tools/mo/front/caffe/lrn_ext.py -openvino/tools/mo/front/caffe/mvn_ext.py -openvino/tools/mo/front/caffe/MVNCaffeToMVN.py -openvino/tools/mo/front/caffe/normalize_ext.py -openvino/tools/mo/front/caffe/permute_ext.py -openvino/tools/mo/front/caffe/pooling_ext.py -openvino/tools/mo/front/caffe/prelu_ext.py -openvino/tools/mo/front/caffe/priorbox_clustered_ext.py -openvino/tools/mo/front/caffe/priorbox_ext.py -openvino/tools/mo/front/caffe/proposal_ext.py 
-openvino/tools/mo/front/caffe/proposal_python_ext.py -openvino/tools/mo/front/caffe/proto/__init__.py -openvino/tools/mo/front/caffe/proto/caffe_pb2.py -openvino/tools/mo/front/caffe/proto/generate_caffe_pb2.py -openvino/tools/mo/front/caffe/proto/mo_caffe.proto -openvino/tools/mo/front/caffe/psroipooling_ext.py -openvino/tools/mo/front/caffe/python_layer_extractor.py -openvino/tools/mo/front/caffe/regionyolo_ext.py -openvino/tools/mo/front/caffe/register_custom_ops.py -openvino/tools/mo/front/caffe/relu6.py -openvino/tools/mo/front/caffe/relu_ext.py -openvino/tools/mo/front/caffe/reorgyolo_ext.py -openvino/tools/mo/front/caffe/resample_ext.py -openvino/tools/mo/front/caffe/reshape.py -openvino/tools/mo/front/caffe/roipooling_ext.py -openvino/tools/mo/front/caffe/scale_ext.py -openvino/tools/mo/front/caffe/shufflechannel_ext.py -openvino/tools/mo/front/caffe/sigmoid.py -openvino/tools/mo/front/caffe/slice_ext.py -openvino/tools/mo/front/caffe/slice_to_split.py -openvino/tools/mo/front/caffe/softmax_ext.py -openvino/tools/mo/front/caffe/split_to_identity.py -openvino/tools/mo/front/caffe/tanh.py -openvino/tools/mo/front/common/__init__.py -openvino/tools/mo/front/common/custom_replacement_registry.py -openvino/tools/mo/front/common/extractors/__init__.py -openvino/tools/mo/front/common/extractors/utils.py -openvino/tools/mo/front/common/find_unsupported_ops.py -openvino/tools/mo/front/common/layout.py -openvino/tools/mo/front/common/partial_infer/__init__.py -openvino/tools/mo/front/common/partial_infer/batch_norm.py -openvino/tools/mo/front/common/partial_infer/caffe_fallback.py -openvino/tools/mo/front/common/partial_infer/concat.py -openvino/tools/mo/front/common/partial_infer/crop.py -openvino/tools/mo/front/common/partial_infer/elemental.py -openvino/tools/mo/front/common/partial_infer/eltwise.py -openvino/tools/mo/front/common/partial_infer/multi_box_detection.py -openvino/tools/mo/front/common/partial_infer/multi_box_prior.py -openvino/tools/mo/front/common/partial_infer/roipooling.py -openvino/tools/mo/front/common/partial_infer/utils.py -openvino/tools/mo/front/common/register_custom_ops.py -openvino/tools/mo/front/common/replacement.py -openvino/tools/mo/front/common/weights.py -openvino/tools/mo/front/create_tensor_nodes.py -openvino/tools/mo/front/disable_weights_quantize_value_propagation.py -openvino/tools/mo/front/div.py -openvino/tools/mo/front/eltwise_n.py -openvino/tools/mo/front/ExpandDimsToUnsqueeze.py -openvino/tools/mo/front/extractor.py -openvino/tools/mo/front/FakeQuantWithMinMaxVars.py -openvino/tools/mo/front/FillToBroadcast.py -openvino/tools/mo/front/flatten_to_reshape.py -openvino/tools/mo/front/freeze_placeholder_value.py -openvino/tools/mo/front/GeLUMerger_Erf.py -openvino/tools/mo/front/GeLUMerger_Tanh.py -openvino/tools/mo/front/global_pooling_to_reduce.py -openvino/tools/mo/front/HSigmoid_fusion.py -openvino/tools/mo/front/HSwish_fusion.py -openvino/tools/mo/front/image_scaler.py -openvino/tools/mo/front/input_cut.py -openvino/tools/mo/front/instance_normalization.py -openvino/tools/mo/front/interpolate_reshape.py -openvino/tools/mo/front/InterpolateNormalizer.py -openvino/tools/mo/front/InterpolateV1ToInterpolate.py -openvino/tools/mo/front/kaldi/__init__.py -openvino/tools/mo/front/kaldi/add_reshape_transpose_around_conv_pool.py -openvino/tools/mo/front/kaldi/apply_counts.py -openvino/tools/mo/front/kaldi/extractor.py -openvino/tools/mo/front/kaldi/extractors/__init__.py -openvino/tools/mo/front/kaldi/extractors/add_ext.py 
-openvino/tools/mo/front/kaldi/extractors/add_shift_ext.py -openvino/tools/mo/front/kaldi/extractors/affine_component_ext.py -openvino/tools/mo/front/kaldi/extractors/affine_component_preconditioned_online_ext.py -openvino/tools/mo/front/kaldi/extractors/affine_transform_ext.py -openvino/tools/mo/front/kaldi/extractors/backproptruncation_ext.py -openvino/tools/mo/front/kaldi/extractors/batchnorm_component_ext.py -openvino/tools/mo/front/kaldi/extractors/bias_component_ext.py -openvino/tools/mo/front/kaldi/extractors/clip_ext.py -openvino/tools/mo/front/kaldi/extractors/concat_ext.py -openvino/tools/mo/front/kaldi/extractors/const_ext.py -openvino/tools/mo/front/kaldi/extractors/convolutional_1d_component_ext.py -openvino/tools/mo/front/kaldi/extractors/convolutional_component_ext.py -openvino/tools/mo/front/kaldi/extractors/copy_ext.py -openvino/tools/mo/front/kaldi/extractors/crop_ext.py -openvino/tools/mo/front/kaldi/extractors/dropoutmask_ext.py -openvino/tools/mo/front/kaldi/extractors/elementwise_component_ext.py -openvino/tools/mo/front/kaldi/extractors/fixed_affine_component_ext.py -openvino/tools/mo/front/kaldi/extractors/generaldropout_ext.py -openvino/tools/mo/front/kaldi/extractors/linear_component_ext.py -openvino/tools/mo/front/kaldi/extractors/lstm_nonlinearity_ext.py -openvino/tools/mo/front/kaldi/extractors/lstm_projected_streams_ext.py -openvino/tools/mo/front/kaldi/extractors/max_pooling_ext.py -openvino/tools/mo/front/kaldi/extractors/memoryoffset_ext.py -openvino/tools/mo/front/kaldi/extractors/mul_ext.py -openvino/tools/mo/front/kaldi/extractors/naturalgradient_affine_component_ext.py -openvino/tools/mo/front/kaldi/extractors/noop_ext.py -openvino/tools/mo/front/kaldi/extractors/normalize_component_ext.py -openvino/tools/mo/front/kaldi/extractors/pnorm_component_ext.py -openvino/tools/mo/front/kaldi/extractors/rectified_linear_component_ext.py -openvino/tools/mo/front/kaldi/extractors/rescale_ext.py -openvino/tools/mo/front/kaldi/extractors/restrictedattentioncomponent_ext.py -openvino/tools/mo/front/kaldi/extractors/scale_component_ext.py -openvino/tools/mo/front/kaldi/extractors/softmax_ext.py -openvino/tools/mo/front/kaldi/extractors/specaugment_component_ext.py -openvino/tools/mo/front/kaldi/extractors/splice_component_ext.py -openvino/tools/mo/front/kaldi/extractors/tdnncomponent_ext.py -openvino/tools/mo/front/kaldi/extractors/timeheightconvolution_ext.py -openvino/tools/mo/front/kaldi/loader/__init__.py -openvino/tools/mo/front/kaldi/loader/loader.py -openvino/tools/mo/front/kaldi/loader/utils.py -openvino/tools/mo/front/kaldi/logsoftmax_component_ext.py -openvino/tools/mo/front/kaldi/memory_offset_adjustment.py -openvino/tools/mo/front/kaldi/memoryoffset_batch_update.py -openvino/tools/mo/front/kaldi/register_custom_ops.py -openvino/tools/mo/front/kaldi/replace_dropoutmask.py -openvino/tools/mo/front/kaldi/replace_eltwise_nin1.py -openvino/tools/mo/front/kaldi/replace_lstm_node_pattern.py -openvino/tools/mo/front/kaldi/replace_lstm_nonlinearity.py -openvino/tools/mo/front/kaldi/replace_timeheightconvolution.py -openvino/tools/mo/front/kaldi/restrictedattentioncomponent_replacer.py -openvino/tools/mo/front/kaldi/set_ports.py -openvino/tools/mo/front/kaldi/sigmoid_ext.py -openvino/tools/mo/front/kaldi/split_recurrent_memoryoffset.py -openvino/tools/mo/front/kaldi/tanh_component_ext.py -openvino/tools/mo/front/kaldi/tdnn_component_replacer.py -openvino/tools/mo/front/kaldi/utils.py -openvino/tools/mo/front/LayerNorm.py -openvino/tools/mo/front/Log1p.py 
-openvino/tools/mo/front/MatMul_normalizer.py -openvino/tools/mo/front/MoveEmbeddedInputsToInputs.py -openvino/tools/mo/front/no_op_eraser.py -openvino/tools/mo/front/non_max_suppression_normalize.py -openvino/tools/mo/front/OneHotDepthNormalizer.py -openvino/tools/mo/front/onnx/__init__.py -openvino/tools/mo/front/onnx/activation_ext.py -openvino/tools/mo/front/onnx/affine_ext.py -openvino/tools/mo/front/onnx/argmax_ext.py -openvino/tools/mo/front/onnx/argmin_ext.py -openvino/tools/mo/front/onnx/aten_ext.py -openvino/tools/mo/front/onnx/AttributedSliceToSlice.py -openvino/tools/mo/front/onnx/cast_ext.py -openvino/tools/mo/front/onnx/clip_ext.py -openvino/tools/mo/front/onnx/concat_ext.py -openvino/tools/mo/front/onnx/const_ext.py -openvino/tools/mo/front/onnx/constant_fill_ext.py -openvino/tools/mo/front/onnx/constant_of_shape_ext.py -openvino/tools/mo/front/onnx/constant_of_shape_to_broadcast.py -openvino/tools/mo/front/onnx/conv_ext.py -openvino/tools/mo/front/onnx/crop_ext.py -openvino/tools/mo/front/onnx/CTCGreedyDecoder_ext.py -openvino/tools/mo/front/onnx/cumsum_ext.py -openvino/tools/mo/front/onnx/deformable_conv_ext.py -openvino/tools/mo/front/onnx/depth_to_space_ext.py -openvino/tools/mo/front/onnx/dequantize_linear_ext.py -openvino/tools/mo/front/onnx/detection_output.py -openvino/tools/mo/front/onnx/detection_output_ext.py -openvino/tools/mo/front/onnx/detection_output_onnx_ext.py -openvino/tools/mo/front/onnx/dropout_ext.py -openvino/tools/mo/front/onnx/einsum_ext.py -openvino/tools/mo/front/onnx/elementwise_ext.py -openvino/tools/mo/front/onnx/expand_ext.py -openvino/tools/mo/front/onnx/extractor.py -openvino/tools/mo/front/onnx/extractors/__init__.py -openvino/tools/mo/front/onnx/extractors/utils.py -openvino/tools/mo/front/onnx/faster_rcnn.json -openvino/tools/mo/front/onnx/flatten_ext.py -openvino/tools/mo/front/onnx/flattenONNX_to_reshape.py -openvino/tools/mo/front/onnx/fused_bn_ext.py -openvino/tools/mo/front/onnx/gather_ext.py -openvino/tools/mo/front/onnx/gatherelements_ext.py -openvino/tools/mo/front/onnx/gathernd_ext.py -openvino/tools/mo/front/onnx/gemm_ext.py -openvino/tools/mo/front/onnx/group_norm_ext.py -openvino/tools/mo/front/onnx/gru_ext.py -openvino/tools/mo/front/onnx/hard_sigmoid_ext.py -openvino/tools/mo/front/onnx/identity_ext.py -openvino/tools/mo/front/onnx/image_scaler_ext.py -openvino/tools/mo/front/onnx/instance_normalization_ext.py -openvino/tools/mo/front/onnx/loader.py -openvino/tools/mo/front/onnx/logsoftmaxONNX_to_logsoftmax.py -openvino/tools/mo/front/onnx/loop_ext.py -openvino/tools/mo/front/onnx/LoopNormalize.py -openvino/tools/mo/front/onnx/lp_normalization_ext.py -openvino/tools/mo/front/onnx/lrn_ext.py -openvino/tools/mo/front/onnx/lstm_ext.py -openvino/tools/mo/front/onnx/mask_rcnn.json -openvino/tools/mo/front/onnx/mask_rcnn_conversion.py -openvino/tools/mo/front/onnx/matmul_ext.py -openvino/tools/mo/front/onnx/mean_variance_normalization_ext.py -openvino/tools/mo/front/onnx/MvnOnnxToMvn.py -openvino/tools/mo/front/onnx/non_max_suppression_ext.py -openvino/tools/mo/front/onnx/non_zero_ext.py -openvino/tools/mo/front/onnx/normalize_ext.py -openvino/tools/mo/front/onnx/normalize_l2_normalize.py -openvino/tools/mo/front/onnx/one_hot_ext.py -openvino/tools/mo/front/onnx/one_hot_normalize.py -openvino/tools/mo/front/onnx/ONNXResize10ToInterpolate.py -openvino/tools/mo/front/onnx/pad_converter.py -openvino/tools/mo/front/onnx/pad_ext.py -openvino/tools/mo/front/onnx/parameter_ext.py 
-openvino/tools/mo/front/onnx/person_detection_crossroad.json -openvino/tools/mo/front/onnx/person_detection_crossroad_conversion.py -openvino/tools/mo/front/onnx/pooling_ext.py -openvino/tools/mo/front/onnx/priorbox_clustered_ext.py -openvino/tools/mo/front/onnx/priorbox_ext.py -openvino/tools/mo/front/onnx/priorgridgenerator_ext.py -openvino/tools/mo/front/onnx/proposal_ext.py -openvino/tools/mo/front/onnx/quantize_ext.py -openvino/tools/mo/front/onnx/quantize_linear_ext.py -openvino/tools/mo/front/onnx/random_uniform_ext.py -openvino/tools/mo/front/onnx/range_ext.py -openvino/tools/mo/front/onnx/reduce_ext.py -openvino/tools/mo/front/onnx/register_custom_ops.py -openvino/tools/mo/front/onnx/reshape_ext.py -openvino/tools/mo/front/onnx/resize_ext.py -openvino/tools/mo/front/onnx/reverse_sequence_ext.py -openvino/tools/mo/front/onnx/rnn_ext.py -openvino/tools/mo/front/onnx/roialign_ext.py -openvino/tools/mo/front/onnx/roifeatureextractor_ext.py -openvino/tools/mo/front/onnx/scatter_ext.py -openvino/tools/mo/front/onnx/shape_ext.py -openvino/tools/mo/front/onnx/size_ext.py -openvino/tools/mo/front/onnx/slice_ext.py -openvino/tools/mo/front/onnx/softmax_ext.py -openvino/tools/mo/front/onnx/softmaxONNX_to_softmax.py -openvino/tools/mo/front/onnx/softplus_ext.py -openvino/tools/mo/front/onnx/space_to_depth_ext.py -openvino/tools/mo/front/onnx/split_ext.py -openvino/tools/mo/front/onnx/squeeze_ext.py -openvino/tools/mo/front/onnx/top_k_ext.py -openvino/tools/mo/front/onnx/topkrois_ext.py -openvino/tools/mo/front/onnx/transpose_ext.py -openvino/tools/mo/front/onnx/unsqueeze_ext.py -openvino/tools/mo/front/onnx/upsample_ext.py -openvino/tools/mo/front/onnx/where_ext.py -openvino/tools/mo/front/output_cut.py -openvino/tools/mo/front/override_batch.py -openvino/tools/mo/front/Pack.py -openvino/tools/mo/front/pass_separator.py -openvino/tools/mo/front/PowerToEltwises.py -openvino/tools/mo/front/rank_decomposer.py -openvino/tools/mo/front/reciprocal.py -openvino/tools/mo/front/reduce_axis_normalizer.py -openvino/tools/mo/front/reshape_dim_normalizer.py -openvino/tools/mo/front/restore_ports.py -openvino/tools/mo/front/RollWithEmptyAxesReplacer.py -openvino/tools/mo/front/scatter_normalizer.py -openvino/tools/mo/front/SizeReplacer.py -openvino/tools/mo/front/softmax.py -openvino/tools/mo/front/split_normalizer.py -openvino/tools/mo/front/SqueezeNormalize.py -openvino/tools/mo/front/sub.py -openvino/tools/mo/front/subgraph_matcher.py -openvino/tools/mo/front/tf/__init__.py -openvino/tools/mo/front/tf/activation_ext.py -openvino/tools/mo/front/tf/argmax_ext.py -openvino/tools/mo/front/tf/argmin_ext.py -openvino/tools/mo/front/tf/assign_elimination.py -openvino/tools/mo/front/tf/automl_efficientdet.json -openvino/tools/mo/front/tf/AutomlEfficientDet.py -openvino/tools/mo/front/tf/basic_lstm_cell.py -openvino/tools/mo/front/tf/batch_to_space_ext.py -openvino/tools/mo/front/tf/BatchMatMul_ext.py -openvino/tools/mo/front/tf/BatchToSpaceNDToUpsample.py -openvino/tools/mo/front/tf/BlockLSTM.py -openvino/tools/mo/front/tf/BlockLSTM_ext.py -openvino/tools/mo/front/tf/broadcast_ext.py -openvino/tools/mo/front/tf/bucketize.py -openvino/tools/mo/front/tf/bucketize_ext.py -openvino/tools/mo/front/tf/Cast_ext.py -openvino/tools/mo/front/tf/ClipByValue_ext.py -openvino/tools/mo/front/tf/ClipByValueTFTransformation.py -openvino/tools/mo/front/tf/common.py -openvino/tools/mo/front/tf/complex_ext.py -openvino/tools/mo/front/tf/ComplexAbs.py -openvino/tools/mo/front/tf/ComplexAbsAfterComplex.py 
-openvino/tools/mo/front/tf/concat.py -openvino/tools/mo/front/tf/concat_ext.py -openvino/tools/mo/front/tf/const_ext.py -openvino/tools/mo/front/tf/conv_ext.py -openvino/tools/mo/front/tf/CorrectPaddingsForPadAfterComplex.py -openvino/tools/mo/front/tf/crop_and_resize_ext.py -openvino/tools/mo/front/tf/CropAndResizeReplacement.py -openvino/tools/mo/front/tf/CTCGreedyDecoder_ext.py -openvino/tools/mo/front/tf/CTCGreedyDecoderReplacement.py -openvino/tools/mo/front/tf/CTCLoss_ext.py -openvino/tools/mo/front/tf/CTCLossReplacement.py -openvino/tools/mo/front/tf/cumsum_ext.py -openvino/tools/mo/front/tf/custom_subgraph_call.py -openvino/tools/mo/front/tf/deconv_ext.py -openvino/tools/mo/front/tf/depth_to_space.py -openvino/tools/mo/front/tf/efficient_det_support_api_v2.0.json -openvino/tools/mo/front/tf/efficient_det_support_api_v2.4.json -openvino/tools/mo/front/tf/einsum_ext.py -openvino/tools/mo/front/tf/elementwise_ext.py -openvino/tools/mo/front/tf/embedding_segments_mean_decomposition.py -openvino/tools/mo/front/tf/embedding_segments_operation_fusing.py -openvino/tools/mo/front/tf/expand_dims_ext.py -openvino/tools/mo/front/tf/extract_image_patches_ext.py -openvino/tools/mo/front/tf/extractor.py -openvino/tools/mo/front/tf/extractors/__init__.py -openvino/tools/mo/front/tf/extractors/concat.py -openvino/tools/mo/front/tf/extractors/fused_bn.py -openvino/tools/mo/front/tf/extractors/identity.py -openvino/tools/mo/front/tf/extractors/native_tf.py -openvino/tools/mo/front/tf/extractors/pack.py -openvino/tools/mo/front/tf/extractors/strided_slice.py -openvino/tools/mo/front/tf/extractors/subgraph_utils.py -openvino/tools/mo/front/tf/extractors/utils.py -openvino/tools/mo/front/tf/eye_ext.py -openvino/tools/mo/front/tf/eye_tf_to_eye.py -openvino/tools/mo/front/tf/fake_const_ext.py -openvino/tools/mo/front/tf/FakeQuantWithMinMaxVars_ext.py -openvino/tools/mo/front/tf/faster_rcnn_support.json -openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.10.json -openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.13.json -openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.14.json -openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.15.json -openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.7.json -openvino/tools/mo/front/tf/faster_rcnn_support_api_v2.0.json -openvino/tools/mo/front/tf/faster_rcnn_support_api_v2.4.json -openvino/tools/mo/front/tf/fft_ext.py -openvino/tools/mo/front/tf/fifo_queue_v2_ext.py -openvino/tools/mo/front/tf/fifo_replacer.py -openvino/tools/mo/front/tf/fill_ext.py -openvino/tools/mo/front/tf/FlattenToReshape.py -openvino/tools/mo/front/tf/floor_div_decomposition.py -openvino/tools/mo/front/tf/floor_ext.py -openvino/tools/mo/front/tf/gather_ext.py -openvino/tools/mo/front/tf/gathernd_ext.py -openvino/tools/mo/front/tf/GatherTree_ext.py -openvino/tools/mo/front/tf/GNMT_DynamicSequenceLengths.py -openvino/tools/mo/front/tf/graph_utils.py -openvino/tools/mo/front/tf/GRUBlockCell_ext.py -openvino/tools/mo/front/tf/GRUBlockCellReplacement.py -openvino/tools/mo/front/tf/identity_ext.py -openvino/tools/mo/front/tf/identityN_to_identity.py -openvino/tools/mo/front/tf/if_ext.py -openvino/tools/mo/front/tf/IteratorGetNext_ext.py -openvino/tools/mo/front/tf/IteratorGetNextCut.py -openvino/tools/mo/front/tf/loader.py -openvino/tools/mo/front/tf/log_softmax_ext.py -openvino/tools/mo/front/tf/LookupTableInsert_ext.py -openvino/tools/mo/front/tf/LoopCond_ext.py -openvino/tools/mo/front/tf/lrn_ext.py -openvino/tools/mo/front/tf/MapFNTransformation.py 
-openvino/tools/mo/front/tf/mask_rcnn_support.json -openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.11.json -openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.13.json -openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.14.json -openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.15.json -openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.7.json -openvino/tools/mo/front/tf/mask_rcnn_support_api_v2.0.json -openvino/tools/mo/front/tf/mask_rcnn_support_api_v2.4.json -openvino/tools/mo/front/tf/matmul_ext.py -openvino/tools/mo/front/tf/mvn.py -openvino/tools/mo/front/tf/mvn_unrolled.py -openvino/tools/mo/front/tf/nearest_neighbor_upsampling.py -openvino/tools/mo/front/tf/next_iteration_ext.py -openvino/tools/mo/front/tf/non_max_suppression_ext.py -openvino/tools/mo/front/tf/non_max_suppression_normalize.py -openvino/tools/mo/front/tf/NonConstBeginStridedSliceReplacement.py -openvino/tools/mo/front/tf/noop.py -openvino/tools/mo/front/tf/ObjectDetectionAPI.py -openvino/tools/mo/front/tf/one_hot_ext.py -openvino/tools/mo/front/tf/pad_ext.py -openvino/tools/mo/front/tf/pad_tf_to_pad.py -openvino/tools/mo/front/tf/partial_infer/__init__.py -openvino/tools/mo/front/tf/partial_infer/tf.py -openvino/tools/mo/front/tf/placeholder_ext.py -openvino/tools/mo/front/tf/placeholder_with_default_ext.py -openvino/tools/mo/front/tf/pooling_ext.py -openvino/tools/mo/front/tf/prelu.py -openvino/tools/mo/front/tf/QueueDequeue_ext.py -openvino/tools/mo/front/tf/random_uniform_ext.py -openvino/tools/mo/front/tf/random_uniform_int_ext.py -openvino/tools/mo/front/tf/range_ext.py -openvino/tools/mo/front/tf/reduce_ext.py -openvino/tools/mo/front/tf/register_custom_ops.py -openvino/tools/mo/front/tf/replacement.py -openvino/tools/mo/front/tf/reshape_related_ext.py -openvino/tools/mo/front/tf/resize_bilinear.py -openvino/tools/mo/front/tf/resize_nearest_neighbor.py -openvino/tools/mo/front/tf/retinanet.json -openvino/tools/mo/front/tf/RetinaNetFilteredDetectionsReplacement.py -openvino/tools/mo/front/tf/reverse_sequence.py -openvino/tools/mo/front/tf/reverse_v2.py -openvino/tools/mo/front/tf/rfcn_support.json -openvino/tools/mo/front/tf/rfcn_support_api_v1.10.json -openvino/tools/mo/front/tf/rfcn_support_api_v1.13.json -openvino/tools/mo/front/tf/rfcn_support_api_v1.14.json -openvino/tools/mo/front/tf/RFFTRealImagToRFFTSplit.py -openvino/tools/mo/front/tf/roll_ext.py -openvino/tools/mo/front/tf/RollRealImagPack.py -openvino/tools/mo/front/tf/scatter_nd_ext.py -openvino/tools/mo/front/tf/select_ext.py -openvino/tools/mo/front/tf/sign_ext.py -openvino/tools/mo/front/tf/slice_ext.py -openvino/tools/mo/front/tf/softmax_ext.py -openvino/tools/mo/front/tf/softplus_ext.py -openvino/tools/mo/front/tf/space_to_batch.py -openvino/tools/mo/front/tf/space_to_batch_ext.py -openvino/tools/mo/front/tf/space_to_depth_ext.py -openvino/tools/mo/front/tf/sparse_fill_empty_rows_ext.py -openvino/tools/mo/front/tf/sparse_segment_mean_ext.py -openvino/tools/mo/front/tf/sparse_segment_sqrtn_ext.py -openvino/tools/mo/front/tf/sparse_segment_sum_ext.py -openvino/tools/mo/front/tf/sparse_to_dense_replacer.py -openvino/tools/mo/front/tf/split_ext.py -openvino/tools/mo/front/tf/ssd_support.json -openvino/tools/mo/front/tf/ssd_support_api_v1.14.json -openvino/tools/mo/front/tf/ssd_support_api_v1.15.json -openvino/tools/mo/front/tf/ssd_support_api_v2.0.json -openvino/tools/mo/front/tf/ssd_support_api_v2.4.json -openvino/tools/mo/front/tf/ssd_toolbox_detection_output.json -openvino/tools/mo/front/tf/ssd_toolbox_multihead_detection_output.json 
-openvino/tools/mo/front/tf/ssd_v2_support.json -openvino/tools/mo/front/tf/SSDToolboxDetectionOutput.py -openvino/tools/mo/front/tf/swap_deconv_inputs.py -openvino/tools/mo/front/tf/swish_ext.py -openvino/tools/mo/front/tf/SwitchMergeOptimization.py -openvino/tools/mo/front/tf/TensorArrayExtractors.py -openvino/tools/mo/front/tf/TensorArrayGatherV3.py -openvino/tools/mo/front/tf/tensorflow_custom_operations_config_update.py -openvino/tools/mo/front/tf/TFFFTToDFT.py -openvino/tools/mo/front/tf/TFResizeToInterpolate.py -openvino/tools/mo/front/tf/TFScatterNDDecomposition.py -openvino/tools/mo/front/tf/TFSliceToSlice.py -openvino/tools/mo/front/tf/tile_ext.py -openvino/tools/mo/front/tf/topk_ext.py -openvino/tools/mo/front/tf/transpose_ext.py -openvino/tools/mo/front/tf/transposed_mvn_unrolled.py -openvino/tools/mo/front/tf/unique_ext.py -openvino/tools/mo/front/tf/UnpackPackReverseInputChannels.py -openvino/tools/mo/front/tf/variable_ext.py -openvino/tools/mo/front/tf/variables_values_freezing.py -openvino/tools/mo/front/tf/WhereDecomposition.py -openvino/tools/mo/front/tf/while_ext.py -openvino/tools/mo/front/tf/WhileNormalize.py -openvino/tools/mo/front/tf/yolo_v1.json -openvino/tools/mo/front/tf/yolo_v1_tiny.json -openvino/tools/mo/front/tf/yolo_v2.json -openvino/tools/mo/front/tf/yolo_v2_tiny.json -openvino/tools/mo/front/tf/yolo_v2_tiny_voc.json -openvino/tools/mo/front/tf/yolo_v2_voc.json -openvino/tools/mo/front/tf/yolo_v3.json -openvino/tools/mo/front/tf/yolo_v3_tiny.json -openvino/tools/mo/front/tf/yolo_v3_voc.json -openvino/tools/mo/front/ThresholdedReluDecomposition.py -openvino/tools/mo/front/TopKNormalize.py -openvino/tools/mo/front/transformations_config.py -openvino/tools/mo/front/TransposeOrderNormalizer.py -openvino/tools/mo/front/user_data_repack.py -openvino/tools/mo/front/YOLO.py -openvino/tools/mo/graph/__init__.py -openvino/tools/mo/graph/connection.py -openvino/tools/mo/graph/graph.py -openvino/tools/mo/graph/perm_inputs.py -openvino/tools/mo/graph/port.py -openvino/tools/mo/load/__init__.py -openvino/tools/mo/load/caffe/__init__.py -openvino/tools/mo/load/caffe/loader.py -openvino/tools/mo/load/kaldi/__init__.py -openvino/tools/mo/load/kaldi/loader.py -openvino/tools/mo/load/loader.py -openvino/tools/mo/load/onnx/__init__.py -openvino/tools/mo/load/onnx/loader.py -openvino/tools/mo/load/tf/__init__.py -openvino/tools/mo/load/tf/loader.py -openvino/tools/mo/main.py -openvino/tools/mo/main_caffe.py -openvino/tools/mo/main_kaldi.py -openvino/tools/mo/main_onnx.py -openvino/tools/mo/main_paddle.py -openvino/tools/mo/main_tf.py -openvino/tools/mo/middle/__init__.py -openvino/tools/mo/middle/AddFakeQuantizeFuse.py -openvino/tools/mo/middle/AddIsCyclicAttribute.py -openvino/tools/mo/middle/ApplyNHWCtoNCHWpermutation.py -openvino/tools/mo/middle/ApplyPermutations.py -openvino/tools/mo/middle/ArgOpsToTopK.py -openvino/tools/mo/middle/AttributedTileNormalizer.py -openvino/tools/mo/middle/BiasAddBroadcasting.py -openvino/tools/mo/middle/BinarizeWeightsM1P1.py -openvino/tools/mo/middle/BlockLSTMtoLSTMSequence.py -openvino/tools/mo/middle/CheckForCycle.py -openvino/tools/mo/middle/ConcatOptimization.py -openvino/tools/mo/middle/ConstSwitchResolver.py -openvino/tools/mo/middle/ConvertGroupedStridedSlice.py -openvino/tools/mo/middle/ConvertLayoutDependentOperations.py -openvino/tools/mo/middle/ConvertMultiInputConv.py -openvino/tools/mo/middle/ConvToBinaryConv.py -openvino/tools/mo/middle/CustomSubgraphCall.py -openvino/tools/mo/middle/CutInputHavingZeroDimFromConcat.py 
-openvino/tools/mo/middle/DecomposeBias.py -openvino/tools/mo/middle/DecomposeBidirectionalRNNSequence.py -openvino/tools/mo/middle/Deconvolution3rdInputNormalization.py -openvino/tools/mo/middle/DeleteControlFlowEdges.py -openvino/tools/mo/middle/DeleteNotExecutable.py -openvino/tools/mo/middle/dequantize_linear_resolver.py -openvino/tools/mo/middle/DilatedConvolution.py -openvino/tools/mo/middle/EltwiseChecker.py -openvino/tools/mo/middle/EltwiseInputReshape.py -openvino/tools/mo/middle/FakeSplitOutputs.py -openvino/tools/mo/middle/FusedBatchNormNonConstant.py -openvino/tools/mo/middle/FusedBatchNormTraining.py -openvino/tools/mo/middle/FuseReshapesSequence.py -openvino/tools/mo/middle/fusings.py -openvino/tools/mo/middle/GatherNDDecomposition.py -openvino/tools/mo/middle/GroupNorm.py -openvino/tools/mo/middle/GRURNNSequenceToTensorIterator.py -openvino/tools/mo/middle/InputCut.py -openvino/tools/mo/middle/InsertLayoutPropagationTransposes.py -openvino/tools/mo/middle/InsertSelect.py -openvino/tools/mo/middle/InterpolateSequenceToInterpolate.py -openvino/tools/mo/middle/L2NormFusing.py -openvino/tools/mo/middle/layer_normalization.py -openvino/tools/mo/middle/LayoutChangeForConstantShapePaths.py -openvino/tools/mo/middle/LayoutChangeForEinsum.py -openvino/tools/mo/middle/LeakyReluPattern.py -openvino/tools/mo/middle/LSTMRNNSequenceToTensorIterator.py -openvino/tools/mo/middle/MakeKaldiConstReshapable.py -openvino/tools/mo/middle/MarkSubgraphsWithCorrectLayout.py -openvino/tools/mo/middle/MergeNodesPermutations.py -openvino/tools/mo/middle/MoveConstToLoopBody.py -openvino/tools/mo/middle/MulFakeQuantizeFuse.py -openvino/tools/mo/middle/MXNetRNNSequenceNormalize.py -openvino/tools/mo/middle/MXNetSplitMultiLayers.py -openvino/tools/mo/middle/MXTileReplacer.py -openvino/tools/mo/middle/ONNXResize11ToInterpolate.py -openvino/tools/mo/middle/ONNXRNNSequenceNormalize.py -openvino/tools/mo/middle/PartialInfer.py -openvino/tools/mo/middle/pass_separator.py -openvino/tools/mo/middle/passes/__init__.py -openvino/tools/mo/middle/passes/conv.py -openvino/tools/mo/middle/passes/convert_data_type.py -openvino/tools/mo/middle/passes/debug.py -openvino/tools/mo/middle/passes/eliminate.py -openvino/tools/mo/middle/passes/fusing/__init__.py -openvino/tools/mo/middle/passes/fusing/decomposition.py -openvino/tools/mo/middle/passes/fusing/fuse_grouped_conv.py -openvino/tools/mo/middle/passes/fusing/fuse_linear_ops.py -openvino/tools/mo/middle/passes/fusing/fuse_linear_seq.py -openvino/tools/mo/middle/passes/fusing/helpers.py -openvino/tools/mo/middle/passes/fusing/mark_unfused_nodes.py -openvino/tools/mo/middle/passes/fusing/resnet_optimization.py -openvino/tools/mo/middle/passes/infer.py -openvino/tools/mo/middle/passes/tensor_names.py -openvino/tools/mo/middle/pattern_match.py -openvino/tools/mo/middle/permute_tensor_iterator.py -openvino/tools/mo/middle/PoolV2ToAttributedPool.py -openvino/tools/mo/middle/preprocessing.py -openvino/tools/mo/middle/PreserveRuntimeInfo.py -openvino/tools/mo/middle/quantize_dequantize_linear_resolver.py -openvino/tools/mo/middle/quantize_fuses.py -openvino/tools/mo/middle/quantize_linear_resolver.py -openvino/tools/mo/middle/ReluQuantizeFuse.py -openvino/tools/mo/middle/RemoveDuplicationMemory.py -openvino/tools/mo/middle/RemoveIdentity.py -openvino/tools/mo/middle/RemoveRedundantReshapeAfterCropAndResize.py -openvino/tools/mo/middle/RemoveRedundantReshapes.py -openvino/tools/mo/middle/RemoveUselessConcatSplit.py -openvino/tools/mo/middle/RemoveUselessCrops.py 
-openvino/tools/mo/middle/RemoveUselessPad.py -openvino/tools/mo/middle/ReplaceMemoryOffsetWithSplice.py -openvino/tools/mo/middle/replacement.py -openvino/tools/mo/middle/ReplacePNorm.py -openvino/tools/mo/middle/ReplaceSpliceNodePattern.py -openvino/tools/mo/middle/reverse_tensor_iterator.py -openvino/tools/mo/middle/ReverseTransposeNormalization.py -openvino/tools/mo/middle/ReverseV2ToReverseSequence.py -openvino/tools/mo/middle/RNNSequenceNormalizeToIE.py -openvino/tools/mo/middle/SharedWeightsDuplication.py -openvino/tools/mo/middle/SliceConverter.py -openvino/tools/mo/middle/SliceLikeToStridedSlice.py -openvino/tools/mo/middle/sparse_reshape.py -openvino/tools/mo/middle/split_tdnn_memoryoffset.py -openvino/tools/mo/middle/SplitConcatPairToInterpolate.py -openvino/tools/mo/middle/SSliceComplex.py -openvino/tools/mo/middle/StridedSliceNormalizer.py -openvino/tools/mo/middle/SwapAxesMiddleReplacer.py -openvino/tools/mo/middle/TensorIterator_utils.py -openvino/tools/mo/middle/TensorIteratorBackEdge.py -openvino/tools/mo/middle/TensorIteratorCondition.py -openvino/tools/mo/middle/TensorIteratorConditionChecker.py -openvino/tools/mo/middle/TensorIteratorInput.py -openvino/tools/mo/middle/TensorIteratorLSTMToLSTMSequence.py -openvino/tools/mo/middle/TensorIteratorMerge.py -openvino/tools/mo/middle/TensorIteratorOutput.py -openvino/tools/mo/middle/TF_lstm_cell_to_generic.py -openvino/tools/mo/middle/UnsqueezeTileReshapeBlockToInterpolate.py -openvino/tools/mo/middle/UpsampleToResample.py -openvino/tools/mo/middle/UselessMerge.py -openvino/tools/mo/middle/UselessSplitEraser.py -openvino/tools/mo/mo.py -openvino/tools/mo/mo_caffe.py -openvino/tools/mo/mo_kaldi.py -openvino/tools/mo/mo_onnx.py -openvino/tools/mo/mo_paddle.py -openvino/tools/mo/mo_tf.py -openvino/tools/mo/moc_frontend/__init__.py -openvino/tools/mo/moc_frontend/analysis.py -openvino/tools/mo/moc_frontend/check_config.py -openvino/tools/mo/moc_frontend/extractor.py -openvino/tools/mo/moc_frontend/layout_utils.py -openvino/tools/mo/moc_frontend/moc_emit_ir.py -openvino/tools/mo/moc_frontend/paddle_frontend_utils.py -openvino/tools/mo/moc_frontend/pipeline.py -openvino/tools/mo/moc_frontend/pytorch_frontend_utils.py -openvino/tools/mo/moc_frontend/shape_utils.py -openvino/tools/mo/ops/__init__.py -openvino/tools/mo/ops/activation.py -openvino/tools/mo/ops/activation_ops.py -openvino/tools/mo/ops/adaptive_avg_pooling.py -openvino/tools/mo/ops/argmax.py -openvino/tools/mo/ops/argmin.py -openvino/tools/mo/ops/assert_op.py -openvino/tools/mo/ops/assign.py -openvino/tools/mo/ops/aten.py -openvino/tools/mo/ops/axpy.py -openvino/tools/mo/ops/BatchNormInference.py -openvino/tools/mo/ops/binarization.py -openvino/tools/mo/ops/BlockLSTM.py -openvino/tools/mo/ops/BN.py -openvino/tools/mo/ops/box_nms.py -openvino/tools/mo/ops/broadcast.py -openvino/tools/mo/ops/bucketize.py -openvino/tools/mo/ops/Cast.py -openvino/tools/mo/ops/clamp.py -openvino/tools/mo/ops/ClipByValueTF.py -openvino/tools/mo/ops/Complex.py -openvino/tools/mo/ops/concat.py -openvino/tools/mo/ops/const.py -openvino/tools/mo/ops/constant_fill.py -openvino/tools/mo/ops/constant_of_shape.py -openvino/tools/mo/ops/ConvertLike.py -openvino/tools/mo/ops/convolution.py -openvino/tools/mo/ops/copyop.py -openvino/tools/mo/ops/crop.py -openvino/tools/mo/ops/ctc_greedy_decoder.py -openvino/tools/mo/ops/ctc_greedy_decoder_seq_len.py -openvino/tools/mo/ops/ctc_loss.py -openvino/tools/mo/ops/cumsum.py -openvino/tools/mo/ops/deconvolution.py -openvino/tools/mo/ops/deformable_convolution.py 
-openvino/tools/mo/ops/depth_to_space.py -openvino/tools/mo/ops/dequantize_linear.py -openvino/tools/mo/ops/detection_output_onnx.py -openvino/tools/mo/ops/DetectionOutput.py -openvino/tools/mo/ops/dft.py -openvino/tools/mo/ops/dropoutmask.py -openvino/tools/mo/ops/einsum.py -openvino/tools/mo/ops/elementwise.py -openvino/tools/mo/ops/eltwise.py -openvino/tools/mo/ops/eltwise_n.py -openvino/tools/mo/ops/eltwise_ninputs_in_1.py -openvino/tools/mo/ops/embedding_bag.py -openvino/tools/mo/ops/Enter.py -openvino/tools/mo/ops/Exit.py -openvino/tools/mo/ops/expand_dims.py -openvino/tools/mo/ops/ExtractImagePatches.py -openvino/tools/mo/ops/eye.py -openvino/tools/mo/ops/fake_output.py -openvino/tools/mo/ops/fakequantize.py -openvino/tools/mo/ops/fill.py -openvino/tools/mo/ops/flatten.py -openvino/tools/mo/ops/gather.py -openvino/tools/mo/ops/gatherelements.py -openvino/tools/mo/ops/gathernd.py -openvino/tools/mo/ops/GatherTree.py -openvino/tools/mo/ops/gelu.py -openvino/tools/mo/ops/grn.py -openvino/tools/mo/ops/group_norm.py -openvino/tools/mo/ops/GRU.py -openvino/tools/mo/ops/GRUBlockCell.py -openvino/tools/mo/ops/GRUCell.py -openvino/tools/mo/ops/hard_sigmoid.py -openvino/tools/mo/ops/identity.py -openvino/tools/mo/ops/If.py -openvino/tools/mo/ops/instance_normalization.py -openvino/tools/mo/ops/interp.py -openvino/tools/mo/ops/interpolate.py -openvino/tools/mo/ops/layer_norm.py -openvino/tools/mo/ops/log_softmax.py -openvino/tools/mo/ops/LookupTableInsert.py -openvino/tools/mo/ops/loop.py -openvino/tools/mo/ops/lrn.py -openvino/tools/mo/ops/LSTM.py -openvino/tools/mo/ops/lstm_cell.py -openvino/tools/mo/ops/lstm_sequence.py -openvino/tools/mo/ops/lstmnonlinearity.py -openvino/tools/mo/ops/MatMul.py -openvino/tools/mo/ops/memory.py -openvino/tools/mo/ops/memoryoffset.py -openvino/tools/mo/ops/merge.py -openvino/tools/mo/ops/multinomial.py -openvino/tools/mo/ops/mvn.py -openvino/tools/mo/ops/mxfft.py -openvino/tools/mo/ops/mxrepeat.py -openvino/tools/mo/ops/NextIteration.py -openvino/tools/mo/ops/nms_rotated.py -openvino/tools/mo/ops/non_max_suppression.py -openvino/tools/mo/ops/non_zero.py -openvino/tools/mo/ops/normalize.py -openvino/tools/mo/ops/normalize_l2.py -openvino/tools/mo/ops/one_hot.py -openvino/tools/mo/ops/ONNXResize10.py -openvino/tools/mo/ops/ONNXResize11.py -openvino/tools/mo/ops/op.py -openvino/tools/mo/ops/pack.py -openvino/tools/mo/ops/pad.py -openvino/tools/mo/ops/parameter.py -openvino/tools/mo/ops/permute.py -openvino/tools/mo/ops/pnorm.py -openvino/tools/mo/ops/pooling.py -openvino/tools/mo/ops/power.py -openvino/tools/mo/ops/prelu.py -openvino/tools/mo/ops/priorbox.py -openvino/tools/mo/ops/priorbox_clustered.py -openvino/tools/mo/ops/priorgridgenerator_onnx.py -openvino/tools/mo/ops/proposal.py -openvino/tools/mo/ops/proposal_onnx.py -openvino/tools/mo/ops/proposal_python_example.py -openvino/tools/mo/ops/psroipooling.py -openvino/tools/mo/ops/quantize_linear.py -openvino/tools/mo/ops/random_uniform.py -openvino/tools/mo/ops/range.py -openvino/tools/mo/ops/rank.py -openvino/tools/mo/ops/read_value.py -openvino/tools/mo/ops/ReduceOps.py -openvino/tools/mo/ops/regionyolo.py -openvino/tools/mo/ops/reorgyolo.py -openvino/tools/mo/ops/reshape.py -openvino/tools/mo/ops/resize.py -openvino/tools/mo/ops/resize_factor_utils.py -openvino/tools/mo/ops/restrictedattentioncomponent.py -openvino/tools/mo/ops/result.py -openvino/tools/mo/ops/Reverse.py -openvino/tools/mo/ops/reverse_sequence.py -openvino/tools/mo/ops/RNN.py -openvino/tools/mo/ops/RNNCell.py 
-openvino/tools/mo/ops/roialign.py -openvino/tools/mo/ops/roifeatureextractor_onnx.py -openvino/tools/mo/ops/roipooling.py -openvino/tools/mo/ops/roll.py -openvino/tools/mo/ops/scale_shift.py -openvino/tools/mo/ops/scatter.py -openvino/tools/mo/ops/scatternd.py -openvino/tools/mo/ops/select.py -openvino/tools/mo/ops/shape.py -openvino/tools/mo/ops/shufflechannel.py -openvino/tools/mo/ops/size.py -openvino/tools/mo/ops/slice.py -openvino/tools/mo/ops/slice_like.py -openvino/tools/mo/ops/softmax.py -openvino/tools/mo/ops/space_to_batch.py -openvino/tools/mo/ops/space_to_depth.py -openvino/tools/mo/ops/sparse_fill_empty_rows.py -openvino/tools/mo/ops/sparse_reshape.py -openvino/tools/mo/ops/sparse_segment_mean.py -openvino/tools/mo/ops/sparse_segment_sqrtn.py -openvino/tools/mo/ops/sparse_segment_sum.py -openvino/tools/mo/ops/splice.py -openvino/tools/mo/ops/split.py -openvino/tools/mo/ops/squeeze.py -openvino/tools/mo/ops/stop_gradient.py -openvino/tools/mo/ops/strided_slice.py -openvino/tools/mo/ops/swapaxis.py -openvino/tools/mo/ops/switch.py -openvino/tools/mo/ops/tdnncomponent.py -openvino/tools/mo/ops/tensor_iterator.py -openvino/tools/mo/ops/TensorArray.py -openvino/tools/mo/ops/TensorArrayGather.py -openvino/tools/mo/ops/TensorArrayRead.py -openvino/tools/mo/ops/TensorArrayScatter.py -openvino/tools/mo/ops/TensorArraySize.py -openvino/tools/mo/ops/TensorArrayWrite.py -openvino/tools/mo/ops/TensorIterator_ops.py -openvino/tools/mo/ops/TFFFT.py -openvino/tools/mo/ops/TFResize.py -openvino/tools/mo/ops/tile.py -openvino/tools/mo/ops/timeheightconvolution.py -openvino/tools/mo/ops/topk.py -openvino/tools/mo/ops/topkrois_onnx.py -openvino/tools/mo/ops/transpose.py -openvino/tools/mo/ops/unique.py -openvino/tools/mo/ops/unsqueeze.py -openvino/tools/mo/ops/upsample.py -openvino/tools/mo/pipeline/__init__.py -openvino/tools/mo/pipeline/common.py -openvino/tools/mo/pipeline/unified.py -openvino/tools/mo/subprocess_main.py -openvino/tools/mo/utils/__init__.py -openvino/tools/mo/utils/broadcasting.py -openvino/tools/mo/utils/check_ie_bindings.py -openvino/tools/mo/utils/check_mo_import.py -openvino/tools/mo/utils/class_registration.py -openvino/tools/mo/utils/cli_parser.py -openvino/tools/mo/utils/custom_replacement_config.py -openvino/tools/mo/utils/dsu.py -openvino/tools/mo/utils/environment_setup_utils.py -openvino/tools/mo/utils/error.py -openvino/tools/mo/utils/find_ie_version.py -openvino/tools/mo/utils/find_inputs.py -openvino/tools/mo/utils/get_ov_update_message.py -openvino/tools/mo/utils/graph.py -openvino/tools/mo/utils/guess_framework.py -openvino/tools/mo/utils/help.py -openvino/tools/mo/utils/ie_version.py -openvino/tools/mo/utils/import_extensions.py -openvino/tools/mo/utils/ir_engine/__init__.py -openvino/tools/mo/utils/ir_engine/compare_graphs.py -openvino/tools/mo/utils/ir_engine/ir_engine.py -openvino/tools/mo/utils/ir_reader/__init__.py -openvino/tools/mo/utils/ir_reader/extender.py -openvino/tools/mo/utils/ir_reader/extenders/__init__.py -openvino/tools/mo/utils/ir_reader/extenders/binary_convolution_extender.py -openvino/tools/mo/utils/ir_reader/extenders/bucketize_extender.py -openvino/tools/mo/utils/ir_reader/extenders/conv_extender.py -openvino/tools/mo/utils/ir_reader/extenders/convert_extender.py -openvino/tools/mo/utils/ir_reader/extenders/ctc_greedy_decoder_seq_len_extender.py -openvino/tools/mo/utils/ir_reader/extenders/deconvolution_extender.py -openvino/tools/mo/utils/ir_reader/extenders/deformable_convolution_extender.py 
-openvino/tools/mo/utils/ir_reader/extenders/einsum_extender.py -openvino/tools/mo/utils/ir_reader/extenders/experimental_extender.py -openvino/tools/mo/utils/ir_reader/extenders/ExtractImagePatches_extender.py -openvino/tools/mo/utils/ir_reader/extenders/eye_extender.py -openvino/tools/mo/utils/ir_reader/extenders/fakequantize_extender.py -openvino/tools/mo/utils/ir_reader/extenders/GRUCell_extender.py -openvino/tools/mo/utils/ir_reader/extenders/GRUSequence_extender.py -openvino/tools/mo/utils/ir_reader/extenders/if_extender.py -openvino/tools/mo/utils/ir_reader/extenders/interpolate_extender.py -openvino/tools/mo/utils/ir_reader/extenders/loop_extender.py -openvino/tools/mo/utils/ir_reader/extenders/LSTMCell_extender.py -openvino/tools/mo/utils/ir_reader/extenders/LSTMSequence_extender.py -openvino/tools/mo/utils/ir_reader/extenders/non_max_suppression_extender.py -openvino/tools/mo/utils/ir_reader/extenders/non_zero_extender.py -openvino/tools/mo/utils/ir_reader/extenders/pad_extender.py -openvino/tools/mo/utils/ir_reader/extenders/parameter_extender.py -openvino/tools/mo/utils/ir_reader/extenders/pooling_extender.py -openvino/tools/mo/utils/ir_reader/extenders/priorbox_clustered_extender.py -openvino/tools/mo/utils/ir_reader/extenders/priorbox_extender.py -openvino/tools/mo/utils/ir_reader/extenders/random_uniform_extender.py -openvino/tools/mo/utils/ir_reader/extenders/range_extender.py -openvino/tools/mo/utils/ir_reader/extenders/reorg_yolo_extender.py -openvino/tools/mo/utils/ir_reader/extenders/RNNCell_extender.py -openvino/tools/mo/utils/ir_reader/extenders/shape_of_extender.py -openvino/tools/mo/utils/ir_reader/extenders/strided_slice_extender.py -openvino/tools/mo/utils/ir_reader/extenders/tensoriterator_extender.py -openvino/tools/mo/utils/ir_reader/extenders/topk_extender.py -openvino/tools/mo/utils/ir_reader/extenders/variadic_split_extender.py -openvino/tools/mo/utils/ir_reader/internal_ops/scatter.py -openvino/tools/mo/utils/ir_reader/internal_ops/squeeze.py -openvino/tools/mo/utils/ir_reader/internal_ops/unique.py -openvino/tools/mo/utils/ir_reader/internal_ops/unsqueeze.py -openvino/tools/mo/utils/ir_reader/layer_to_class.py -openvino/tools/mo/utils/ir_reader/restore_graph.py -openvino/tools/mo/utils/json_schema.py -openvino/tools/mo/utils/logger.py -openvino/tools/mo/utils/model_analysis.py -openvino/tools/mo/utils/pipeline_config.py -openvino/tools/mo/utils/replacement_pattern.py -openvino/tools/mo/utils/runtime_info.py -openvino/tools/mo/utils/shape.py -openvino/tools/mo/utils/simple_proto_parser.py -openvino/tools/mo/utils/str_to.py -openvino/tools/mo/utils/summarize_graph.py -openvino/tools/mo/utils/telemetry_params.py -openvino/tools/mo/utils/telemetry_stub.py -openvino/tools/mo/utils/telemetry_utils.py -openvino/tools/mo/utils/tensorboard_util.py -openvino/tools/mo/utils/type_utils.py -openvino/tools/mo/utils/unsupported_ops.py -openvino/tools/mo/utils/utils.py -openvino/tools/mo/utils/version.py diff --git a/tools/mo/automation/utils.py b/tools/mo/automation/utils.py deleted file mode 100644 index def1e2fce3aac3..00000000000000 --- a/tools/mo/automation/utils.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import subprocess # nosec -import tarfile -from datetime import datetime -from shutil import copy, copytree, rmtree - - - -class Automation: - @staticmethod - def parse_bom(bom_path): - files = [] - for file in open(bom_path): - files.append(file) - return files - - 
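parse_bom above returns the BOM entries one per line with trailing newlines kept, and copy_files_from_bom below strips them before copying. A minimal, self-contained sketch of consuming such a BOM listing is shown here; the file and directory names are placeholders, not paths from the removed tool.

import os

def find_missing_bom_entries(bom_path, root_path):
    # Collect BOM entries (one relative path per line) that are absent under root_path.
    missing = []
    with open(bom_path) as bom_file:
        for line in bom_file:
            rel_path = line.strip()
            if rel_path and not os.path.exists(os.path.join(root_path, rel_path)):
                missing.append(rel_path)
    return missing

# Example usage (placeholder paths):
# print(find_missing_bom_entries("automation/package_BOM.txt", "tools/mo"))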
@staticmethod - def copy_files_from_bom(root_path, bom): - target_dir = os.path.join(os.path.dirname(__file__), "ModelOptimizerForTensorflow") - if os.path.exists(target_dir): - rmtree(target_dir) - os.makedirs(target_dir) - for file in bom: - src = os.path.join(root_path, file.strip('\n')) - dst = os.path.join(target_dir, file.strip('\n')) - if not os.path.exists(os.path.dirname(dst)): - os.makedirs(os.path.dirname(dst)) - if os.path.isdir(src): - copytree(src, dst) - else: - copy(src, dst) - return target_dir - - @staticmethod - def add_version_txt(dst_path, build_number): - timestamp = datetime.now().strftime("%I:%M%p %B %d, %Y") - with open(os.path.join(dst_path, "version.txt"), 'w') as f: - f.write(timestamp + '\n') - f.write(build_number + '\n') - - @staticmethod - def make_tarfile(out_file_name, source_dir): - archive_path = os.path.join(os.path.dirname(__file__), out_file_name) - if os.path.exists(archive_path): - os.remove(archive_path) - with tarfile.open(out_file_name, "w:gz") as tar: - tar.add(source_dir, arcname=os.path.basename(source_dir)) diff --git a/tools/mo/automation/version.txt.in b/tools/mo/automation/version.txt.in deleted file mode 100644 index 9e9d0212f10185..00000000000000 --- a/tools/mo/automation/version.txt.in +++ /dev/null @@ -1 +0,0 @@ -@CI_BUILD_NUMBER@ \ No newline at end of file diff --git a/tools/mo/openvino/__init__.py b/tools/mo/openvino/__init__.py deleted file mode 100644 index b015570964c520..00000000000000 --- a/tools/mo/openvino/__init__.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -__path__ = __import__("pkgutil").extend_path(__path__, __name__) - -# Required for Windows OS platforms -# Note: always top-level -try: - from openvino.utils import _add_openvino_libs_to_search_path - _add_openvino_libs_to_search_path() -except ImportError: - pass - -# OpenVINO API -try: - # Import all public modules - from openvino import runtime as runtime - from openvino import frontend as frontend - from openvino import helpers as helpers - from openvino import preprocess as preprocess - from openvino import utils as utils - from openvino import properties as properties - - # Import most important classes and functions from openvino.runtime - from openvino.runtime import Model - from openvino.runtime import Core - from openvino.runtime import CompiledModel - from openvino.runtime import InferRequest - from openvino.runtime import AsyncInferQueue - - from openvino.runtime import Symbol - from openvino.runtime import Dimension - from openvino.runtime import Strides - from openvino.runtime import PartialShape - from openvino.runtime import Shape - from openvino.runtime import Layout - from openvino.runtime import Type - from openvino.runtime import Tensor - from openvino.runtime import OVAny - - from openvino.runtime import compile_model - from openvino.runtime import get_batch - from openvino.runtime import set_batch - from openvino.runtime import serialize - from openvino.runtime import shutdown - from openvino.runtime import tensor_from_file - from openvino.runtime import save_model - from openvino.runtime import layout_helpers - - from openvino._pyopenvino import RemoteContext - from openvino._pyopenvino import RemoteTensor - from openvino._pyopenvino import Op - - # libva related: - from openvino._pyopenvino import VAContext - from openvino._pyopenvino import VASurfaceTensor - - # Set version for openvino package - from openvino.runtime import get_version - 
__version__ = get_version() -except ImportError: - import warnings - warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2) - -# Tools -try: - # Model Conversion API - ovc should reside in the main namespace - from openvino.tools.ovc import convert_model -except ImportError: - pass diff --git a/tools/mo/openvino/tools/__init__.py b/tools/mo/openvino/tools/__init__.py deleted file mode 100644 index 0d0e5a44956e88..00000000000000 --- a/tools/mo/openvino/tools/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -__path__ = __import__("pkgutil").extend_path(__path__, __name__) diff --git a/tools/mo/openvino/tools/mo/__init__.py b/tools/mo/openvino/tools/mo/__init__.py deleted file mode 100644 index 021853c0412e07..00000000000000 --- a/tools/mo/openvino/tools/mo/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.convert import convert_model, LayoutMap, InputCutInfo diff --git a/tools/mo/openvino/tools/mo/__main__.py b/tools/mo/openvino/tools/mo/__main__.py deleted file mode 100644 index a46bd890fb3a66..00000000000000 --- a/tools/mo/openvino/tools/mo/__main__.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.subprocess_main import subprocess_main -from openvino.tools.mo.utils.telemetry_utils import init_mo_telemetry -init_mo_telemetry() -subprocess_main(framework=None) diff --git a/tools/mo/openvino/tools/mo/analysis/__init__.py b/tools/mo/openvino/tools/mo/analysis/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/analysis/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/analysis/boolean_input.py b/tools/mo/openvino/tools/mo/analysis/boolean_input.py deleted file mode 100644 index b5803c24ac66d7..00000000000000 --- a/tools/mo/openvino/tools/mo/analysis/boolean_input.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.model_analysis import AnalyzeAction - - -class TrainingPhaseAnalysis(AnalyzeAction): - - def analyze(self, graph: Graph): - nodes = graph.get_op_nodes(op='Parameter', data_type=bool) - names = "" - params = "" - if not nodes: - return None, None - - for node in nodes: - names = names + '\t{}\n'.format(node.name) - params = params + '\t--input "{}->False" or --input "{}->True"\n'.format(node.name, - node.name) - - message = 'It looks like there are input nodes of boolean type:\n' + names - - message = message + 'If this input node is as switch between the training and an inference mode, ' \ - 'then you need to freeze this input with value True or False.\n' \ - 'In order to do this run the Model Optimizer with the command line parameter:\n' \ - + params - - message = message + 'to switch graph to inference mode.' 
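The boolean-input check performed by TrainingPhaseAnalysis above can also be reproduced with the OpenVINO runtime API; a hedged sketch follows, where the model path is a placeholder and the snippet only reports boolean inputs rather than freezing them.

import openvino as ov

core = ov.Core()
model = core.read_model("model.xml")  # placeholder path

# Report Parameter inputs of boolean type, the same situation the analyzer warns about.
boolean_inputs = [port.get_any_name() for port in model.inputs
                  if port.get_element_type() == ov.Type.boolean]
for name in boolean_inputs:
    print('Boolean input "{}" may be a training/inference switch; '
          'consider freezing it to True or False.'.format(name))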
- return None, message diff --git a/tools/mo/openvino/tools/mo/analysis/inputs.py b/tools/mo/openvino/tools/mo/analysis/inputs.py deleted file mode 100644 index 2b541ee6bea3ca..00000000000000 --- a/tools/mo/openvino/tools/mo/analysis/inputs.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.model_analysis import AnalyzeAction - - -class InputsAnalysis(AnalyzeAction): - """ - The analyser gets information about model inputs and their default values if any. - """ - - @classmethod - def fifo_queue_analysis(cls, graph: Graph, inputs_desc: dict): - """ - The FIFOQueue with QueueDeque has a separate input that specifies the size of batch to extract from queue. This - input is redundant and should be remove from the model analysis output. - """ - inputs_to_ignore = set() - for fifo_queue in graph.get_op_nodes(op='FIFOQueueV2'): - if len(fifo_queue.get_outputs({'out': 0})) != 1: - log.debug('The FIFOQueue operation "{}" has more than 1 consumers'.format(fifo_queue.id)) - continue - queue_deque = fifo_queue.out_node(0) - if queue_deque.op in ['QueueDequeueMany', 'QueueDequeueManyV2', 'QueueDequeueUpTo', 'QueueDequeueUpToV2']: - queue_deque_input_1 = queue_deque.in_node(1) - if queue_deque_input_1.op in ['Parameter', 'PlaceholderWithDefault']: - log.debug('Adding node "{}" to placeholder ignore list'.format(queue_deque_input_1.id)) - inputs_to_ignore.add(queue_deque_input_1.id) - - # create input per each QueueDeque output port - for port_ind in range(len(queue_deque.out_nodes())): - inputs_desc["{}:{}".format(queue_deque.id, port_ind)] = {'shape': fifo_queue.shapes[port_ind].tolist(), - 'value': None, - 'data_type': fifo_queue.types[port_ind]} - return inputs_to_ignore - - @classmethod - def iterator_get_next_analysis(cls, graph: Graph, inputs_desc: dict): - message = None - op_nodes = graph.get_op_nodes(op='IteratorGetNext') - - params = '' - for iter_get_next in op_nodes: - for port in iter_get_next.out_nodes().keys(): - inputs_desc['{}:{}'.format(iter_get_next.soft_get('name', iter_get_next.id), port)] = { - 'shape': iter_get_next.shapes[port].tolist(), - 'value': None, - 'data_type': iter_get_next.types[port] - } - if params != '': - params = params + ',' - shape = str(iter_get_next.shapes[port].tolist()).replace(',', '') - params = params + '{}:{}{}'.format(iter_get_next.soft_get('name', iter_get_next.id), port, shape) - - if len(op_nodes): - message = 'It looks like there is IteratorGetNext as input\n' \ - 'Run the Model Optimizer without --input option \n' \ - 'Otherwise, try to run the Model Optimizer with:\n\t\t--input "{}"\n'.format(params) - return message - - def analyze(self, graph: Graph): - inputs_desc = dict() - message = InputsAnalysis.iterator_get_next_analysis(graph, inputs_desc) - inputs_to_ignore = InputsAnalysis.fifo_queue_analysis(graph, inputs_desc) - inputs = graph.get_op_nodes(op='Parameter') - for input in inputs: - inputs_desc[input.name] = {'shape': input.soft_get('shape', None), - 'data_type': input.soft_get('data_type', None), - 'value': None, - } - placeholders_with_default = graph.get_op_nodes(op='PlaceholderWithDefault') - for input in placeholders_with_default: - inputs_desc[input.name] = {'shape': input.soft_get('shape', None), - 'data_type': input.soft_get('data_type', None), - 'value': input.in_node(0).value if 0 in input.in_nodes() and - input.in_node(0).has_valid('value') else None} - - 
for input_to_ignore in inputs_to_ignore: - del inputs_desc[input_to_ignore] - - # workaround for the ONNX models case where input shape is specified as string value like: "width", "height". - # In this case the string value is converted to 0, but in fact it is an arbitrary value so should be -1 - if graph.graph['fw'] == 'onnx': - for inp in inputs_desc.values(): - inp['shape'] = [-1 if item == 0 else item for item in inp['shape']] - return {'inputs': inputs_desc}, message diff --git a/tools/mo/openvino/tools/mo/analysis/json_print.py b/tools/mo/openvino/tools/mo/analysis/json_print.py deleted file mode 100644 index 1e7b17f72eac82..00000000000000 --- a/tools/mo/openvino/tools/mo/analysis/json_print.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import json -import logging as log -import sys - -import numpy as np - -from openvino.tools.mo.front.user_data_repack import UserDataRepack -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_precision -from openvino.tools.mo.utils.model_analysis import AnalyzeAction, AnalysisCollectorAnchor, AnalysisResults - - -def prepare_obj_for_dump(obj: object): - if isinstance(obj, dict): - return {k: prepare_obj_for_dump(v) for k, v in obj.items()} - elif isinstance(obj, np.ndarray): - if obj.ndim == 0: - return obj.item() - else: - return [prepare_obj_for_dump(elem) for elem in obj] - elif isinstance(obj, list): - return [prepare_obj_for_dump(elem) for elem in obj] - elif isinstance(obj, type): - try: - return np_data_type_to_precision(obj) - except: - log.error('Unsupported data type: {}'.format(str(obj))) - return str(obj) - elif isinstance(obj, np.generic): - return obj.item() - else: - return str(obj) - - -class AnalysisJSONPrint(AnalyzeAction): - """ - The action prints the analysis results in JSON format. - """ - enabled = False - id = 'ANALYSIS_JSON_PRINT' - - def run_before(self): - return [UserDataRepack] - - def run_after(self): - return [AnalysisCollectorAnchor] - - def analyze(self, graph: Graph): - analysis_results = AnalysisResults() - if analysis_results.get_result() is not None: - try: - print(json.dumps(prepare_obj_for_dump(analysis_results.get_result()))) - except Exception as e: - log.error('Cannot serialize to JSON: %s', str(e)) - sys.exit(1) - sys.exit(0) - diff --git a/tools/mo/openvino/tools/mo/analysis/nodes.py b/tools/mo/openvino/tools/mo/analysis/nodes.py deleted file mode 100644 index 6080aee4849a64..00000000000000 --- a/tools/mo/openvino/tools/mo/analysis/nodes.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.model_analysis import AnalyzeAction - - -class IntermediatesNodesAnalysis(AnalyzeAction): - """ - The analyser gets node names, their shapes and values (if possible) of all nodes in the model. 
- """ - def analyze(self, graph: Graph): - outputs_desc = dict() - - for node in graph.get_op_nodes(): - outputs_desc[node.name] = {'shape': node.soft_get('shape', None), - 'data_type': None, - 'value': None, - } - return {'intermediate': outputs_desc}, None diff --git a/tools/mo/openvino/tools/mo/analysis/tf_od_api.py b/tools/mo/openvino/tools/mo/analysis/tf_od_api.py deleted file mode 100644 index 1c80abfe21fa4e..00000000000000 --- a/tools/mo/openvino/tools/mo/analysis/tf_od_api.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.model_analysis import AnalyzeAction, graph_contains_scope -from openvino.tools.mo.utils.utils import files_by_pattern, get_mo_root_dir - - -class TensorFlowObjectDetectionAPIAnalysis(AnalyzeAction): - """ - The analyser checks if the provided model is TF OD API model from - https://github.com/tensorflow/models/tree/master/research/object_detection/g3doc/detection_model_zoo.md of one of 4 - supported flavors: SSD, RFCN, Faster RCNN, Mask RCNN. - """ - graph_condition = [lambda graph: graph.graph['fw'] == 'tf'] - - file_patterns = {'MaskRCNN': 'mask_rcnn_support.*\\.json', - 'RFCN': 'rfcn_support.*\\.json', - 'FasterRCNN': 'faster_rcnn_support.*\\.json', - 'SSD': 'ssd.*_support.*\\.json', - } - model_scopes = {'MaskRCNN': (['FirstStageFeatureExtractor', - 'SecondStageFeatureExtractor', - 'SecondStageBoxPredictor', - 'SecondStageBoxPredictor_1', - 'SecondStageFeatureExtractor_1', - ],), - 'RFCN': (['FirstStageFeatureExtractor', - 'SecondStageFeatureExtractor', - 'SecondStageBoxPredictor', - 'SecondStageBoxPredictor/map', - 'SecondStageBoxPredictor/map_1', - 'SecondStagePostprocessor', - ],), - 'FasterRCNN': (['FirstStageFeatureExtractor', - 'SecondStageFeatureExtractor', - 'SecondStageBoxPredictor', - 'SecondStagePostprocessor', - ], - ['FirstStageRPNFeatures', - 'FirstStageBoxPredictor', - 'SecondStagePostprocessor', - 'mask_rcnn_keras_box_predictor', - ],), - 'SSD': ([('FeatureExtractor', 'ssd_mobile_net_v2keras_feature_extractor', - 'ssd_mobile_net_v1fpn_keras_feature_extractor', - 'ssd_mobile_net_v2fpn_keras_feature_extractor', 'ResNet50V1_FPN', 'ResNet101V1_FPN', - 'ResNet152V1_FPN' - ), - 'Postprocessor'] - ), - } - - def analyze(self, graph: Graph): - tf_1_names = ['image_tensor', 'detection_classes', 'detection_boxes', 'detection_scores', - ('Preprocessor', 'map')] - tf_1_cond = all([graph_contains_scope(graph, scope) for scope in tf_1_names]) - - tf_2_names = ['input_tensor', 'output_control_node', 'Identity_', ('Preprocessor', 'map')] - tf_2_cond = all([graph_contains_scope(graph, scope) for scope in tf_2_names]) - - if not tf_1_cond and not tf_2_cond: - log.debug('The model does not contain nodes that must exist in the TF OD API models') - return None, None - - for flavor, scopes_tuple in self.model_scopes.items(): - for scopes in scopes_tuple: - if all([graph_contains_scope(graph, scope) for scope in scopes]): - result = dict() - result['flavor'] = flavor - result['mandatory_parameters'] = {'transformations_config': - files_by_pattern(get_mo_root_dir() + '/openvino/tools/mo/front/tf', - __class__.file_patterns[flavor], - add_prefix=True), - 'tensorflow_object_detection_api_pipeline_config': None, - } - message = "Your model looks like TensorFlow Object Detection API Model.\n" \ - "Check if all parameters are specified:\n" \ - "\t--transformations_config\n" \ - 
"\t--tensorflow_object_detection_api_pipeline_config\n" \ - "\t--input_shape (optional)\n" \ - "\t--reverse_input_channels (if you convert a model to use with the OpenVINO sample applications)\n" \ - "Detailed information about conversion of this model can be found at\n" \ - "https://docs.openvino.ai/2023.0/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models.html" - return {'model_type': {'TF_OD_API': result}}, message - return None, None diff --git a/tools/mo/openvino/tools/mo/analysis/tf_retinanet.py b/tools/mo/openvino/tools/mo/analysis/tf_retinanet.py deleted file mode 100644 index e3115366f9cbd7..00000000000000 --- a/tools/mo/openvino/tools/mo/analysis/tf_retinanet.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.pattern_match import apply_pattern -from openvino.tools.mo.utils.model_analysis import AnalyzeAction - - -def pattern_instance_counter(graph: Graph, match: dict): - pattern_instance_counter.counter += 1 - - -pattern_instance_counter.counter = 0 - -RETINANET_PATTERN = { - 'nodes': [ - ('range_1', dict(kind='op', op='Range')), - ('range_2', dict(kind='op', op='Range')), - ('cast_1', dict(kind='op', op='Cast')), - ('cast_2', dict(kind='op', op='Cast')), - ('add_1', dict(kind='op', op='Add')), - ('add_2', dict(kind='op', op='Add')), - ('mul_1', dict(kind='op', op='Mul')), - ('mul_2', dict(kind='op', op='Mul')), - ('size_1', dict(kind='op', op='Size')), - ('size_2', dict(kind='op', op='Size')), - ('pack', dict(kind='op', op='Pack')), - ('fill', dict(kind='op', op='Fill')) - ], - - 'edges': [ - ('range_1', 'cast_1'), - ('range_2', 'cast_2'), - ('cast_1', 'add_1'), - ('cast_2', 'add_2'), - ('add_1', 'mul_1'), - ('add_2', 'mul_2'), - ('mul_1', 'size_1'), - ('mul_2', 'size_2'), - ('size_1', 'pack'), - ('size_2', 'pack'), - ('pack', 'fill') - ] -} - - -class TensorFlowRetinaNet(AnalyzeAction): - - def analyze(self, graph: Graph): - pattern_instance_counter.counter = 0 - apply_pattern(graph, **RETINANET_PATTERN, action=pattern_instance_counter) - - if pattern_instance_counter.counter > 0: - result = dict() - result['mandatory_parameters'] = {'transformations_config': - 'openvino/tools/mo/front/tf/retinanet.json'} - - message = "Your model looks like TensorFlow RetinaNet Model.\n" \ - "To generate the IR, provide model to the Model Optimizer with the following parameters:\n" \ - "\t--input_model /.pb\n" \ - "\t--input_shape [1,600,600,3]\n" \ - "\t--transformations_config /tools/model_optimizer/openvino/tools/mo/front/tf/retinanet.json\n" \ - "\t--reverse_input_channels" - - return {'model_type': {'TF_RetinaNet': result}}, message - - return None, None diff --git a/tools/mo/openvino/tools/mo/analysis/tf_yolo.py b/tools/mo/openvino/tools/mo/analysis/tf_yolo.py deleted file mode 100644 index b2df587364e069..00000000000000 --- a/tools/mo/openvino/tools/mo/analysis/tf_yolo.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.pattern_match import apply_pattern -from openvino.tools.mo.utils.model_analysis import AnalyzeAction, graph_contains_scope - - -YOLO_PATTERN = { - 'nodes': [ - ('pad', dict(op='Pad')), - ('conv', dict(op='Conv2D')), - ('sub', dict(op='Sub')), - ('div', dict(op='Div')), - ('mul', dict(op='Mul')), - ('bias_add', dict(op='Add')), - ('mul_2', 
dict(op='Mul')), - ('max', dict(op='Maximum')), - ], - 'edges': [ - ('pad', 'conv', {'out': 0}), - ('conv', 'sub', {'out': 0}), - ('sub', 'div', {'out': 0}), - ('div', 'mul', {'out': 0}), - ('mul', 'bias_add', {'out': 0}), - ('bias_add', 'mul_2', {'out': 0}), - ('bias_add', 'max', {'out': 0}), - ('mul_2', 'max', {'out': 0}), - ] -} - - -def pattern_instance_counter(graph: Graph, match: dict): - pattern_instance_counter.counter += 1 - - -pattern_instance_counter.counter = 0 - - -YOLO_CONFIGS = {'YOLOV2Full': ['openvino/tools/mo/front/tf/yolo_v2.json', 'openvino/tools/mo/front/tf/yolo_v2_voc.json'], - 'YOLOV3Full': ['openvino/tools/mo/front/tf/yolo_v3.json', 'openvino/tools/mo/front/tf/yolo_v3_voc.json'], - 'YOLOV2Tiny': ['openvino/tools/mo/front/tf/yolo_v2_tiny.json', 'openvino/tools/mo/front/tf/yolo_v2_tiny_voc.json'], - 'YOLOV3Tiny': ['openvino/tools/mo/front/tf/yolo_v3_tiny.json', 'openvino/tools/mo/front/tf/yolo_v3_tiny_voc.json'], - } - - -def get_YOLO_params_by_flavor(flavor: str): - result = dict() - result['flavor'] = flavor - result['mandatory_parameters'] = {'transformations_config': YOLO_CONFIGS[flavor]} - return result - - -class TensorFlowYOLOV1V2Analysis(AnalyzeAction): - """ - The analyser checks if the provided model is TensorFlow YOLO models from https://github.com/thtrieu/darkflow . - """ - graph_condition = [lambda graph: graph.graph['fw'] == 'tf'] - - def analyze(self, graph: Graph): - pattern_instance_counter.counter = 0 - apply_pattern(graph, **YOLO_PATTERN, action=pattern_instance_counter) - - flavor = None - if pattern_instance_counter.counter > 0: - if pattern_instance_counter.counter == 22: - flavor = 'YOLOV2Full' - elif pattern_instance_counter.counter == 8: - flavor = 'YOLOV2Tiny' - if flavor is not None: - message = "Your model looks like YOLOv1 or YOLOv2 Model.\n" \ - "To generate the IR, provide TensorFlow YOLOv1 or YOLOv2 Model to the Model Optimizer with the following parameters:\n" \ - "\t--input_model /.pb\n" \ - "\t--batch 1\n" \ - "\t--transformations_config /openvino/tools/mo/front/tf/.json\n" \ - "All detailed information about conversion of this model can be found at\n" \ - "https://docs.openvino.ai/2023.0/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html" - return {'model_type': {'YOLO': get_YOLO_params_by_flavor(flavor)}}, message - else: - return None, None - - -class TensorFlowYOLOV3Analysis(AnalyzeAction): - """ - The analyser checks if the provided model is TensorFlow YOLO models from - https://github.com/mystic123/tensorflow-yolo-v3. 
- """ - graph_condition = [lambda graph: graph.graph['fw'] == 'tf'] - - def analyze(self, graph: Graph): - flavor = None - if graph_contains_scope(graph, 'detector/yolo-v3') and graph_contains_scope(graph, 'detector/darknet-53'): - flavor = 'YOLOV3Full' - elif graph_contains_scope(graph, 'detector/yolo-v3-tiny'): - flavor = 'YOLOV3Tiny' - - if flavor is not None: - message = "Your model looks like YOLOv3 Model.\n" \ - "To generate the IR, provide TensorFlow YOLOv3 Model to the Model Optimizer with the following parameters:\n" \ - "\t--input_model /yolo_v3.pb\n" \ - "\t--batch 1\n" \ - "\t--transformations_config /openvino/tools/mo/front/tf/yolo_v3.json\n" \ - "Detailed information about conversion of this model can be found at\n" \ - "https://docs.openvino.ai/2023.0/_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_YOLO_From_Tensorflow.html" - return {'model_type': {'YOLO': get_YOLO_params_by_flavor(flavor)}}, message - else: - return None, None diff --git a/tools/mo/openvino/tools/mo/back/AvgPool.py b/tools/mo/openvino/tools/mo/back/AvgPool.py deleted file mode 100644 index dd0deb668331ae..00000000000000 --- a/tools/mo/openvino/tools/mo/back/AvgPool.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class AvgPool(BackReplacementPattern): - """ - Rename Pooling/avg to AvgPool - """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('pooling', {'type': 'Pooling', 'pool_method': 'avg'}) - ], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - match['pooling'].type = 'AvgPool' - del match['pooling']['pool_method'] diff --git a/tools/mo/openvino/tools/mo/back/CellNormalizer.py b/tools/mo/openvino/tools/mo/back/CellNormalizer.py deleted file mode 100644 index ae581f42e8861c..00000000000000 --- a/tools/mo/openvino/tools/mo/back/CellNormalizer.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.split import VariadicSplit -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, is_fully_defined -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.reshape import Reshape - - -class CellNormalizer(BackReplacementPattern): - # This class splits WR input on W and R for LSTMCell, GRUCell, RNNCell - - enabled = True - force_clean_up = True - - def pattern(self): - return dict( - nodes=[ - ('cell', dict(type=lambda type: type in ['LSTMCell', 'GRUCell', 'RNNCell'])) - ], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['cell'] - cell_name = node.soft_get('name', node.id) - cell_type = node.soft_get('type') - WR_input_id = node.soft_get('wr_input_id') - hidden_size_coef = node.soft_get('gates_count') - hidden_size = node.get_attrs()["hidden_size"] - - # default values for RNNCell/GRUCell - additional_port_id = 4 - if cell_type == "LSTMCell": - additional_port_id = 5 - - WR_shape = node.in_port(WR_input_id).data.get_shape() - assert WR_shape is not None, "Undefined 'WR' input shape for Cell node '{}'".format(cell_name) - assert is_fully_defined(WR_shape), 'Not fully defined shape for WR for 
Cell node "{}"'.format(cell_name) - - num_elements_in_WR = np.prod(WR_shape) - input_size = (num_elements_in_WR / (hidden_size_coef * hidden_size)) - hidden_size - - # Reshape - reshape = create_op_node_with_second_input(graph, Reshape, - int64_array([hidden_size_coef * hidden_size, - hidden_size + input_size]), - {'name': cell_name + '/Dims'}) - - # VariadicSplit - split = create_op_with_const_inputs(graph, VariadicSplit, {1: int64_array(1), - 2: int64_array([input_size, hidden_size])}, - {'out_ports_count': 2, 'name': cell_name + '/Split'}, - reshape) - - # Cell - node.in_port(WR_input_id).get_connection().set_destination(reshape.in_port(0)) - - node.add_input_port(additional_port_id, skip_if_exist=True) - assert node.in_port(additional_port_id).disconnected() - - # (x, y, WR, B) -> (x, y, W, R, B(additional_port)) - node.in_port(additional_port_id - 1).get_connection().set_destination(node.in_port(additional_port_id)) - split.out_port(0).connect(node.in_port(additional_port_id - 2)) - split.out_port(1).connect(node.in_port(additional_port_id - 1)) diff --git a/tools/mo/openvino/tools/mo/back/ChangeOutputTypeAttributes.py b/tools/mo/openvino/tools/mo/back/ChangeOutputTypeAttributes.py deleted file mode 100644 index d1b3b14dfea0c6..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ChangeOutputTypeAttributes.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np -from openvino.tools.mo.utils.error import Error - -operations_with_data_type_attributes = { - 'Cast': {'attr_name': 'dst_type', 'in_ports_to_check': (0,)}, - 'Range': {'attr_name': 'output_type', 'in_ports_to_check': (0, 1, 2)}, -} - - -class ChangeOutputTypeAttributes(BackReplacementPattern): - """ - The transformation changes output type for the specific operations defined in the - operations_with_data_type_attributes dictionary if one of the following conditions is met: - - The operation output type is fp64. Since not all plugins support fp64 data type it is converted to fp32. - - Changes output type from fp32 to fp16 (and ensure that this is possible) when generating fp16 IR. - - Keep operation output type equal to fp32 for operations located in the shape calculation sub-graphs to - avoid floating point overflow. 
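A standalone numeric illustration of the FP16 range check that this transformation relies on (see assert_that_is_castable_to_fp16 below); the values are made up:

import numpy as np

fp16_info = np.finfo(np.float16)                        # max is 65504.0
values = np.array([512.0, 70000.0], dtype=np.float32)   # e.g. shape-like constants

# A value outside the FP16 range cannot be safely down-cast: it overflows to inf.
castable = bool(np.all(values <= fp16_info.max) and np.all(values >= fp16_info.min))
print(castable)                                          # False
print(values.astype(np.float16))                         # [512.  inf]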
- """ - enabled = True - force_shape_inference = True - - def run_after(self): - from openvino.tools.mo.back.MarkNodesWithShapeValues import MarkNodesWithShapeValues - return [MarkNodesWithShapeValues] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - ir_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type) - - for node in graph.get_op_nodes(): - if node.op in operations_with_data_type_attributes: - dst_type = operations_with_data_type_attributes[node.op]['attr_name'] - node_name = node.soft_get('name', node.id) - assert node.has_valid(dst_type), '{} attribute is missing for node {}'.format(dst_type, node_name) - - final_type = None - if node[dst_type] == np.float64: - final_type = np.float32 - - if node[dst_type] in [np.float32, np.float64] and ir_data_type == np.float16 and \ - not node.has_and_set('returns_shape_value'): - final_type = np.float16 - elif node.has_and_set('returns_shape_value') and node[dst_type] == np.float16: - # return back FP32 for all nodes with shape values - final_type = np.float32 - - if final_type is not None: - log.warning('Change data type from {} to {} for node {}'.format(node[dst_type], final_type, - node_name)) - node[dst_type] = final_type - - if final_type == np.float16: - assert_that_is_castable_to_fp16(node) - - -def assert_that_is_castable_to_fp16(node: Node): - op_name = node.soft_get('op') - node_name = node.soft_get('name', node.id) - - for i in operations_with_data_type_attributes[op_name]['in_ports_to_check']: - val = node.in_port(i).data.get_value() - if val is None: - return - - if np.any(val > np.finfo(np.float16).max) or np.any(val < np.finfo(np.float16).min): - raise Error("Try to convert with --data_type=FP32 argument. " - "This model can not be converted to FP16 precision, since " - "'{}' node value {} exceeds FP16 allowed limits: [{}, {}]" - .format(node_name, val, np.finfo(np.float16).min, np.finfo(np.float16).max)) - # further this input values will be rewritten since force_shape_inference=True - node.in_port(i).data.set_value(val.astype(np.float16)) - - original_output = node.out_port(0).data.get_value() - node.infer(node) - casted_output = node.out_port(0).data.get_value() - original_output_len = original_output.size if hasattr(original_output, 'size') else None - casted_output_len = casted_output.size if hasattr(casted_output, 'size') else None - - if original_output_len != casted_output_len: - raise Error("Try to convert with --data_type=FP32 argument. " - "This model can not be converted to FP16 precision, since " - "after conversion of '{}' node to FP16 output shape {} differs from the original {}." 
- .format(node_name, casted_output_len, original_output_len)) - - diff_count = np.count_nonzero(np.subtract(original_output, casted_output) > 1.e-4) - if diff_count > 0: - log.warning("{} elements of {} of Range node '{}' output differ from the original values while " - "converting network to FP16 precision".format(diff_count, len(original_output), node_name)) diff --git a/tools/mo/openvino/tools/mo/back/ChangeRandomUniformOutputType.py b/tools/mo/openvino/tools/mo/back/ChangeRandomUniformOutputType.py deleted file mode 100644 index 021a40f7485fcc..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ChangeRandomUniformOutputType.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np - - -class ChangeRandomUniformOutputType(BackReplacementPattern): - """ - This transformation adds Cast to IR data_type after RandomUniform operation - when RandomUniform output type is not equal to IR data_type and RandomUniform output type - is floating point type. - 'output_type' attribute determines the generation algorithm of RandomUniform, so output numbers - generated for different values of 'output_type' may not be equal. For this reason 'output_type' - attribute shouldn't be changed for matching of inference results. So in cases when we need - to change the data type of RandomUniform we need to insert Cast node after RandomUniform. - """ - enabled = True - force_shape_inference = True - - def run_after(self): - from openvino.tools.mo.back.MarkNodesWithShapeValues import MarkNodesWithShapeValues - return [MarkNodesWithShapeValues] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - ir_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type) - - for node in graph.get_op_nodes(op='RandomUniform'): - assert node.has_valid('output_type') - - if node.has_and_set('returns_shape_value'): - continue - - if node.output_type != ir_data_type and np.issubdtype(node.output_type, np.floating): - node_name = node.soft_get('name', node.id) - convert_node = Cast(graph, {'name': node_name + "/cast", 'dst_type': ir_data_type}).create_node() - node.out_port(0).get_connection().insert_node(convert_node) diff --git a/tools/mo/openvino/tools/mo/back/ClampNormalizer.py b/tools/mo/openvino/tools/mo/back/ClampNormalizer.py deleted file mode 100644 index 5cbd775673ec34..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ClampNormalizer.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Minimum, Maximum -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph, rename_node -from openvino.tools.mo.ops.clamp import AttributedClamp - - -class ClampNormalizer(BackReplacementPattern): - """ - Replaces Clamp with `min` and `max` as inputs with AttributedClamp with `min` and `max` as attributes. 
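When `min` and `max` are not constants, the replacer below falls back to a Maximum followed by a Minimum; a small NumPy sketch of that equivalence, with made-up values:

import numpy as np

x = np.array([-3.0, 0.5, 7.0])
lo, hi = -1.0, 2.0

clamped = np.clip(x, lo, hi)                     # AttributedClamp semantics
decomposed = np.minimum(np.maximum(x, lo), hi)   # Maximum with `min`, then Minimum with `max`
print(np.array_equal(clamped, decomposed))       # True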
- """ - enabled = True - force_clean_up = True - - def pattern(self): - return dict( - nodes=[('clamp', dict(op='Clamp'))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - clamp = match['clamp'] - name = clamp.soft_get('name', clamp.id) - - min_value = max_value = None - port_1_exist = clamp.has_port('in', 1) and not clamp.in_port(1).disconnected() - port_2_exist = clamp.has_port('in', 2) and not clamp.in_port(2).disconnected() - if port_1_exist and clamp.in_port(1).get_source().node.soft_get('type') == 'Const': - min_value = clamp.in_port(1).data.get_value() - if port_2_exist and clamp.in_port(2).get_source().node.soft_get('type') == 'Const': - max_value = clamp.in_port(2).data.get_value() - - rename_node(clamp, name + '/TBR') - if min_value is None or max_value is None: - max_node = min_node = None - if port_1_exist: - max_node = Maximum(graph, {}).create_node() - clamp.in_port(0).get_connection().set_destination(max_node.in_port(0)) - clamp.in_port(1).get_connection().set_destination(max_node.in_port(1)) - clamp.out_port(0).get_connection().set_source(max_node.out_port(0)) - if port_2_exist: - min_node = Minimum(graph, {}).create_node() - if max_node is not None: - max_node.out_port(0).get_connection().set_source(min_node.out_port(0)) - max_node.out_port(0).connect(min_node.in_port(0)) - else: - clamp.in_port(0).get_connection().set_destination(min_node.in_port(0)) - clamp.out_port(0).get_connection().set_source(min_node.out_port(0)) - clamp.in_port(2).get_connection().set_destination(min_node.in_port(1)) - assert min_node is not None or max_node is not None, 'Clamp node should have either min or max input used' - rename_node(min_node if min_node is not None else max_node, name) - else: - a_clamp = AttributedClamp(graph, {'name': name, 'min': min_value, 'max': max_value}).create_node() - rename_node(a_clamp, name) - clamp.in_port(0).get_connection().set_destination(a_clamp.in_port(0)) - clamp.out_port(0).get_connection().set_source(a_clamp.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/back/ConvolutionNormalizer.py b/tools/mo/openvino/tools/mo/back/ConvolutionNormalizer.py deleted file mode 100644 index fb78b9cc03e2d8..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ConvolutionNormalizer.py +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.back.ReshapeMutation import ReshapeMutation -from openvino.tools.mo.back.ReverseInputChannels import ApplyReverseChannels -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, is_fully_defined, int64_array, mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.elementwise import Div -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice -from openvino.tools.mo.utils.error import Error - - -def resolve_convolution_with_group(node: Node, group: int, ir_version: str): - node_name = node.soft_get('name', node.id) - input_shape = node.in_port(0).data.get_shape() - assert len(input_shape) in [3, 4, 5] - - weights_shape = 
node.in_port(1).data.get_shape() - assert weights_shape is not None - assert len(weights_shape) in [3, 4, 5] - group = int64_array(group).item() - assert weights_shape[0] % group == 0 - - if ir_version == 'V7': - if weights_shape[0] == node.output: - # weights are already is in [G*O I X Y] format - return - - num_spatial_dims = len(weights_shape[2:]) - # Reshape has special_zero=True, if zeros are set then original shapes are copied - zeros_to_copy_spatial_dims = np.zeros(num_spatial_dims) - new_shape = shape_array([node.output, -1, *zeros_to_copy_spatial_dims]) - reshape = create_op_node_with_second_input(node.graph, Reshape, new_shape, - {'override_output_shape': True}) - elif ir_version == 'V10': - # Concat([Constant([group, node.output // group, -1]), *weights_shape[2:]], axis=1) - wshape = Shape(node.graph, {'name': node_name + '/WeightsShape', 'allow_fold': True}).create_node() - weights_node = node.in_port(1).get_source().node - weights_node.out_port(0).connect(wshape.in_port(0)) - - GOI = Const(node.graph, {'value': int64_array([group, node.output // group, -1]), - 'name': node_name + '/GOI_weights_part'}).create_node() - XY = create_op_with_const_inputs(node.graph, Gather, - port_value_dict={1: int64_array(list(range(2, len(weights_shape)))), 2: int64_array(0)}, - op_attrs={'name': node_name + '/XY_weights_part'}, - input_node=wshape) - - new_shape_node = Concat(node.graph, {'axis': 0, 'in_ports_count': 2, 'name': node_name + '/weights_shape'}).create_node() - new_shape_node.in_port(0).connect(GOI.out_port(0)) - new_shape_node.in_port(1).connect(XY.out_port(0)) - reshape = Reshape(node.graph, {'override_output_shape': True, 'special_zero': True}).create_node() - reshape.in_port(1).connect(new_shape_node.out_port(0)) - - del node['group'] - node['type'] = 'GroupConvolution' - else: - raise Error("Unknown IR version: {}".format(ir_version)) - - node.in_port(1).get_connection().insert_node(reshape) - - -class ConvolutionNormalizer(BackReplacementPattern): - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('node', dict(kind='op', type='Convolution')) - ], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['node'] - if node.has_valid('kernel_spatial'): - del node['kernel_spatial'] - - -class V7ConvolutionWithGroupsResolver(BackReplacementPattern): - """ - Normalizes grouped convolution weights shape to fit special weights format [G*O I X Y] - """ - enabled = False - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(type='Convolution'): - group = node.soft_get('group', None) - if group is not None: - if group != 1 or node.soft_get('op') == 'DepthwiseConv2dNative': - resolve_convolution_with_group(node, group, ir_version='V7') - - -class V10ConvolutionWithGroupsResolver(BackReplacementPattern): - """ - Normalizes grouped convolution weights shape to fit special weights format - V10 IR: [G O I X Y] - """ - enabled = False - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(type='Convolution'): - group = node.soft_get('group', None) - if group is not None: - if group != 1 or node.soft_get('op') == 'DepthwiseConv2dNative': - resolve_convolution_with_group(node, group, ir_version='V10') - - -class ConvolutionWithGroupsResolver(BackReplacementPattern): - """ - Normalizes grouped convolution weights shape to fit special weights format - V10 IR: [G O I X Y] - lower IR versions: [G*O I X Y] - """ - enabled = True - force_clean_up = True - - def run_before(self): - from 
openvino.tools.mo.back.StridedSliceMasksNormalizer import StridedSliceMasksNormalizer - from openvino.tools.mo.back.ShapeOfConstFolding import ShapeOfConstFolding - return [ShapeOfConstFolding, ReshapeMutation, StridedSliceMasksNormalizer] - - def run_after(self): - return [ApplyReverseChannels] - - def find_and_replace_pattern(self, graph: Graph): - V7ConvolutionWithGroupsResolver().find_and_replace_pattern(graph) - PullReshapeThroughFQ().find_and_replace_pattern(graph) - V10ConvolutionWithGroupsResolver().find_and_replace_pattern(graph) - - -class PullReshapeThroughFQ(BackReplacementPattern): - """ - Before: - ... -> FQ -> Reshape -> Convolution -> ... - - After: - ... -> Reshape -> FQ (with aligned limits) -> Convolution -> ... - """ - enabled = False - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('FQ', dict(type='FakeQuantize')), - ('FQed', dict()), - ('reshape', dict(type='Reshape')), - ('reshaped', dict()), - ('node', dict(type=lambda t: t in ['Convolution', 'GroupConvolution'])), - ], - edges=[ - ('FQ', 'FQed'), - ('FQed', 'reshape', {'in': 0}), - ('reshape', 'reshaped'), - ('reshaped', 'node', {'in': 1}), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - FQ = match['FQ'] - reshape = match['reshape'] - conv = match['node'] - - rank_reshape = reshape.in_port(0).data.get_shape().size != reshape.out_port(0).data.get_shape().size - - if not all([np.prod(FQ.in_port(i).data.get_shape()) == 1 for i in range(1, 5)]): - # FakeQuantize has limits with multiple values, that should be reshaped too - # Pulling Reshape through such FQ is a complex procedure because of broadcasting rules - return - - new_rank = reshape.out_port(0).data.get_shape().size - - reshape.in_port(0).disconnect() - reshape.out_port(0).disconnect() - - FQ.out_port(0).connect(conv.in_port(1)) - FQ.in_port(0).get_connection().insert_node(reshape) - - reshape['need_shape_inference'] = True - reshape['override_output_shape'] = True - FQ['need_shape_inference'] = True - FQ['override_output_shape'] = True - - if rank_reshape: - # force rank of limit inputs to match 0-input rank - # reshaping to lower range needs it the most due to FQ inner broadcast semantics - for i in range(1, 5): - reshape = create_op_node_with_second_input(graph, Reshape, int64_array([1] * new_rank), - {'override_output_shape': True}) - FQ.in_port(i).get_connection().insert_node(reshape) - - -class DeconvolutionNormalizer(BackReplacementPattern): - enabled = True - force_clean_up = True - - def run_before(self): - return [ReshapeMutation] - - def run_after(self): - return [ApplyReverseChannels] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('node', dict(type='Deconvolution')) - ], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['node'] - node_name = node.soft_get('name', node.id) - - if 2 in node.in_ports() and not node.in_port(2).disconnected(): - # Third input represents output shape. 
Cutting its value according to scheme: - # [N, C, spatial_dim_0, ..., spatial_dim_n] -> [spatial_dim_0, ..., spatial_dim_n] - in_rank = node.in_port(0).data.get_shape().size - - shape_src = node.in_port(2).get_source() - node.in_port(2).disconnect() - - ss_0 = create_op_with_const_inputs(graph, StridedSlice, {1: mo_array([2], dtype=np.int32), - 2: mo_array([in_rank], dtype=np.int32), - 3: mo_array([1], dtype=np.int32)}, - {'name': node_name + '/ss_0_port', - 'begin_mask': mo_array([1], dtype=np.int32), - 'end_mask': mo_array([0], dtype=np.int32), - 'new_axis_mask': mo_array([0], dtype=np.int32), - 'shrink_axis_mask': mo_array([0], dtype=np.int32), - 'ellipsis_mask': mo_array([0], dtype=np.int32)}) - - shape_src.connect(ss_0.in_port(0)) - ss_0.out_port(0).connect(node.in_port(2)) - - # Specification: *padding amount* is deduced from relation of input and output spatial shapes - del node['pad'] - - elif node.has_valid('original_output_spatial_shape'): - # node had fixed output spatial shape set in original framework, so we restore it here - const = Const(graph, {'value': int64_array(node.original_output_spatial_shape), - 'name': node_name + '/original_spatial_shape'}).create_node() - node.add_input_port(2, skip_if_exist=True) - const.out_port(0).connect(node.in_port(2)) - - # Specification: *padding amount* is deduced from relation of input and output spatial shapes - del node['pad'] - - group = node.soft_get('group', 1) - - if group != 1: - assert group > 1 - - weights_shape = node.in_port(1).data.get_shape() - assert weights_shape is not None - I = node.in_port(0).data.get_shape()[1] - assert I % group == 0 - assert node.output % group == 0 - - new_shape = shape_array([group, I // group, node.output // group, *weights_shape[2:]]) - - assert not is_fully_defined(new_shape) or not is_fully_defined(weights_shape) or \ - np.prod(weights_shape) == np.prod(new_shape), 'Initial weights shape {}, grouped weights shape {}' \ - ''.format(weights_shape, new_shape) - reshape = create_op_node_with_second_input(graph, Reshape, new_shape, {'override_output_shape': True}, - node.in_port(1).get_source().node) - - node.in_port(1).get_connection().set_source(reshape.out_port(0)) - - node['type'] = 'GroupConvolutionBackpropData' - else: - node['type'] = 'ConvolutionBackpropData' diff --git a/tools/mo/openvino/tools/mo/back/CorrectName.py b/tools/mo/openvino/tools/mo/back/CorrectName.py deleted file mode 100644 index 23cc0654872df3..00000000000000 --- a/tools/mo/openvino/tools/mo/back/CorrectName.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.back.replacement import BackReplacementPattern - - -class RestoreOriginalFrameworkName(BackReplacementPattern): - """ - This transformation corrects names of layers to their framework names. - To perform this correction, framework layer name should be in the attribute 'framework_node_name'. - In some cases, renaming is necessary only if some condition is fulfilled. Such condition should be a some - function in the attribute 'rename_condition'. 
- - For example, in the transformation SoftmaxONNXFrontReplacer such condition is - lambda n: len(n.graph.get_op_nodes(name=node_name)) == 0 - """ - - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(): - if not node.has_valid('framework_node_name'): - continue - - if node.has_valid('rename_condition'): - need_renaming = node['rename_condition'](node) - del node['rename_condition'] - if need_renaming: - node.name = node['framework_node_name'] - else: - node.name = node['framework_node_name'] - - del node['framework_node_name'] diff --git a/tools/mo/openvino/tools/mo/back/CropToStridedSlice.py b/tools/mo/openvino/tools/mo/back/CropToStridedSlice.py deleted file mode 100644 index 60b03e5c9fde89..00000000000000 --- a/tools/mo/openvino/tools/mo/back/CropToStridedSlice.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.back.ForceStrictPrecision import ForceStrictPrecision -from openvino.tools.mo.ops.elementwise import Add -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice - - -class CropToStridedSlice(BackReplacementPattern): - enabled = True - force_clean_up = True - - def run_before(self): - return [ForceStrictPrecision] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('crop', dict(op='Crop')) - ], - edges=[] - ) - - @staticmethod - def mask_normalizer(shape_rank: int, axes: np.ndarray, values: np.ndarray): - mask = np.zeros(shape_rank, dtype=np.int64) - for i, axis in enumerate(axes): - mask[axis] = values[i] - return mask - - @staticmethod - def list_to_ndarray(val): - return mo_array(val) if mo_array(val).ndim != 0 else mo_array([val]) - - def replace_pattern(self, graph: Graph, match: [str, Node]): - node = match['crop'] - assert node.has_valid('axis') - node_axis = self.list_to_ndarray(node.axis) - - in_shape = node.in_port(0).data.get_shape() - shape_rank = in_shape.size - axis_mask = int64_array([1 if i in node_axis else 0 for i in range(shape_rank)]) - begin_mask = axis_mask.copy() - end_mask = axis_mask.copy() - - ss = StridedSlice(graph, {'name': node.soft_get('name', node.id) + '/strided_slice', 'begin_mask': begin_mask, - 'end_mask': end_mask, - 'new_axis_mask': np.zeros(len(end_mask)), - 'shrink_axis_mask': np.zeros(len(end_mask)), - 'ellipsis_mask': np.zeros(len(end_mask))}).create_node() - - if len(node.in_nodes()) == 2 and node.has_valid('offset'): - # Crop Type 1 - begin = Const(graph, {'value': self.mask_normalizer(shape_rank, node_axis, node.offset), - 'name': ss.name + '/begin'}).create_node() - shape = Shape(graph, {'name': ss.name + '/shape_of_crop'}).create_node() - end = Add(graph, {'name': ss.name + '/end'}).create_node() - node.in_port(1).get_connection().get_source().connect(shape.in_port(0)) - node.in_port(1).disconnect() - shape.out_port(0).connect(end.in_port(0)) - begin.out_port(0).connect(end.in_port(1)) - elif node.has_valid('dim') and node.has_valid('offset'): - # Crop Type 2 - node_dim = self.list_to_ndarray(node.dim) - node_offset = self.list_to_ndarray(node.offset) - assert node_dim.size == 
node_offset.size == node_axis.size - - begin = Const(graph, {'value': self.mask_normalizer(shape_rank, node_axis, node_offset), - 'name': ss.name + '/begin'}).create_node() - end_values = mo_array([node_offset[i] + node_dim[i] for i in range(len(node_dim))]) - end = Const(graph, {'value': self.mask_normalizer(shape_rank, node_axis, end_values), - 'name': ss.name + '/end'}).create_node() - elif node.has_valid('crop_begin') and node.has_valid('crop_end'): - # Crop Type 3 - node_crop_begin = self.list_to_ndarray(node.crop_begin) - node_crop_end = self.list_to_ndarray(node.crop_end) - assert len(node_crop_begin) == len(node_crop_end) == len(node_axis) - - begin = Const(graph, {'value': self.mask_normalizer(shape_rank, node_axis, node_crop_begin), - 'name': ss.name + '/begin'}).create_node() - shape = Shape(graph, {'name': ss.name + '/shape'}).create_node() - - end = Add(graph, {'name': ss.name + '/end'}).create_node() - const = Const(graph, {'value': -1 * self.mask_normalizer(shape_rank, node_axis, node_crop_end), - 'name': ss.name + '/const'}).create_node() - - node.in_port(0).get_connection().get_source().connect(shape.in_port(0)) - shape.out_port(0).connect(end.in_port(0)) - const.out_port(0).connect(end.in_port(1)) - - else: - raise Exception("Unknown type of Crop") - - source = node.in_port(0).get_connection().get_source() - - stride = Const(graph, {'value': np.ones(shape_rank, dtype=np.int64), - 'name': ss.name + '/stride'}).create_node() - - source.connect(ss.in_port(0)) - begin.out_port(0).connect(ss.in_port(1)) - end.out_port(0).connect(ss.in_port(2)) - stride.out_port(0).connect(ss.in_port(3)) - - node.in_port(0).disconnect() - node.out_port(0).get_connection().set_source(ss.out_port(0)) - - ss['force_precision_in_ports'] = {1: 'int64', 2: 'int64', 3: 'int64'} diff --git a/tools/mo/openvino/tools/mo/back/CutMemory.py b/tools/mo/openvino/tools/mo/back/CutMemory.py deleted file mode 100644 index 32192f12c79a27..00000000000000 --- a/tools/mo/openvino/tools/mo/back/CutMemory.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.crop import Crop -from openvino.tools.mo.utils.logger import log - - -class CutMemoryInput(BackReplacementPattern): - """ - Cut Memory layers and have inputs/outputs in graph instead of them - """ - enabled = True - graph_condition = [lambda graph: graph.graph['fw'] == "kaldi" and graph.graph['cmd_params'].remove_memory] - force_clean_up = True - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('op', dict(kind='op', op='ReadValue'))], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - node_id = node['variable_id'] - - i = 0 - node.in_port(0).disconnect() - for dest in node.out_port(0).get_destinations(): - new_in = Parameter(graph, {'name': "Parameter_"+str(i)+"_for_"+node_id, - 'shape': dest.data.get_shape()}).create_node() - i += 1 - dest.disconnect() - new_in.out_port(0).connect(dest) - log.error("Add input/output mapped {} -> {} ".format(new_in.name, "Result_for_"+node_id), - extra={'is_warning': True}) - - -class CutMemoryOutput(BackReplacementPattern): - """ - Cut Memory layers and have inputs/outputs in graph instead of them - """ - enabled = True - 
graph_condition = [lambda graph: graph.graph['fw'] == "kaldi" and graph.graph['cmd_params'].remove_memory] - force_clean_up = True - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('op', dict(kind='op', op='Assign'))], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - node_id = node['variable_id'] - - out_node_port = node.out_port(0).get_destination() - in_node_port = node.in_port(0).get_source() - node.in_port(0).disconnect() - node.out_port(0).disconnect() - crop = Crop(graph, {'name': 'Result_for_'+node_id, 'dim': mo_array([1]), 'offset': mo_array([0]), - 'axis': mo_array([0])}).create_node() - in_node_port.connect(crop.in_port(0)) - crop.out_port(0).connect(out_node_port) diff --git a/tools/mo/openvino/tools/mo/back/EnableConstantStridedSlice.py b/tools/mo/openvino/tools/mo/back/EnableConstantStridedSlice.py deleted file mode 100644 index 5378922987830a..00000000000000 --- a/tools/mo/openvino/tools/mo/back/EnableConstantStridedSlice.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class EnableConstantStridedSlice(BackReplacementPattern): - enabled = True - graph_condition = [lambda graph: not graph.graph['cmd_params'].static_shape] - - @staticmethod - def pattern(): - return dict( - nodes=[('const_strided_slice', {'op': 'StridedSlice', 'type': lambda type: type != 'StridedSlice'}), - ('data', {'kind': 'data', 'value': lambda value: value is not None}) - ], - edges=[('const_strided_slice', 'data')], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - graph.node[match['const_strided_slice'].id]['type'] = 'StridedSlice' diff --git a/tools/mo/openvino/tools/mo/back/FakeOutputResolver.py b/tools/mo/openvino/tools/mo/back/FakeOutputResolver.py deleted file mode 100644 index 4c0ed3cbeea6aa..00000000000000 --- a/tools/mo/openvino/tools/mo/back/FakeOutputResolver.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Add -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes - - -class FakeOutputResolver(BackReplacementPattern): - """ - This transformation removes FakeOutput nodes. If producer of FakeOutput have only one consumer (FakeOutput itself) - the name of FakeOutput is inherited by its producer, otherwise FakeOutput is replaced with op which does nothing. 
- """ - enabled = True - force_clean_up = True - - def find_and_replace_pattern(self, graph: Graph): - for fake_output in graph.get_op_nodes(op='FakeOutput'): - name = fake_output.soft_get('name', fake_output.id) - - producer = fake_output.in_port(0).get_source().node - - # At this stage we don't know the type of output, so we rely on MO transformation which updates the - # Const type for elementwise operations in case of input data types mismatch - add = create_op_with_const_inputs(graph, Add, {1: int64_array(0)}, {'can_be_fused': False}) - rename_nodes([(fake_output, name + '/TBD'), (add, name)]) - - prev_op_in_port = fake_output.in_port(0).get_connection().get_source() - # Get tensor names incoming to FakeOutput - tensor_names = prev_op_in_port.get_tensor_names() - - # Remove tensor info from data node - prev_op_in_port.remove_tensor_names() - - fake_output.in_port(0).get_connection().set_destination(add.in_port(0)) - fake_output.out_port(0).get_connection().set_source(add.out_port(0)) - - # Move tensor names to Add op, which replaces FakeOutput - if len(tensor_names) > 0: - add.out_port(0).add_tensor_names(tensor_names) diff --git a/tools/mo/openvino/tools/mo/back/ForceStrictPrecision.py b/tools/mo/openvino/tools/mo/back/ForceStrictPrecision.py deleted file mode 100644 index 51db4d40920235..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ForceStrictPrecision.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np - - -class ForceStrictPrecision(BackReplacementPattern): - """ - Assign precision for some inputs for specific layers depending on their semantics. - - To identify ports which should be processed, this pass relies on special attributes - inside a node: force_precision_in_ports. This attribute should be a dictionary with - index of port as key and required precision code as value (e.g. 'int64' etc.). 
- """ - enabled = True - - @staticmethod - def pattern(): - return dict( - nodes=[('node', {'force_precision_in_ports': lambda x: x is not None})], - edges=[], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['node'] - for in_port, precision in node.force_precision_in_ports.items(): - if in_port in node.in_ports().keys() and not node.in_port(in_port).disconnected(): - cast = Cast(graph, {'name': node.name + '/Cast_' + str(in_port), - 'dst_type': data_type_str_to_np(precision)}).create_node() - node.in_port(in_port).get_connection().insert_node(cast) diff --git a/tools/mo/openvino/tools/mo/back/FuseTransposesSequence.py b/tools/mo/openvino/tools/mo/back/FuseTransposesSequence.py deleted file mode 100644 index 3c7704a5e890f8..00000000000000 --- a/tools/mo/openvino/tools/mo/back/FuseTransposesSequence.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.eliminate import merge_data_nodes -from openvino.tools.mo.middle.passes.fusing.helpers import get_next_operation -from openvino.tools.mo.utils.error import Error - - -class FuseTransposesSequence(BackReplacementPattern): - """ - This pass finds sequence of Transpose operations and merge them to single Transpose operation - In case if resulting Permutation do nothing, we just remove it - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for permute_node in graph.get_op_nodes(type='Transpose'): - if permute_node.id not in graph.nodes(): - continue - - list_of_permutes = [permute_node] - # Get sequence of permutations - node = permute_node - while True: - next_ops = get_next_operation(node) - if len(next_ops) != 1: - break - - next_op = next_ops[0] - if next_op.soft_get('type') == 'Transpose': - list_of_permutes.append(next_op) - node = next_op - else: - break - - final_permutation = int64_array([x for x in range(len(list_of_permutes[0].in_port(1).data.get_value()))]) - for permute in list_of_permutes: - order = permute.in_port(1).data.get_value() - if order is None: - raise Error("Transpose node {} has wrong order for permute = None".format(permute.name)) - final_permutation = final_permutation[int64_array(order)] - - if np.array_equal(final_permutation, [x for x in range(len(list_of_permutes[0].in_port(1).data.get_value()))]): - first_data_node, last_data_node = list_of_permutes[0].in_node(), list_of_permutes[-1].out_node() - graph.remove_edge(first_data_node.id, list_of_permutes[0].id) - else: - if len(list_of_permutes) < 2: - continue - first_data_node, last_data_node = list_of_permutes[0].out_node(), list_of_permutes[-1].out_node() - list_of_permutes[0].in_port(1).data.set_value(final_permutation) - graph.remove_edge(first_data_node.id, first_data_node.out_node().id) - - graph.remove_edge(last_data_node.in_node().id, last_data_node.id) - - merge_data_nodes(graph, first_data_node, last_data_node) - graph.remove_node(last_data_node.id) - graph.clean_up() diff --git a/tools/mo/openvino/tools/mo/back/GatherNormalizer.py b/tools/mo/openvino/tools/mo/back/GatherNormalizer.py deleted file mode 100644 index 31cc00c9e69075..00000000000000 --- a/tools/mo/openvino/tools/mo/back/GatherNormalizer.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation 
-# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.squeeze import Squeeze - - -class GatherTreeNormalizer(BackReplacementPattern): - enabled = True - force_clean_up = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(type='GatherTree'): - name = node.soft_get('name', node.id) - assert 3 in node.in_ports() and not node.in_port(3).disconnected() - - end_token_shape = node.in_port(3).data.get_shape() - assert end_token_shape is not None - if end_token_shape.size == 1 and end_token_shape.ndim == 1: - squeeze = create_op_node_with_second_input(graph, Squeeze, int64_array([0]), - {'name': name + '/Squeeze', 'override_output_shape': True}) - node.in_port(3).get_connection().insert_node(squeeze) diff --git a/tools/mo/openvino/tools/mo/back/InterpolateReshape.py b/tools/mo/openvino/tools/mo/back/InterpolateReshape.py deleted file mode 100644 index db5cfba5b40a00..00000000000000 --- a/tools/mo/openvino/tools/mo/back/InterpolateReshape.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Mul -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs, create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.shape import Shape - - -class InterpolateConcat(BackReplacementPattern): - r""" - Replaces hard-coded 1-port input of Interpolate with reshape-able sub-graph using the following Concat inputs - - BEFORE: - input Const - shape=[1, 3, 30, 40] value=[60, 160] - \ / - Interpolate(axes=(2, 3)) input_1 - shape=[1, 3, 60, 160] shape=[1, 4, 60, 160] - \ / - Concat(axis=1) - shape=[1, 7, 60, 160] - AFTER: - input - shape=[1, 3, 30, 40] input_1 - | shape=[1, 4, 60, 160] - | / | - | ShapeOf | - | | | - | Gather | - | indices=(2, 3); axis=0 | - \ | | - Interpolate(axes=(2, 3)) | - shape=[1, 3, 60, 160] | - \ / - Concat(axis=1) - shape=[1, 7, 60, 160] - """ - enabled = True - graph_condition = [lambda graph: not graph.graph['cmd_params'].static_shape] - force_shape_inference = True - id = 'reshape_interpolate_through_concat' - - @staticmethod - def make_interpolate_reshapeable(interpolate, concat): - assert interpolate.soft_get('type') == 'Interpolate' - assert concat.soft_get('type') == 'Concat' - - output_shape = interpolate.out_port(0).data.get_shape() - - interp_axes = [get_canonical_axis_index(output_shape, axis) for axis in Interpolate.get_axes(interpolate)] - concat_axis = get_canonical_axis_index(output_shape, concat.axis) - if concat_axis in interp_axes: - return - - concat_srcs = [port.get_source() for port in concat.in_ports().values() if not port.disconnected()] - non_interp_concat_srcs = [src for src in concat_srcs if src.node.soft_get('type') != 
'Interpolate'] - if len(non_interp_concat_srcs) == 0: - return - - graph = interpolate.graph - src = non_interp_concat_srcs[0] - - shape = Shape(graph, {'name': src.node.soft_get('name', src.node.id) + '/Shape'}).create_node() - shape.in_port(0).connect(src) - gather = create_op_with_const_inputs(graph, Gather, - {1: mo_array(interp_axes, dtype=np.int32), 2: int64_array(0)}, - {'name': shape.name + '/Gathered'}, shape) - interpolate.in_port(1).get_connection().set_source(gather.out_port(0)) - - def find_and_replace_pattern(self, graph: Graph): - for interpolate in graph.get_op_nodes(type='Interpolate'): - if interpolate.in_port(1).get_source().node.soft_get('type') != 'Const': - continue - dsts = interpolate.out_port(0).get_destinations() - if len(dsts) == 1 and dsts[0].node.soft_get('type') == 'Concat': - self.make_interpolate_reshapeable(interpolate, dsts[0].node) - - -class InterpolateReshapeWA(BackReplacementPattern): - r""" - Replaces hard-coded 1-port input of Interpolate with reshape-able sub-graph. - WARNING: Could cause troubles if model has hard-coded Interpolate intentionally -- rare situation - BEFORE: - input Const - shape=[1, 3, 30, 40] value=[60, 160] - \ / - Interpolate(axes=(2, 3)) - shape=[1, 3, 60, 160] - AFTER: - input - shape=[1, 3, 30, 40] - | \ - | ShapeOf - | | - | Gather Const - | indices=(2, 3); axis=0 value=[2, 4] - | \ / - | Multiply - | / - Interpolate(axes=(2, 3)) - shape=[1, 3, 60, 160] - """ - enabled = False - graph_condition = [lambda graph: not graph.graph['cmd_params'].static_shape] - force_shape_inference = True - id = 'reshape_interpolate_wa' - - def run_after(self): - return [InterpolateConcat] - - @staticmethod - def make_interpolate_reshapeable(interpolate): - assert interpolate.soft_get('type') == 'Interpolate' - axes = Interpolate.get_axes(interpolate) - input_shape = interpolate.in_port(0).data.get_shape() - output_shape = interpolate.out_port(0).data.get_shape() - if not np.all(np.remainder(output_shape, input_shape) == 0) and \ - not np.all(np.remainder(input_shape, output_shape) == 0): - return - graph = interpolate.graph - name = interpolate.soft_get('name', interpolate.id) - shape = Shape(graph, {'name': name + '/ShapeOf'}).create_node() - shape.in_port(0).connect(interpolate.in_port(0).get_source()) - gather = create_op_with_const_inputs(graph, Gather, {1: mo_array(axes, dtype=np.int32), 2: int64_array(0)}, - {'name': shape.name + '/Gathered'}, shape) - multipliers = output_shape[axes] / input_shape[axes] - mul = create_op_node_with_second_input(graph, Mul, multipliers, {'name': gather.name + '/Multiplied'}, gather) - interpolate.in_port(1).get_connection().set_source(mul.out_port(0)) - - def find_and_replace_pattern(self, graph: Graph): - for interpolate in graph.get_op_nodes(type='Interpolate'): - if interpolate.in_port(1).get_source().node.soft_get('type') == 'Const': - self.make_interpolate_reshapeable(interpolate) diff --git a/tools/mo/openvino/tools/mo/back/LRNToNorm.py b/tools/mo/openvino/tools/mo/back/LRNToNorm.py deleted file mode 100644 index bc856e1b58f718..00000000000000 --- a/tools/mo/openvino/tools/mo/back/LRNToNorm.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from 
openvino.tools.mo.ops.lrn import LRN - - -class LRN_normalization(BackReplacementPattern): - """ - Transforming LRN with `region` attribute to LRN with second `axis`-input - """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('lrn', dict(kind='op', op='AttributedLRN')) - ], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['lrn'] - name = node.soft_get('name', node.id) - assert node.has_valid('region') - assert node.region in ['across', 'same'] - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None - axis = [1] if node.region == 'across' else list(range(2, input_shape.size)) - - new_lrn = create_op_node_with_second_input(graph, LRN, int64_array(axis), { - 'name': name, - 'alpha': node.alpha, - 'beta': node.beta, - 'size': node.local_size, - 'bias': node.bias, - }) - - node.out_port(0).get_connection().set_source(new_lrn.out_port(0)) - node.in_port(0).get_connection().set_destination(new_lrn.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/back/LayoutChangeForGatherND.py b/tools/mo/openvino/tools/mo/back/LayoutChangeForGatherND.py deleted file mode 100644 index ab51f537b8bf79..00000000000000 --- a/tools/mo/openvino/tools/mo/back/LayoutChangeForGatherND.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class LayoutChangeForGatherND(BackReplacementPattern): - """ - Return original layout for inputs and output of GatherND operation - since the operation is designed for NHWC layout. - """ - enabled = True - force_shape_inference = True - graph_condition = [lambda graph: graph.graph['fw'] == 'tf'] - - def find_and_replace_pattern(self, graph: Graph): - import openvino.tools.mo.middle.InsertLayoutPropagationTransposes as InsertTransposes - for gathernd in graph.get_op_nodes(type='GatherND'): - InsertTransposes.insert_transpose(graph, gathernd.in_port(0), before_input=True) - InsertTransposes.insert_transpose(graph, gathernd.in_port(1), before_input=True) - InsertTransposes.insert_transpose(graph, gathernd.out_port(0), before_input=False) diff --git a/tools/mo/openvino/tools/mo/back/LeakyReLUMutation.py b/tools/mo/openvino/tools/mo/back/LeakyReLUMutation.py deleted file mode 100644 index 9a300867b90ebf..00000000000000 --- a/tools/mo/openvino/tools/mo/back/LeakyReLUMutation.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.ForceStrictPrecision import ForceStrictPrecision -from openvino.tools.mo.ops.prelu import PReLU -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph, rename_node -from openvino.tools.mo.ops.const import Const - - -class LeakyReLUMutation(BackReplacementPattern): - enabled = True - force_clean_up = True - - def run_before(self): - return [ForceStrictPrecision] - - @staticmethod - def pattern(): - return dict( - nodes=[('leakyrelu', dict(kind='op', op='LeakyReLU'))], - edges=[], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - relu = match['leakyrelu'] - relu_name = relu.soft_get('name', relu.id) - if not relu.has_valid('negative_slope'): - return - - rename_node(relu, relu_name + '/to_delete') - # Create PReLU op and reconnect input/output 
from LeakyReLU to PReLU - prelu = PReLU(graph, dict(name=relu_name)).create_node() - rename_node(prelu, relu_name) - - const = Const(graph, dict(name=relu_name + "/weights", value=mo_array([relu.negative_slope]))).create_node() - - relu.in_port(0).get_connection().set_destination(prelu.in_port(0)) - const.out_port(0).connect(prelu.in_port(1)) - relu.out_port(0).get_connection().set_source(prelu.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/back/LinearToLinearONNXReplacer.py b/tools/mo/openvino/tools/mo/back/LinearToLinearONNXReplacer.py deleted file mode 100644 index 1e5847c43aef50..00000000000000 --- a/tools/mo/openvino/tools/mo/back/LinearToLinearONNXReplacer.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.InterpolateReshape import InterpolateConcat, InterpolateReshapeWA -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class LinearToLinearONNXReplacer(BackReplacementPattern): - """ - If we don't use this transformation, then we have a performance drop, because CPU and GPU have no optimized - version of the 'linear' mode of the operation Interpolate. - TODO: delete this transformation, when CPU and GPU will have optimized version of the 'linear' mode. - """ - enabled = True - - def run_after(self): - return [InterpolateConcat, InterpolateReshapeWA] - - def find_and_replace_pattern(self, graph: Graph): - for interpolate_node in graph.get_op_nodes(type='Interpolate', version='opset4', mode='linear'): - input_shape = interpolate_node.in_port(0).data.get_shape() - interpolate_name = interpolate_node.soft_get('name', interpolate_node.id) - assert input_shape is not None, \ - 'Shape of interpolated data for node {} must not be None'.format(interpolate_name) - input_rank = len(input_shape) - if input_rank == 4: - interpolate_node['mode'] = 'linear_onnx' diff --git a/tools/mo/openvino/tools/mo/back/MarkNodesWithShapeValues.py b/tools/mo/openvino/tools/mo/back/MarkNodesWithShapeValues.py deleted file mode 100644 index c4f8abe6a9a4b9..00000000000000 --- a/tools/mo/openvino/tools/mo/back/MarkNodesWithShapeValues.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from typing import List - -import numpy as np - -from openvino.tools.mo.middle.MarkSubgraphsWithCorrectLayout import MarkSubGraphsWithCorrectLayout -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph, Node - -shape_accepting_ops = { - 'Interpolate': [1, 2], # sizes, scales inputs - 'Reshape': [1], # shape - 'Broadcast': [1], # target_shape - 'ConvBackPropData ': [2], # output_shape - 'GroupConvolutionBackpropData ': [2], # output_shape - 'BatchToSpace': [1, 2, 3], # block_shape, crops_begin, crops_end - 'SpaceToBatch': [1, 2, 3], # block_shape, pads_begin, pads_end - 'StridedSlice': [1, 2, 3], # begin, end, strides - 'VariadicSplit': [2], # split_lengths - 'Tile': [1], # repeats input - 'TopK': [1], # K input - 'Pad': [1, 2], # pads_begin, pads_end - 'OneHot': [1], # depth input -} - - -class MarkNodesWithShapeValues(BackReplacementPattern): - """ - This transformation marks op nodes in ShapeOf subgraphs with 'returns_shape_value' bool attribute and - data nodes of float32 constants with 'correct_data_type' attribute. 
- So that float Consts and Cast float will be kept in FP32 even if argument --data_type=FP16 is specified. - - This is needed to enable conversion to FP16 even if values in ShapeOf subgraphs exceed max(float16) - or because of FP16 lower precession shape inference is incorrect on some nodes (e.g. if Interpolate in scales mode - accepts values from ShapeOf subgraph). - - This transformation should be executed after shape inference and after all transformations which insert/modify - Cast nodes in ShapeOf subgraphs therefore it's placed at the end of the back phase. - """ - enabled = True - graph_condition = [lambda graph: graph.graph['cmd_params'].data_type == 'FP16'] - - def run_after(self): - from openvino.tools.mo.back.pass_separator import BackFinish - return [BackFinish] - - def run_before(self): - return [] - - @staticmethod - def get_nodes_with_shape_inputs(graph: Graph) -> List[Node]: - shape_accepting_nodes = [] - for node in graph.get_op_nodes(): - if node.soft_get('type') in shape_accepting_ops: - shape_accepting_nodes.append(node) - return shape_accepting_nodes - - @staticmethod - def get_shape_returning_start_nodes(nodes_with_shape_inputs: List[Node]) -> List[Node]: - sources = [] - for node in nodes_with_shape_inputs: - assert node.soft_get('type') in shape_accepting_ops - for port_idx in shape_accepting_ops[node.soft_get('type')]: - if not node.is_in_port_connected(port_idx): - continue - source_node = node.in_port(port_idx).get_source().node - # no need to start BFS for ShapeOf nodes, indeed if there is a ShapeOf it's an end of BFS - if source_node.soft_get('type') != 'ShapeOf': - sources.append(source_node) - return sources - - @staticmethod - def mark_nodes(shape_returning_nodes: List[Node]): - for node in shape_returning_nodes: - node['returns_shape_value'] = True - if node.soft_get('type') == 'Const': - if node.value.dtype == np.float32: - node.out_node(0)['correct_data_type'] = True - elif node.value.dtype in [np.float16, np.float64]: - log.error("Const node '{}' returns shape values of '{}' type but it must be integer or float32. " - "During Elementwise type inference will attempt to cast to float32". 
- format(node.soft_get('name', node.id), node.value.dtype), extra={'is_warning': True}) - - def find_and_replace_pattern(self, graph: Graph): - shape_accepting_nodes = self.get_nodes_with_shape_inputs(graph) - - condition = lambda node: node.soft_get('type') != 'ShapeOf' - shape_returning_start_nodes = self.get_shape_returning_start_nodes(shape_accepting_nodes) - shape_returning_nodes = MarkSubGraphsWithCorrectLayout.bfs(shape_returning_start_nodes, set(), - condition, forward=False) - self.mark_nodes(shape_returning_nodes) diff --git a/tools/mo/openvino/tools/mo/back/MatMulNormalizer.py b/tools/mo/openvino/tools/mo/back/MatMulNormalizer.py deleted file mode 100644 index 85c1a9c06997f4..00000000000000 --- a/tools/mo/openvino/tools/mo/back/MatMulNormalizer.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.back.TransposeReduceFusing import TransposeReduce -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.unsqueeze import Unsqueeze -from openvino.tools.mo.utils.shape import node_to_get_shape_value_of_indices, new_shape_node_from_shape_nodes - - -class MatMulConstTransposesExtraction(BackReplacementPattern): - """ - Resolves transpose_a(b) key from MatMul operation if corresponding input is constant by inserting Transpose, - that gets const folded while graph clean up execution - """ - - enabled = True - force_clean_up = True - - @staticmethod - def pattern(): - return dict( - nodes=[('matmul', dict(kind='op', op='MatMul'))], - edges=[] - ) - - @staticmethod - def insert_transpose(node, in_port_idx): - graph = node.graph - name = node.soft_get('name', node.id) - - assert in_port_idx in node.in_ports() and not node.in_port(in_port_idx).disconnected(), \ - 'Input port with index {} should be connected for node {}'.format(in_port_idx, name) - - in_port = node.in_port(in_port_idx) - port_shape = in_port.data.get_shape() - assert port_shape is not None, \ - 'Shape is unknown for input port with index {} for node {}'.format(in_port_idx, name) - - transpose_order = list(range(port_shape.size)) - transpose_order[-1], transpose_order[-2] = transpose_order[-2], transpose_order[-1] - - transpose = create_op_node_with_second_input(graph, Transpose, int64_array(transpose_order), - {'name': name + '/{}_port_transpose'.format(in_port_idx)}) - - port_source = in_port.get_source() - in_port.get_connection().set_source(transpose.out_port(0)) - transpose.in_port(0).connect(port_source) - - transpose['override_output_shape'] = True - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['matmul'] - - if not node.has_and_set('transpose_b'): - B_shape = node.in_port(1).data.get_shape() - B_value = node.in_port(1).data.get_value() - FQ_on_weights = node.in_port(1).get_source().node.has_and_set('stop_value_propagation') - if (B_value is not None or FQ_on_weights) and B_shape[B_shape != 1].size <= 2: - MatMulConstTransposesExtraction.insert_transpose(node, 1) - node['transpose_b'] = True 
- - -class PullTransposeThroughFQUp(BackReplacementPattern): - r""" - BEFORE AFTER - Const Const - \ \ | / / | - FakeQuantize T T T T T - | \ \ | / / - Transpose FakeQuantize - | | - next_op next_op - `T` is Transpose for short - """ - enabled = True - force_clean_up = True - - def run_after(self): - # in case FQ->Transpose->Reduce we should first try to optimize out Transpose - return [MatMulConstTransposesExtraction, TransposeReduce] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('fq_const_input', dict(kind='op', type='Const')), - ('fq_const_input_d', dict()), - ('fq', dict(kind='op', type='FakeQuantize')), - ('fq_d', dict()), - ('transpose', dict(kind='op', type='Transpose')), - ], - edges=[ - ('fq_const_input', 'fq_const_input_d'), - ('fq_const_input_d', 'fq', {'in': 0}), - ('fq', 'fq_d'), - ('fq_d', 'transpose'), - ] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - fq = match['fq'] - - if len(fq.out_port(0).get_destinations()) > 1: - # FQ should have only one child -- Transpose for optimization - return - - transpose = match['transpose'] - name = fq.soft_get('name', fq.id) - - input_shape = transpose.in_port(0).data.get_shape() - - # detaching transpose from the graph - transpose.out_port(0).get_connection().set_source(transpose.in_port(0).get_connection().get_source()) - transpose.in_port(0).disconnect() - - for idx, port in fq.in_ports().items(): - transpose_copy = transpose.copy_node({'override_output_shape': True}) - transpose.in_port(1).get_source().connect(transpose_copy.in_port(1)) - - start_port = transpose_copy.in_port(0) - - idxs = np.arange(len(input_shape) - len(port.data.get_shape())) - if idxs.size != 0: - axis = Const(graph, {'name': name + '/in_{}_unsqueeze_axis'.format(idx), - 'value': int64_array(idxs)}).create_node() - unsqueeze = Unsqueeze(graph, {'name': name + '/in_{}_unsqueeze'.format(idx)}).create_node() - axis.out_port(0).connect(unsqueeze.in_port(1)) - unsqueeze.out_port(0).connect(transpose_copy.in_port(0)) - start_port = unsqueeze.in_port(0) - - src = port.get_source() - port.get_connection().set_source(transpose_copy.out_port(0)) - src.connect(start_port) - - -class SmartReshape_HC_Reshape_MatMul(BackReplacementPattern): - r""" - Relaxes hard-coded input of Reshape in such sub-graphs: - - input_1 Constant - \ / - Reshape input_2 - \ / - MatMul - | - """ - enabled = True - force_clean_up = True - - def run_after(self): - return [MatMulConstTransposesExtraction] - - def pattern(self): - return dict( - nodes=[ - ('output_shape', dict(type='Const')), - ('output_shape_d', dict()), - ('reshape', dict(type='Reshape')), - ('reshape_d', dict()), - ('other_input', dict(type=lambda t: t not in ['Reshape', 'Transpose'])), - ('other_input_d', dict()), - ('matmul', dict(type='MatMul')), - ], - edges=[ - ('output_shape', 'output_shape_d'), - ('output_shape_d', 'reshape', {'in': 1}), - ('reshape', 'reshape_d'), - ('reshape_d', 'matmul'), - ('other_input', 'other_input_d'), - ('other_input_d', 'matmul'), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - matmul = match['matmul'] - reshape = match['reshape'] - other_input_port_idx = 0 if match['matmul'].in_port(0).get_source().node.id == match['other_input'].id else 1 - shape_source = match['matmul'].in_port(other_input_port_idx).get_source() - initial_reshape_pattern = reshape.in_port(1).data.get_value() - if len(initial_reshape_pattern) != 2: - return - - reshape_is_A_input = matmul.in_port(0).get_source().node.id == reshape.id - if reshape_is_A_input: - idx = -1 
if matmul.transpose_b else -2 - else: - idx = -2 if matmul.transpose_a else -1 - idx = get_canonical_axis_index(initial_reshape_pattern, idx) - - shape_name = shape_source.node.soft_get('name', shape_source.node.id) - shape = Shape(graph, {'name': shape_name + '/Shape'}).create_node() - shape.in_port(0).connect(shape_source) - C = node_to_get_shape_value_of_indices(shape, [idx]) - N = Const(graph, {'name': shape_name + '/MinusOne', 'value': int64_array([-1])}).create_node() - - if len(initial_reshape_pattern) == 2: - if reshape_is_A_input: - reshape_pattern = [C, N] if matmul.transpose_a else [N, C] - else: - reshape_pattern = [N, C] if matmul.transpose_b else [C, N] - new_reshape_pattern = new_shape_node_from_shape_nodes(reshape_pattern) - reshape.in_port(1).get_connection().set_source(new_reshape_pattern.out_port(0)) - else: - return - diff --git a/tools/mo/openvino/tools/mo/back/MaxPool.py b/tools/mo/openvino/tools/mo/back/MaxPool.py deleted file mode 100644 index 37f24efe6441c4..00000000000000 --- a/tools/mo/openvino/tools/mo/back/MaxPool.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from openvino.tools.mo.back.FakeOutputResolver import FakeOutputResolver -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.result import Result - - -class MaxPool(BackReplacementPattern): - """ - Rename Pooling/max to MaxPool - """ - enabled = True - - def run_after(self): - return [FakeOutputResolver] - - def pattern(self): - return dict( - nodes=[ - ('pooling', {'type': 'Pooling', 'pool_method': 'max'}) - ], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['pooling'] - node.type = 'MaxPool' - del node['pool_method'] - if 'exclude_pad' in node: - del node['exclude_pad'] - - # adding missed outputs for MaxPool node - MaxPool.normalize_outputs(node) - - @staticmethod - def normalize_outputs(node: Node): - if node.out_port(0).disconnected(): - output = Result(node.graph, {'name': node.name + '/Result_port_0/', - 'keep_output_port': node.has_and_set('remove_values_output')}).create_node() - node.out_port(0).get_connection().set_destination(output.in_port(0)) - - # we check port existing to support MaxPool_1 with only 1 output port and MaxPool_8 with 2 output ports - if node.has_port('out', 1) and node.out_port(1).disconnected(): - output = Result(node.graph, {'name': node.name + '/Result_port_1/', - 'keep_output_port': node.has_and_set('remove_values_output')}).create_node() - node.out_port(1).get_connection().set_destination(output.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/back/NormalizeToNormalizeL2.py b/tools/mo/openvino/tools/mo/back/NormalizeToNormalizeL2.py deleted file mode 100644 index 1420072ba4c607..00000000000000 --- a/tools/mo/openvino/tools/mo/back/NormalizeToNormalizeL2.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.back.insert_compatibility_l2normalization import CompatibilityL2NormalizationPattern -from openvino.tools.mo.ops.elementwise import Mul -from openvino.tools.mo.ops.normalize_l2 import NormalizeL2Op -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from 
openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_node - - -class NormalizeToNormalizeL2(BackReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - return [CompatibilityL2NormalizationPattern] - - @staticmethod - def pattern(): - return dict( - nodes=[('normalize', {'type': 'Normalize'})], - edges=[], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['normalize'] - - # rename normalize node since it will be no longer output node after the transformation - output_name = node.soft_get('name', node.id) - normalizel2_name = output_name + '/normalizel2' - rename_node(node, normalizel2_name) - - assert node.in_port(0).data.get_shape().size in [2, 3, 4] - assert node.has_valid('across_spatial') - assert node.has_valid('channel_shared') - assert node.has_valid('eps') - - if 'bin' in node.in_edge(1): - del node.in_edge(1)['bin'] - - weights = node.in_port(1).data.get_value() - assert weights is not None - # in the code below we intentionally use get_source() to get the out port. Because updating the out port will - # update the Const node 'value' and 'shape' attributes - if node.channel_shared or all(weights == weights[0]): - node.in_port(1).get_source().data.set_value(mo_array([weights[0]])) - else: - new_shape = np.ones((len(node.in_port(0).data.get_shape())), dtype=np.int64) - new_shape[1] = -1 - node.in_port(1).get_source().data.set_value(mo_array(weights).reshape(new_shape)) - - mul = Mul(graph, {'name': output_name}).create_node() - rename_node(mul, output_name) - - if not node.across_spatial: - axes = int64_array([1]) - else: - axes = int64_array(np.arange(start=1, stop=node.in_port(0).data.get_shape().size)) - - normalizel2 = create_op_with_const_inputs(graph, NormalizeL2Op, {1: axes}, {'eps_mode': 'add', 'eps': node.eps}) - - node.out_port(0).get_connection().set_source(mul.out_port(0)) - node.in_port(1).get_connection().get_source().connect(mul.in_port(1)) - normalizel2.out_port(0).connect(mul.in_port(0)) - node.in_port(0).get_connection().set_destination(normalizel2.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/back/OptimizeTransposeReshapeSequence.py b/tools/mo/openvino/tools/mo/back/OptimizeTransposeReshapeSequence.py deleted file mode 100644 index b70e030981f465..00000000000000 --- a/tools/mo/openvino/tools/mo/back/OptimizeTransposeReshapeSequence.py +++ /dev/null @@ -1,361 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import math - -import numpy as np - -from openvino.tools.mo.back.FuseTransposesSequence import FuseTransposesSequence -from openvino.tools.mo.middle.FuseReshapesSequence import FuseReshapesSequence -from openvino.tools.mo.middle.RemoveRedundantReshapes import RemoveRedundantReshapes -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.fusing.helpers import get_next_operation -from openvino.tools.mo.ops.op import PermuteAttrs -from openvino.tools.mo.ops.reshape import Reshape - - -def split_input_permute_dimension(dim: int, permute_order: np.array): - """ - Creates updated permutation for a given permutation order and the *input* dimension index to be split into two. 
- :param dim: the input tensor dimension axis to split - :param permute_order: the permutation order - :return: the new permutation order after split of the specified dimension into two - """ - assert dim < len(permute_order) - new_permute_order = list() - for permute_index in permute_order: - if permute_index < dim: - new_permute_order.append(permute_index) - elif permute_index > dim: - new_permute_order.append(permute_index + 1) - else: - new_permute_order.append(permute_index) - new_permute_order.append(permute_index + 1) - return int64_array(new_permute_order) - - -def split_output_permute_dimension(dim: int, permute_order: np.array): - """ - Creates updated permutation for a given permutation order and the *output* dimension index to be split into two. - :param dim: the output tensor dimension axis to split - :param permute_order: the permutation order - :return: the new permutation order after split of the specified dimension into two - """ - assert dim < len(permute_order) - return split_input_permute_dimension(permute_order[dim], permute_order) - - -def match_shapes(input_shape: np.array, output_shape: np.array): - """ - Calculates "match" shape for the given input and output shape of the Reshape layer. The function splits some of the - input/output dimensions into several ones to make new input and output shapes equal. For example, - input_shape=(1,32,64,60)->Reshape->output_shape=(8,4,64,20,3) is converted to - input_shape=(1,8,4,64,20,3)->Reshape->output_shape=(1,8,4,64,20,3). - - :param input_shape: input shape of the Reshape - :param output_shape: output shape of the Reshape - :return: "match" shape or None if it is not possible to calculate match shape - """ - matched_shape = list() - - in_ind = 0 - out_ind = 0 - in_left = input_shape[0] - out_left = output_shape[0] - while in_ind < len(input_shape) or out_ind < len(output_shape): - if in_ind < len(input_shape) and out_ind < len(output_shape): - g = math.gcd(in_left, out_left) - matched_shape.append(g) - if g == 1 and in_left != 1 and out_left != 1: # shapes cannot be matched - return None - in_left //= g - out_left //= g - if in_left == 1: - in_ind += 1 - if in_ind < len(input_shape): - in_left *= input_shape[in_ind] - if out_left == 1: - out_ind += 1 - if out_ind < len(output_shape): - out_left *= output_shape[out_ind] - else: - matched_shape.append(1) - if out_ind != len(output_shape): - out_ind += 1 - else: - in_ind += 1 - return int64_array(matched_shape) - - -def split_dims_indices(input_shape: np.array, match_shape: np.array): - """ - Returns list of indices of the input shape to be split to match the match_shape shape - :param input_shape: input shape - :param match_shape: match shape - :return: list of indices (indices may be repetitive) - """ - result = list() - in_ind = 0 - match_ind = 0 - in_left = input_shape[0] - while match_ind < len(match_shape): - if in_ind >= len(input_shape): - assert match_shape[match_ind] == 1, 'Total number of elements in input shape and output shape are not equal' - match_ind += 1 - result.append(in_ind - 1) - elif match_shape[match_ind] == input_shape[in_ind] and match_shape[match_ind] == 1: - match_ind += 1 - in_ind += 1 - if in_ind < len(input_shape): - in_left *= input_shape[in_ind] - elif in_left > match_shape[match_ind]: - if in_left > match_shape[match_ind] or match_shape[match_ind] == 1: - result.append(in_ind) - in_left //= match_shape[match_ind] - match_ind += 1 - elif in_left == match_shape[match_ind]: - in_ind += 1 - match_ind += 1 - if in_ind < len(input_shape): - in_left = 
input_shape[in_ind] - else: - in_ind += 1 - in_left *= input_shape[in_ind] - return result - - -def reverse_permute(output_shape: np.array, order: np.array): - """ - Calculates Transpose op input shape based on output shape and permute order. - :param output_shape: Transpose output shape - :param order: permute order - :return: Transpose input shape corresponding to the specified output shape - """ - return int64_array(output_shape[PermuteAttrs.get_inverse_permutation(order)]) - - -def set_reshape_new_output_shape(reshape_node: Node, new_output_shape: np.array): - """ - Updates Reshape node shape to a new output shape. The function updates the second input if the node has it. - :param reshape_node: node to update - :param new_output_shape: new output shape - :return: None - """ - reshape_node.out_port(0).data.set_shape(new_output_shape) - in_ports = [port for port in reshape_node.in_ports().values() if not port.disconnected()] - if len(in_ports) == 2: - reshape_node.in_port(1).data.set_value(new_output_shape) - - -class OptimizeTransposeReshapeSequence(BackReplacementPattern): - """ - The transformation looks for the sequence of Reshape and Transpose operations and tries to optimize it. The procedure - is the following: - - 1. For each Reshape layer in the sequence of nodes being optimized (except leading and trailing one) make it dummy, - i.e. not changing the input and output shape. For example, convert - input_shape=(1,32,64,60)->Reshape->output_shape=(8,4,64,20,3) to - input_shape=(1,8,4,64,20,3)->Reshape->output_shape=(1,8,4,64,20,3). - 2. Propagate new input/output shape forward and backward through the Transpose nodes. - 3. Remove dummy Reshapes. - 4. Fuse sequence of Transposes. - """ - enabled = False - run_not_recursively = True - OPTIMIZED_NODE_FLAG = 'permute_reshape_optimized' - - def run_before(self): - from openvino.tools.mo.back.ReshapeMutation import ReshapeMutation - return [ReshapeMutation] - - def run_after(self): - return [FuseTransposesSequence] - - def is_node_match_for_optimization(self, node: Node): - """ - Check that the node can be added to the sequence of nodes for the Transpose-Reshape optimization - :param node: node to check - :return: result of the check - """ - # TODO change to 'op' and reshape-like - return node.has_and_set('type') and node.type in ('Transpose', 'Reshape') and \ - not node.has_and_set(self.OPTIMIZED_NODE_FLAG) - - def find_and_replace_pattern(self, graph: Graph): - for start_node in graph.pseudo_topological_sort(): - matched_nodes = [] - if self.is_node_match_for_optimization(start_node): - next_node = start_node - while self.is_node_match_for_optimization(next_node): - matched_nodes.append(next_node) - next_node[self.OPTIMIZED_NODE_FLAG] = True - next_nodes = get_next_operation(next_node) - if len(next_nodes) > 1: - log.debug('There are two consumers of the node {}. Stop matching sequence.'.format( - next_node.soft_get('name'))) - break - next_node = next_nodes[0] - # optimize sequence of three or more Transpose-Reshape nodes - if len(matched_nodes) >= 3: - self.optimize_permute_reshape_sequence(graph, matched_nodes) - - # run the RemoveRedundantReshapes to remove dummy (NOP) reshapes. 
After that we can run Transposes fusing - FuseReshapesSequence().find_and_replace_pattern(graph) - RemoveRedundantReshapes().find_and_replace_pattern(graph) - FuseTransposesSequence().find_and_replace_pattern(graph) - - @staticmethod - def optimize_permute_reshape_sequence(graph: Graph, nodes: list): - log.debug('Running permute-reshape optimization of the following nodes: {}'.format( - [node.soft_get('name') for node in nodes])) - - # the transformation expects that the first and the last operation in the sequence is Reshape so the following - # function adds required reshapes - __class__.add_leading_and_trailing_reshape(graph, nodes) - - for ind in range(1, len(nodes) - 1): - node = nodes[ind] - input_shape = node.in_node(0).shape - output_shape = node.out_node(0).shape - if node.type == 'Reshape' and not np.array_equal(input_shape, output_shape): - log.debug('The Reshape node "{}" is not NOP. Shapes: "{}" vs "{}"'.format( - node.soft_get('name'), input_shape, output_shape)) - __class__.make_reshape_nop(node) - - @staticmethod - def add_leading_and_trailing_reshape(graph: Graph, nodes: list): - """ - When the first operation in the matched list is the Transpose then add the Reshape operation which reshapes to the - Transpose input shape. This Reshape op is needed for the optimization pass. If the optimization will not be - applied then this dummy Reshape will be removed by the "RemoveRedundantReshapes" pass. - - :param graph: the graph with nodes - :param nodes: the sequence of Transpose and ReshapeFF nodes - :return: None - """ - # add leading Reshape - if nodes[0].type == 'Transpose': - dummy_reshape_node = create_op_node_with_second_input( - graph, Reshape, nodes[0].in_port(0).data.get_shape().copy(), - {'name': nodes[0].in_port(0).get_connection().get_source().node.id + '/Reshape'}) - dummy_reshape_node[__class__.OPTIMIZED_NODE_FLAG] = True - nodes[0].in_port(0).get_connection().insert_node(dummy_reshape_node) - nodes.insert(0, dummy_reshape_node) - log.debug('Added Reshape op "{}" in the beginning of the permute-reshape sequence'.format( - dummy_reshape_node.soft_get('name'))) - - # similarly add the Reshape op after the last Transpose op which reshapes to the Transpose output shape - if nodes[-1].type == 'Transpose': - dummy_reshape_node = create_op_node_with_second_input( - graph, Reshape, nodes[-1].out_port(0).data.get_shape().copy(), - {'name': nodes[0].out_port(0).get_connection().get_destination().node.id + '/Reshape'}) - dummy_reshape_node[__class__.OPTIMIZED_NODE_FLAG] = True - nodes[-1].out_port(0).get_connection().insert_node(dummy_reshape_node) - nodes.append(dummy_reshape_node) - log.debug('Added Reshape op "{}" in the end of the permute-reshape sequence'.format( - dummy_reshape_node.soft_get('name'))) - - @staticmethod - def forward_new_reshape_shape(reshape_node: Node, initial_output_shape: np.array): - """ - Propagates the changed output shape of the Reshape node forward. The output of the Reshape node should be - Transpose so it is necessary to update its 'order' attribute according to the updated shape and output data node. - :param reshape_node: the Reshape node to propagate the shape - :param initial_output_shape: old output shape of the Reshape node - :return: None - """ - output_shape = reshape_node.out_port(0).data.get_shape() - if np.all(output_shape == initial_output_shape): - log.debug('Initial output and new output shapes match for node "{}". 
Do nothing'.format( - reshape_node.soft_get('name'))) - return - - dest_node = reshape_node.out_port(0).get_destination().node - if dest_node.type == 'Transpose': - split_dims = split_dims_indices(initial_output_shape, output_shape) - assert dest_node.in_port(1).data.get_value() is not None, \ - 'The 1st input value "order" is not set for Transpose node "{}"'.format(dest_node.soft_get('name')) - permute_order = dest_node.in_port(1).data.get_value() - for split_dim in split_dims: - permute_order = split_input_permute_dimension(split_dim, permute_order) - dest_node.in_port(1).data.set_value(permute_order) - dest_node.infer(dest_node) - elif dest_node.type == 'Reshape': - log.debug('Two subsequent reshape nodes: "{}" and "{}". Nothing to optimize'.format( - reshape_node.soft_get('name'), dest_node.soft_get('name'))) - else: - assert False, 'Unsupported type of the node "{}" in the Transpose-Reshape optimization' \ - ''.format(dest_node.type) - - @staticmethod - def backward_new_reshape_shape(reshape_node: Node, initial_input_shape: np.array): - """ - Propagates the changed input shape of the Reshape node backward. - 1. The input of the Reshape node should be Transpose so it is necessary to update its 'order' attribute according - to the updated shape and input data node. - 2. The input of the Transpose should be a Reshape node, so it is necessary to update its 'dim' attribute. - - :param reshape_node: the Reshape node to propagate the shape - :param initial_input_shape: old input shape of the Reshape node - :return: None - """ - input_shape = reshape_node.in_port(0).data.get_shape() - if np.all(input_shape == initial_input_shape): - log.debug('Initial input and new input shapes match for node "{}". Do nothing'.format( - reshape_node.soft_get('name'))) - return - - src_node = reshape_node.in_port(0).get_source().node - if src_node.type == 'Transpose': - split_dims = split_dims_indices(initial_input_shape, input_shape) - assert src_node.in_port(1).data.get_value() is not None, \ - 'The 1st input value "order" is not set for Transpose node "{}"'.format(src_node.soft_get('name')) - permute_order = src_node.in_port(1).data.get_value() - for split_dim in split_dims: - permute_order = split_output_permute_dimension(split_dim, permute_order) - src_node.in_port(1).data.set_value(permute_order) - - # calculate a Transpose input shape based on the Transpose output shape - new_permute_input_shape = reverse_permute(input_shape, permute_order) - - # update the Transpose input node (it should be Reshape) output shape and 'dim' attribute - permute_source_port = src_node.in_port(0).get_source() - permute_source_port.data.set_shape(new_permute_input_shape) - set_reshape_new_output_shape(permute_source_port.node, new_permute_input_shape) - elif src_node.type == 'Reshape': - log.debug('Two subsequent reshape nodes: "{}" and "{}". Nothing to optimize'.format( - reshape_node.soft_get('name'), src_node.soft_get('name'))) - else: - assert False, 'Unsupported type of the node "{}" in the Transpose-Reshape optimization' \ - ''.format(src_node.type) - - @staticmethod - def make_reshape_nop(reshape_node: Node): - """ - Change the node input and output shape so the Reshape node becomes dummy (NOP). Then propagate new shapes back - and forth. 
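# Illustrative aside (not part of the deleted file): a minimal numpy check of what
# "making the Reshape a NOP" means, using the shapes from the class docstring above.
# Both the original Reshape and its NOP replacement preserve the flat element order,
# so the dummy node can later be dropped by RemoveRedundantReshapes.
import numpy as np
data = np.arange(1 * 32 * 64 * 60)
original = data.reshape(1, 32, 64, 60).reshape(8, 4, 64, 20, 3)        # original, shape-changing Reshape
nop = data.reshape(1, 8, 4, 64, 20, 3).reshape(1, 8, 4, 64, 20, 3)     # dummy (NOP) Reshape
assert np.array_equal(original.ravel(), nop.ravel())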
- - :param reshape_node: reshape node to make it dummy - :return: None - """ - initial_input_shape = reshape_node.in_port(0).data.get_shape().copy() - initial_output_shape = reshape_node.out_port(0).data.get_shape().copy() - - # calculate new shape which makes the Reshape NOP - match_shape = match_shapes(initial_input_shape, initial_output_shape) - if match_shape is None: # it is not possible to optimize reshape - return - - # update Reshape node and input/output attrs - reshape_node.in_port(0).data.set_shape(match_shape) - set_reshape_new_output_shape(reshape_node, match_shape) - - # propagate forward a new reshape shape by updating Transpose op consumer attributes and the following data node - __class__.forward_new_reshape_shape(reshape_node, initial_output_shape) - - # propagate backward a new shape - __class__.backward_new_reshape_shape(reshape_node, initial_input_shape) diff --git a/tools/mo/openvino/tools/mo/back/PackBinaryWeights.py b/tools/mo/openvino/tools/mo/back/PackBinaryWeights.py deleted file mode 100644 index d78d48b36f61ff..00000000000000 --- a/tools/mo/openvino/tools/mo/back/PackBinaryWeights.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph - - -class PackBinaryWeights(BackReplacementPattern): - enabled = True - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('op', dict(kind='op', type='BinaryConvolution'))], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - conv = match['op'] - assert len(conv.in_nodes()) == 2 - initial_shape = conv.in_port(1).data.get_shape() - assert initial_shape is not None - weights = conv.in_port(1).data.get_value().flatten() - weights_rounded = np.round(weights) - assert np.all(np.isclose(weights, weights_rounded)) - assert len(conv.in_node(1).out_nodes()) == 1 - weights_rounded = mo_array(weights_rounded, dtype=np.int32) + 1 # -1 --> 0 - # Reversing element in chunks by 8 elements to pack bits correctly - # First need to pad data with necessary number of element to make the length dividable by 8 - pad = (-len(weights_rounded)) % 8 - weights_rounded = mo_array(np.concatenate((weights_rounded, np.zeros([pad]))), dtype=np.int32) - assert len(weights_rounded) % 8 == 0 - weights_rounded = weights_rounded.reshape([len(weights_rounded) // 8, 8]) - weights_rounded = np.flip(weights_rounded, axis=1) - weights_rounded = weights_rounded.flatten() - packed = np.packbits(weights_rounded) - conv.in_port(1).data.set_value(packed) - conv['packed_weights'] = 1 - - conv.in_node(1)['force_shape'] = initial_shape.copy() - conv.in_node(1)['shape'] = initial_shape.copy() - conv.in_node(1)['force_type'] = 'U1' diff --git a/tools/mo/openvino/tools/mo/back/ProposalMutation.py b/tools/mo/openvino/tools/mo/back/ProposalMutation.py deleted file mode 100644 index 3785078afc8e38..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ProposalMutation.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.back.ReshapeMutation import ReshapeMutation -from openvino.tools.mo.back.StridedSliceMasksNormalizer import StridedSliceMasksNormalizer -from openvino.tools.mo.back.replacement import BackReplacementPattern -from 
openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs, create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.strided_slice import StridedSlice - - -class ProposalMutation(BackReplacementPattern): - enabled = True - force_shape_inference = True - - def run_before(self): - return [ReshapeMutation, StridedSliceMasksNormalizer] - - @staticmethod - def pattern(): - return dict( - nodes=[('proposal', {'type': 'Proposal'})], - edges=[], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['proposal'] - assert len(node.in_ports()) == 3, "Proposal op must have exactly 3 input ports" - im_info_shape = node.in_port(2).data.get_shape() - assert im_info_shape is not None - - if np.array_equal(im_info_shape, [1, 6]): - log.error('The model contains Proposal layer "{}" with input of shape [1, 6]. OpenVINO ' - 'implementation of the Proposal layer uses only 4 first values (indices 0, 1, 2 and 3). ' - 'Elements with indices 4 and 5 will be ignored.'.format(node.soft_get('name', node.id)), - extra={'is_warning': True}) - - cropped_im_info = create_op_with_const_inputs(graph, StridedSlice, {1: mo_array([0, 0], dtype=np.int32), - 2: mo_array([1, 3], dtype=np.int32), - 3: mo_array([1, 1], dtype=np.int32)}, - {'name': 'cropped_im_info', - 'begin_mask': int64_array([1, 1]), - 'end_mask': int64_array([1, 1]), - 'new_axis_mask': int64_array([0, 0]), - 'shrink_axis_mask': int64_array([0, 0]), - 'ellipsis_mask': int64_array([0, 0]), - 'override_output_shape': True, - }) - - node.in_port(2).get_connection().insert_node(cropped_im_info) - - # update the im_info_shape so the next 'if' statement become true - im_info_shape = int64_array([1, 3]) - - if np.array_equal(im_info_shape, [1, 3]) or np.array_equal(im_info_shape, [1, 4]): - reshape = create_op_node_with_second_input(graph, Reshape, [im_info_shape[1]], {'name': 'im_info/Reshape'}) - node.in_port(2).get_connection().set_destination(reshape.in_port(0)) - reshape.out_port(0).connect(node.in_port(2)) diff --git a/tools/mo/openvino/tools/mo/back/RNNSequenceTypeRename.py b/tools/mo/openvino/tools/mo/back/RNNSequenceTypeRename.py deleted file mode 100644 index 7ebde10815b9c1..00000000000000 --- a/tools/mo/openvino/tools/mo/back/RNNSequenceTypeRename.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class RNNSequence(BackReplacementPattern): - """ - This transform change type RNNSequence (internal MO type for all recurrent layers) - to correct operation name. 
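# Illustrative aside (not part of the deleted file): the renaming rule implemented
# below, as a standalone hedged sketch. 'rename_rnn_sequence_type' is a hypothetical
# helper name; the supported ops and the '<op>Sequence' suffix mirror the class code.
def rename_rnn_sequence_type(op: str) -> str:
    supported_ops = ('RNN', 'LSTM', 'GRU')
    assert op in supported_ops
    return op + 'Sequence'

assert rename_rnn_sequence_type('LSTM') == 'LSTMSequence'
assert rename_rnn_sequence_type('GRU') == 'GRUSequence'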
- """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('rnn_layer', {'type': 'RNNSequence'}) - ], - edges=[] - ) - - _supported_ops = ['RNN', 'LSTM', 'GRU'] - - def replace_pattern(self, graph: Graph, match: dict): - rnn_layer = match['rnn_layer'] - assert rnn_layer['op'] in self._supported_ops - rnn_layer['type'] = rnn_layer['op'] + 'Sequence' diff --git a/tools/mo/openvino/tools/mo/back/ReduceMerge.py b/tools/mo/openvino/tools/mo/back/ReduceMerge.py deleted file mode 100644 index 0014c13215d142..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ReduceMerge.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.ReduceOps import reduce_map -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.concat import Concat - - -class ReduceMerge(BackReplacementPattern): - """ - Fuses sequence of Reduces of the same type into one Reduce layer of this particular type with updated axes input - Limitations: - - `keep_dims` attribute should be the same for all Reduces in the sequence - - in case `keep_dims`=False: next Reduce axes should be strictly less than previous Reduce axes - """ - enabled = True - force_clean_up = True - - @staticmethod - def fuse_reduces(first_reduce, second_reduce): - first_reduce_name = first_reduce.soft_get('name', first_reduce.id) - second_reduce_name = second_reduce.soft_get('name', second_reduce.id) - reduce_type = first_reduce.type - - assert first_reduce.type == second_reduce.type - - if len(first_reduce.out_port(0).get_destinations()) != 1: - # data dependency - return - - if first_reduce.keep_dims != second_reduce.keep_dims: - return - - first_axes = first_reduce.in_port(1).data.get_value() - second_axes = second_reduce.in_port(1).data.get_value() - if first_axes is None or second_axes is None: - # dynamic axes merging is not supported - return - - if not first_reduce.keep_dims: - if not np.all(first_axes > second_axes): - # indexing of upper reduce input dimensions changed - return - - graph = second_reduce.graph - - new_axes = Concat(graph, {'name': second_reduce_name + '/Axes', 'axis': int64_array(0), 'in_ports_count': 2, - 'override_output_shape': True}).create_node() - new_axes.in_port(0).connect(first_reduce.in_port(1).get_source()) - new_axes.in_port(1).connect(second_reduce.in_port(1).get_source()) - - first_reduce.in_port(0).get_source().node['need_shape_inference'] = True - first_reduce.in_port(0).get_source().node['override_output_shape'] = True - - second_reduce.in_port(1).get_connection().set_source(new_axes.out_port(0)) - - first_reduce.out_port(0).get_connection().set_source(first_reduce.in_port(0).get_connection().get_source()) - first_reduce.in_port(1).disconnect() - graph.remove_node(first_reduce.id) - - log.debug('{0} nodes {1} and {2} were fused to a single {2} node with updated axes input' - ''.format(reduce_type, first_reduce_name, second_reduce_name)) - - def find_and_replace_pattern(self, graph: Graph): - rsorted_nodes = graph.pseudo_topological_sort(reverse=True) - for reduce_type in reduce_map.keys(): - reduces_of_type = [n for n in rsorted_nodes if n.id in graph and n.soft_get('type') == reduce_type] - for second_reduce_node in reduces_of_type: - if second_reduce_node.id not in graph: - continue - first_reduce_node = 
second_reduce_node.in_port(0).get_source().node - if first_reduce_node.soft_get('type', None) == reduce_type: - ReduceMerge.fuse_reduces(first_reduce=first_reduce_node, second_reduce=second_reduce_node) diff --git a/tools/mo/openvino/tools/mo/back/ReduceTransposeDimensions.py b/tools/mo/openvino/tools/mo/back/ReduceTransposeDimensions.py deleted file mode 100644 index 84bbfb6793376e..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ReduceTransposeDimensions.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.back.OptimizeTransposeReshapeSequence import set_reshape_new_output_shape -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph - - -def sequential_dims(order: np.array): - """ - Returns first instance (counting from left) of the sequential dimensions in the 'order' - :param order: order to look for sequential dims - :return: list of indices of the sequential dimensions. If sequential dimensions are not found then return None. - """ - start_ind = cur_ind = 0 - while cur_ind + 1 < len(order): - if order[cur_ind] + 1 == order[cur_ind + 1]: - cur_ind += 1 - else: - if start_ind < cur_ind: - return list(range(start_ind, cur_ind + 1)) - cur_ind += 1 - start_ind = cur_ind - if start_ind < cur_ind: - return list(range(start_ind, cur_ind + 1)) - return None - - -def merge_permute_order_dimensions(dims: list, permute_order: np.array): - """ - Creates updated permutation for a given permutation order and the *input* dimension indices to be merged into one. - :param dims: the input tensor dimensions indices to merge - :param permute_order: the permutation order - :return: the new permutation order after merging of the specified dimensions into one - """ - assert len(dims) >= 2 - new_permute_order = list() - for permute_index in permute_order: - if permute_index < permute_order[dims[0]]: - new_permute_order.append(permute_index) - elif permute_index > permute_order[dims[-1]]: - new_permute_order.append(permute_index - len(dims) + 1) - elif permute_index == permute_order[dims[0]]: - new_permute_order.append(permute_order[dims[0]]) - return int64_array(new_permute_order) - - -def merge_dims(dims_to_merge: np.array, shape: np.array): - """ - Merge several sequential specified dims into one. - - The function does not support magic number "0" in the 'shape'. - :param dims_to_merge: the dimensions indices to merge - :param shape: shape to merge - :return: new shape with merged specified dims - """ - for ind in range(len(dims_to_merge) - 1): - assert dims_to_merge[ind] + 1 == dims_to_merge[ind + 1], 'The dims to merge must be sequential' - assert 0 not in shape, 'The value 0 is not supported during merging of the shape' - - result = list() - if dims_to_merge[0] != 0: - result.extend(shape[:dims_to_merge[0]]) - # handle magic number "-1" - if -1 in shape[dims_to_merge]: - result.append(-1) - else: - result.append(np.prod(shape[dims_to_merge])) - if dims_to_merge[-1] + 1 != len(shape): - result.extend(shape[dims_to_merge[-1] + 1:]) - return int64_array(result) - - -class ReduceTransposeDimensions(BackReplacementPattern): - """ - Transformation looks for the Transpose layers with sequential dimensions in the permutation order and merges them into - one thus reducing the number of dimensions. 
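# Illustrative aside (not part of the deleted file): a numpy sanity check of the
# equivalence the helpers above rely on. For order [0, 2, 3, 1] the values 2 and 3
# form a sequential run, so input dims 2 and 3 can be merged (64 * 60 -> 3840) and
# the permutation shrinks to the 3D order [0, 2, 1]. Shapes here are illustrative only.
import numpy as np
x = np.arange(1 * 32 * 64 * 60).reshape(1, 32, 64, 60)
full = x.transpose(0, 2, 3, 1).reshape(1, 64 * 60, 32)     # original 4D permute, H and W flattened afterwards
reduced = x.reshape(1, 32, 64 * 60).transpose(0, 2, 1)     # merged dims first, then reduced order [0, 2, 1]
assert np.array_equal(full, reduced)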
The transformation is applied to 5D+ permutations only. - """ - enabled = False - - def run_after(self): - from openvino.tools.mo.back.OptimizeTransposeReshapeSequence import OptimizeTransposeReshapeSequence - return [OptimizeTransposeReshapeSequence] - - def run_before(self): - from openvino.tools.mo.back.ReshapeMutation import ReshapeMutation - return [ReshapeMutation] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('reshape_1', dict(kind='op', type='Reshape')), # TODO change to reshape-like - ('reshape_1_data', dict(kind='data')), - ('permute', dict(kind='op', type='Transpose')), - ('permute_data', dict(kind='data')), - ('reshape_2', dict(kind='op', type='Reshape')), - ], - edges=[ - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'permute'), - ('permute', 'permute_data'), - ('permute_data', 'reshape_2'), - ] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - permute_node = match['permute'] - reshape_1_node = match['reshape_1'] - order = permute_node.in_port(1).data.get_value().copy() - if len(order) >= 5: - log.debug('Trying to merge dimensions of the Transpose layer "{}"'.format(permute_node.soft_get('name'))) - seq_dims = sequential_dims(order) - while seq_dims is not None: - permute_input_shape = permute_node.in_port(0).data.get_shape().copy() - # calculate new Transpose order and output of the Reshape layer - new_reshape_dims = merge_dims(order[seq_dims], permute_input_shape) - new_permute_order = merge_permute_order_dimensions(seq_dims, order) - - assert reshape_1_node.has('dim') - # update data Transpose and Reshape attributes and data nodes shapes - set_reshape_new_output_shape(reshape_1_node, new_reshape_dims) - - permute_node.in_port(1).data.set_value(new_permute_order) - permute_node.infer(permute_node) - order = permute_node.in_port(1).data.get_value().copy() - seq_dims = sequential_dims(order) diff --git a/tools/mo/openvino/tools/mo/back/RemoveUselessConvert.py b/tools/mo/openvino/tools/mo/back/RemoveUselessConvert.py deleted file mode 100644 index c4aab3b8b90405..00000000000000 --- a/tools/mo/openvino/tools/mo/back/RemoveUselessConvert.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class RemoveUselessConvert(BackReplacementPattern): - """ - Transformation looks for the Converts layers that do not change actual tensor data type. 
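# Illustrative aside (not part of the deleted file): a Convert whose input and output
# element types match is an identity, which is exactly what the pass below removes.
# Minimal numpy analogue of such a useless cast:
import numpy as np
x = np.ones((2, 3), dtype=np.float32)
y = x.astype(np.float32)          # same-dtype cast: a new tensor with identical contents
assert y.dtype == x.dtype and np.array_equal(y, x)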
- The transformation is executed explicitly from the prepare_emit_ir function - """ - enabled = False - run_not_recursively = True - - def find_and_replace_pattern(self, graph: Graph): - for cast_node in graph.get_op_nodes(op='Cast'): - if cast_node.in_port(0).get_data_type() == cast_node.out_port(0).get_data_type(): - log.debug('Convert node {} do not change the data type of the input data.'.format(cast_node.name)) - cast_node.out_port(0).get_connection().set_source(cast_node.in_port(0).get_connection().get_source()) - graph.remove_node(cast_node.id) diff --git a/tools/mo/openvino/tools/mo/back/ReshapeMutation.py b/tools/mo/openvino/tools/mo/back/ReshapeMutation.py deleted file mode 100644 index 58bba727576aed..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ReshapeMutation.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.ForceStrictPrecision import ForceStrictPrecision -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class ReshapeMutation(BackReplacementPattern): - enabled = True - force_clean_up = True - run_not_recursively = True - - def run_before(self): - return [ForceStrictPrecision] - - @staticmethod - def pattern(): - return dict( - nodes=[('reshape', {'kind': 'op'})], - edges=[], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - reshape = match['reshape'] - - if reshape.soft_get('type') == 'Reshape': - reshape['force_precision_in_ports'] = {1: 'int64'} diff --git a/tools/mo/openvino/tools/mo/back/ResultNormalizer.py b/tools/mo/openvino/tools/mo/back/ResultNormalizer.py deleted file mode 100644 index d2c43152839709..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ResultNormalizer.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class ResultNormalizer(BackReplacementPattern): - enabled = True - - @staticmethod - def pattern(): - return dict( - nodes=[('result', {'type': 'Result'})], - edges=[], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['result'] - if len(node.in_nodes()) == 0: - graph.erase_node(node) diff --git a/tools/mo/openvino/tools/mo/back/ResultRename.py b/tools/mo/openvino/tools/mo/back/ResultRename.py deleted file mode 100644 index bf4ec976d1c56c..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ResultRename.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class ResultRename(BackReplacementPattern): - # This transformation sets the Result operation name equal to the incoming tensor name. - # For some frameworks like kaldi and onnx this may result in appearance of nodes with identical names, - # which can lead to errors in other transformations. - # So ResultRename should be launched at the end of back phase. 
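# Illustrative aside (not part of the deleted file): the Result name-selection rule
# used below, as a hedged standalone sketch. 'pick_result_name' is a hypothetical
# helper; the '<producer>/sink_port_<idx>' fallback mirrors the transformation code.
def pick_result_name(tensor_names, op_names, producer_name, port_idx):
    for tensor_name in tensor_names:
        if tensor_name not in op_names:       # prefer a tensor name not taken by an op
            return tensor_name
    return '{}/sink_port_{}'.format(producer_name, port_idx)

assert pick_result_name(['prob:0'], {'conv1', 'prob'}, 'prob', 0) == 'prob:0'
assert pick_result_name([], {'conv1'}, 'conv1', 0) == 'conv1/sink_port_0'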
- enabled = False - - def find_and_replace_pattern(self, graph: Graph): - op_names = set() - result_names_map = dict() - for node in graph.get_op_nodes(): - if node.has_valid('name'): - op_names.add(node['name']) - - for node in graph.get_op_nodes(type='Result'): - if node.in_ports(): - prev_node_out_port = node.in_port(0).get_connection().get_source() - tensor_names = prev_node_out_port.get_tensor_names() - # Graph may contain Result nodes with names equal to input tensors and - # renaming in this case is not needed. The example of such situation is - # IR reader check when graph is read with correct Result names. - if node.soft_get('name') in tensor_names: - continue - - # Try to find tensor name, that is not intersects with graph node names - result_name = None - for tensor_name in tensor_names: - if tensor_name not in op_names: - if node.has_valid('name'): - op_names.remove(node['name']) - op_names.add(tensor_name) - result_name = tensor_name - break - - # If we didn't find appropriate tensor name, then Result is named by default naming - if result_name is None: - result_name = prev_node_out_port.node.soft_get('name', prev_node_out_port.node.id) + \ - '/sink_port_' + str(prev_node_out_port.idx) - log.warning("Tensor name for Result node with name {} wasn't found. " - "Default renaming was used: {}".format(node.soft_get('name', node.id), - result_name)) - result_names_map[node['name']] = result_name - node['name'] = result_name - - # Change names saved in graph.outputs_order - for i in range(len(graph.outputs_order)): - if graph.outputs_order[i] in result_names_map: - graph.outputs_order[i] = result_names_map[graph.outputs_order[i]] diff --git a/tools/mo/openvino/tools/mo/back/ReverseInputChannels.py b/tools/mo/openvino/tools/mo/back/ReverseInputChannels.py deleted file mode 100644 index 047f5b96f34f78..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ReverseInputChannels.py +++ /dev/null @@ -1,495 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.layout import get_dim_from_layout, get_features_dim -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, compatible_dims -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.op import Op, PermuteAttrs -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.utils.error import Error - - -class ReverseChannels(Op): - """ - Internal op that will never be emitted into IR and replaced by other, publicly supported ops - """ - op = 'ReverseChannels' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': None, - 'axis': int64_array(1), - 'order': int64_array([2, 1, 0]), - 'infer': self.infer, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node): - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None - node.out_port(0).data.set_shape(input_shape) - - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) - - -class 
ReverseChannelsPropagationDown(BackReplacementPattern): - """ - Propagates ReverseChannels operations down through nodes that we have rules for - """ - enabled = False - - propagation_rules = { - 'Convolution': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_conv(node, rc), - - 'ScaleShift': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_eltwise(node, rc), - 'Power': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_eltwise(node, rc), - 'BatchNormalization': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_eltwise(node, rc), - 'FakeQuantize': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_eltwise(node, rc), - 'Multiply': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_eltwise(node, rc), - 'Divide': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_eltwise(node, rc), - 'Add': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_eltwise(node, rc), - 'Subtract': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_eltwise(node, rc), - 'Pow': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_eltwise(node, rc), - 'Convert': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_eltwise(node, rc), - - 'Shape': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_shape(node, rc), - 'ShapeOf': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_shape(node, rc), - - 'Pad': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_zero_port_only(node, rc), - 'Transpose': lambda node, rc: ReverseChannelsPropagationDown.pass_rc_through_transpose(node, rc), - } - - @staticmethod - def pass_rc_through_transpose(node: Node, reverse_channels: Node): - if node.in_port(1).disconnected() or node.in_port(0).disconnected(): - return False - order = node.in_port(1).data.get_value() - reverse_axis = reverse_channels.axis - - data_rank = len(list(node.in_port(0).data.get_shape())) - - if reverse_axis < 0: - reverse_axis = data_rank + reverse_axis - assert 0 < reverse_axis < data_rank, "Incorrect ReverseChannels axis in node {}.".format(reverse_channels) - - if order is None: - return False - new_axis = list(order).index(reverse_axis) - reverse_channels.axis = int64_array(new_axis) - return ReverseChannelsPropagationDown.pass_rc_through_zero_port_only(node, reverse_channels) - - @staticmethod - def pass_rc_through_zero_port_only(node: Node, reverse_channels: Node): - r""" - BEFORE AFTER - - previous_op - | - ReverseChannels previous_op previous_op previous_op - \ / \ / - Node Node - | - ReverseChannels - - returns boolean value whatever we should continue propagating current ReverseChannels operation down or not - """ - # detaching reverse_channels node from the graph - if reverse_channels.is_in_port_connected(0) and reverse_channels.is_out_port_connected(0) \ - and node.is_out_port_connected(0): - reverse_channels.out_port(0).get_connection().set_source( - reverse_channels.in_port(0).get_connection().get_source()) - reverse_channels.in_port(0).disconnect() - - node.out_port(0).get_connection().set_source(reverse_channels.out_port(0)) - node.out_port(0).disconnect() - node.out_port(0).connect(reverse_channels.in_port(0)) - return True - return False - - @staticmethod - def pass_rc_through_conv(node, reverse_channels): - r""" - For non grouped convolution: - BEFORE AFTER - - previous_op weights - | | - ReverseChannels weights previous_op ReverseChannels - \ / \ / - Conv Conv - - For grouped convolution: - BEFORE AFTER - - previous_op 
weights - | | - ReverseChannels weights previous_op ReverseChannels - \ / \ / - Conv Conv - | - ReverseChannels - - returns boolean value whatever we should continue propagating current ReverseChannels operation down or not - """ - channel_idx = node.soft_get("input_feature_channel", None) - if channel_idx is None: - # unknown Convolution configuration, won't propagate reverse_channels down the network - return False - weights_shape = node.in_port(1).data.get_shape() - if weights_shape is None or weights_shape[channel_idx] != reverse_channels.order.size: - # unexpected Convolution configuration, won't propagate reverse_channels down the network - return False - - # detaching reverse_channels node from the graph - reverse_channels.out_port(0).get_connection().set_source( - reverse_channels.in_port(0).get_connection().get_source()) - reverse_channels.in_port(0).disconnect() - - group = node.soft_get('group', 1) - - # insert ReverseChannels on weights port of Convolution - ric_to_move_to_weights = reverse_channels if group == 1 else reverse_channels.copy_node() - ric_to_move_to_weights['axis'] = mo_array(channel_idx) - src = node.in_port(1).get_connection().get_source() - node.in_port(1).get_connection().set_source(ric_to_move_to_weights.out_port(0)) - src.disconnect() - src.connect(ric_to_move_to_weights.in_port(0)) - - if group != 1 and group == reverse_channels.order.size: - # grouped Convolution weights channel reversing is not enough to complete channel reversing procedure - # we propagate ReverseChannels op through current Convolution with new order value for channel permutation - bottom_channels = node.out_port(0).data.get_shape()[node.channel_dims[0]] - assert bottom_channels % group == 0 - multiplier = int(bottom_channels / group) - new_order = np.take(np.arange(bottom_channels).reshape((group, multiplier)), - indices=reverse_channels.order, axis=0).flatten() - reverse_channels['axis'] = mo_array(reverse_channels.axis.copy()) - reverse_channels['order'] = mo_array(new_order) - - node.out_port(0).get_connection().set_source(reverse_channels.out_port(0)) - node.out_port(0).disconnect() - node.out_port(0).connect(reverse_channels.in_port(0)) - - # as described above, we are not done reversing channels yet, so we should continue propagating - # ReverseChannels operation down the network - return True - # we reversed channels for sure, nothing to propagate down the network - return False - - @staticmethod - def pass_rc_through_eltwise(node, reverse_channels): - r""" - BEFORE AFTER - - previous_op previous_op' - | | - ReverseChannels previous_op' previous_op ReverseChannels - \ / \ / - Eltwise Eltwise - | - ReverseChannels - - returns boolean value whatever we should continue propagating current ReverseChannels operation down or not - """ - before_shape = reverse_channels.out_port(0).data.get_shape() - - port_axis = [] - for idx, port in node.in_ports().items(): - if port.get_connection().get_source().node.id == reverse_channels.id: - continue - shape = port.data.get_shape() - non_one_dims = np.where(shape != 1)[0] - if len(shape) == 0 or shape[reverse_channels.axis] == 1: - continue # nothing to flip for this input - if len(non_one_dims) == 1 and shape[non_one_dims.item()] == reverse_channels.order.size: - new_axis = non_one_dims.item() - elif np.array_equal(before_shape, shape): - new_axis = reverse_channels.axis - else: - # shape has multiple non-one values and shape is not fully broadcasted to value port shape - # it is safe not to propagate reverse channels - return False - 
port_axis.append((port, new_axis)) - - # reversing eltwise inputs where applicable - for port, axis in port_axis: - ric_copy = reverse_channels.copy_node({'axis': mo_array(axis), 'order': mo_array(reverse_channels.order)}) - - src = port.get_connection().get_source() - port.get_connection().set_source(ric_copy.out_port(0)) - src.disconnect() - src.connect(ric_copy.in_port(0)) - - # detaching reverse_channels node from the graph - reverse_channels.out_port(0).get_connection().set_source( - reverse_channels.in_port(0).get_connection().get_source()) - reverse_channels.in_port(0).disconnect() - - # propagating reverse_channels node to the output port of eltwise - node.out_port(0).get_connection().set_source(reverse_channels.out_port(0)) - node.out_port(0).disconnect() - node.out_port(0).connect(reverse_channels.in_port(0)) - - # propagated reverse_channels successfully through current node, will continue propagation - return True - - @staticmethod - def pass_rc_through_shape(node, reverse_channels): - """ - stops propagation of RIC through shape taking operations, due to RIC does not change shape - """ - reverse_channels.out_port(0).get_connection().set_source( - reverse_channels.in_port(0).get_connection().get_source()) - return False - - @staticmethod - def get_non_shape_taking_dst(dsts): - return [dst for dst in dsts if dst.node.soft_get('type') not in ['Shape', 'ShapeOf']] - - def check_if_we_propagate_down(self, reverse_channels): - dsts = self.get_non_shape_taking_dst(reverse_channels.out_port(0).get_destinations()) - return len(dsts) == 1 and dsts[0].node.soft_get('type') in self.propagation_rules - - def find_and_replace_pattern(self, graph: Graph): - for reverse_channels in graph.get_op_nodes(op='ReverseChannels'): - keep_moving_down = True - while keep_moving_down and self.check_if_we_propagate_down(reverse_channels): - next_node = self.get_non_shape_taking_dst(reverse_channels.out_port(0).get_destinations())[0].node - keep_moving_down = self.propagation_rules[next_node.type](next_node, reverse_channels) - - -class ReverseChannelsPropagationUp(BackReplacementPattern): - """ - Propagates ReverseChannels operations up through nodes that we have rules for - """ - enabled = False - - propagation_rules = { - 'ScaleShift': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc), - 'Power': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc), - 'BatchNormalization': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc), - 'FakeQuantize': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc), - 'Multiply': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc), - 'Divide': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc), - 'Add': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc), - 'Subtract': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc), - 'Pow': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc), - 'Convert': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_eltwise(node, rc), - 'Pad': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_zero_port_only(node, rc), - 'Transpose': lambda node, rc: ReverseChannelsPropagationUp.lift_up_through_transpose(node, rc), - } - - @staticmethod - def lift_up_through_transpose(node: Node, reverse_channels: Node): - if node.in_port(1).disconnected() or node.in_port(0).disconnected(): 
- return False - order = node.in_port(1).data.get_value() - reverse_axis = reverse_channels.axis - - data_rank = len(list(node.in_port(0).data.get_shape())) - - if reverse_axis < 0: - reverse_axis = data_rank + reverse_axis - assert 0 < reverse_axis < data_rank, "Incorrect ReverseChannels axis in node {}.".format(reverse_channels) - - if order is None: - return False - new_axis = order[reverse_axis] - reverse_channels.axis = int64_array(new_axis) - return ReverseChannelsPropagationUp.lift_up_through_zero_port_only(node, reverse_channels) - - @staticmethod - def lift_up_through_zero_port_only(node: Node, reverse_channels: Node): - r""" - BEFORE AFTER - - previous_op - \ - previous_op previous_op ReverseChannels previous_op - \ / \ / - Node Node - | | - ReverseChannels next_op - | - next_op - - returns two objects: - first - boolean value whatever we should continue propagating current ReverseChannels operation up or not - second - list of ReverseChannels operations that were produced while propagating reverse_channels up - """ - if node.is_in_port_connected(0): - node_input_port_0 = node.in_port(0) - reverse_channels_out_nodes = reverse_channels.out_port(0).get_connection().get_destinations() - reverse_channels.out_port(0).disconnect() - reverse_channels.in_port(0).disconnect() - src = node_input_port_0.get_connection().get_source() - - if src.node.soft_get('type') == 'Parameter': - # For Parameter nodes tensor debug attributes should not move to the last node - # of subgraph. It is needed for the proper mapping of input framework name. - # For this reason "source" mode is used to keep tensor debug attributes at Parameter node. - node_input_port_0.get_connection().set_source(reverse_channels.out_port(0), - attributes_save_mode="source") - else: - node_input_port_0.get_connection().set_source(reverse_channels.out_port(0)) - - src.connect(reverse_channels.in_port(0)) - for reverse_channels_destination in reverse_channels_out_nodes: - node.out_port(0).get_connection().add_destination(reverse_channels_destination) - - return True, [reverse_channels] - return False, [] - - @staticmethod - def lift_up_through_eltwise(node: Node, reverse_channels: Node): - r""" - BEFORE AFTER - - previous_op previous_op' - \ / - previous_op previous_op' ReverseChannels ReverseChannels - \ / \ / - Eltwise Eltwise - | | - ReverseChannels next_op - | - next_op - - returns two objects: - first - boolean value whatever we should continue propagating current ReverseChannels operation up or not - second - list of new ReverseChannels operations that were produced while propagating reverse_channels up - """ - before_shape = reverse_channels.in_port(0).data.get_shape() - - port_axis = [] - for idx, port in node.in_ports().items(): - shape = port.data.get_shape() - - non_one_dims = np.where(shape != 1)[0] - if len(shape) == 0 or shape[reverse_channels.axis] == 1: - continue # nothing to flip for this input - if len(non_one_dims) == 1 and shape[non_one_dims.item()] == reverse_channels.order.size: - axis = non_one_dims.item() - elif np.array_equal(before_shape, shape): - axis = reverse_channels.axis - else: - # shape has multiple non-one values and shape is not fully broadcasted to value port shape - # it is safe not to propagate reverse channels - return False, [] - port_axis.append((port, axis)) - - copies = [] - for port, axis in port_axis: - reverse_channels_copy = reverse_channels.copy_node({'axis': mo_array(axis)}) - - src = port.get_connection().get_source() - if src.node.soft_get('type') == 'Parameter': - # For 
Parameter nodes tensor debug attributes should not move to the last node - # of subgraph. It is needed for the proper mapping of input framework name. - # For this reason "source" mode is used to keep tensor debug attributes at Parameter node. - port.get_connection().set_source(reverse_channels_copy.out_port(0), attributes_save_mode="source") - else: - port.get_connection().set_source(reverse_channels_copy.out_port(0)) - src.connect(reverse_channels_copy.in_port(0)) - - copies.append(reverse_channels_copy) - - reverse_channels.out_port(0).get_connection().set_source( - reverse_channels.in_port(0).get_connection().get_source()) - reverse_channels.in_port(0).disconnect() - - # propagated reverse_channels successfully through current node, will continue propagation - return True, copies - - def find_and_replace_pattern(self, graph: Graph): - reverse_channels = set(graph.get_op_nodes(op='ReverseChannels')) - while len(reverse_channels): - keep_moving_up = True - while keep_moving_up: - curr_reverse_channels = reverse_channels.pop() - if curr_reverse_channels.in_port(0).get_source().node.soft_get('type') not in self.propagation_rules: - break - next_op = curr_reverse_channels.in_port(0).get_source().node - keep_moving_up, new_reverses = self.propagation_rules[next_op.type](next_op, curr_reverse_channels) - reverse_channels.update(new_reverses) - - -class DecomposeReverseChannels(BackReplacementPattern): - """ - Replaces each internal ReverseChannels operation in graph with publicly supported Gather operation - """ - enabled = False - - @staticmethod - def replace_with_gather(node): - graph = node.graph - - name = node.soft_get('name', node.id) - axis = node.axis - order = node.order - - gather = create_op_with_const_inputs(graph, Gather, {1: order, 2: int64_array(axis)}, {'name': name}) - - node.out_port(0).get_connection().set_source(gather.out_port(0)) - node.in_port(0).get_connection().set_destination(gather.in_port(0)) - - @staticmethod - def replace_with_split_concat(node): - graph = node.graph - - name = node.soft_get('name', node.id) - axis = node.axis - order = node.order - - split = create_op_with_const_inputs(graph, Split, {1: int64_array(axis)}, - {'name': name + '/Split', 'num_splits': order.size}) - concat = Concat(graph, {'name': name + '/Concat', 'axis': axis, 'in_ports_count': order.size}).create_node() - - for out_port_idx, in_port_idx in enumerate(order): - split.out_port(out_port_idx).connect(concat.in_port(in_port_idx)) - - node.out_port(0).get_connection().set_source(concat.out_port(0)) - node.in_port(0).get_connection().set_destination(split.in_port(0)) - - graph.remove_node(node.id) - - def find_and_replace_pattern(self, graph: Graph): - for reverse_channels in graph.get_op_nodes(op='ReverseChannels'): - if reverse_channels.in_port(0).disconnected() or reverse_channels.out_port(0).disconnected(): - # graph.clean_up will delete it - reverse_channels['need_shape_inference'] = False - continue - self.replace_with_split_concat(reverse_channels) - - -class ApplyReverseChannels(BackReplacementPattern): - """ - Reverses input channels for suitable Parameter operation - Optimizes channel reversing by fusion to Convolution weights if applicable - """ - enabled = True - - run_not_recursively = True - force_clean_up = True - - def find_and_replace_pattern(self, graph: Graph): - """ - Following transformations should run in strict order, that is why we disabled them all and run here - """ - ReverseChannelsPropagationDown().find_and_replace_pattern(graph) - 
ReverseChannelsPropagationUp().find_and_replace_pattern(graph) - DecomposeReverseChannels().find_and_replace_pattern(graph) diff --git a/tools/mo/openvino/tools/mo/back/SelectBroadcast.py b/tools/mo/openvino/tools/mo/back/SelectBroadcast.py deleted file mode 100644 index cc2b2d7a5fecb2..00000000000000 --- a/tools/mo/openvino/tools/mo/back/SelectBroadcast.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.ReshapeMutation import ReshapeMutation -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class SelectBroadcast(BackReplacementPattern): - """ - Select broadcasting semantics in TF isn't numpy-like - broadcasting rules, manual reshape is needed. - For example: - condition: [1] - input_1: [1, 8] - input_2: [1, 8] - Condition should be aligned with first dimensions of inputs. - """ - enabled = True - - def run_before(self): - return [ReshapeMutation] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('op', dict(kind='op', op='Select'))], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - select = match['op'] - if select.has_valid('format') and select['format'] == 'tf': - condition = select.in_node(0) - input_1 = select.in_node(1) - - if len(condition.shape) == 1 and len(input_1.shape) > 1: - unsqueeze_op = create_op_node_with_second_input( - graph, Unsqueeze, int64_array(range(1, len(input_1.shape))), - {'name': select.name+'/Broadcast/'}, select.in_port(0).get_source().node) - - select.in_port(0).disconnect() - select.in_port(0).get_connection().set_source(unsqueeze_op.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/back/ShapeOfConstFolding.py b/tools/mo/openvino/tools/mo/back/ShapeOfConstFolding.py deleted file mode 100644 index 54b05f86f5a0f5..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ShapeOfConstFolding.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined - - -class ShapeOfConstFolding(BackReplacementPattern): - """ - The transformation folds ShapeOf(Const) -> Const - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.back.MatMulNormalizer import SmartReshape_HC_Reshape_MatMul - return [SmartReshape_HC_Reshape_MatMul] - - def find_and_replace_pattern(self, graph: Graph): - for shapeof_node in graph.get_op_nodes(op='ShapeOf'): - in_node = shapeof_node.in_port(0).get_source().node - if in_node.op == 'Const' or (shapeof_node.has_and_set('allow_fold') and is_fully_defined(shapeof_node.in_port(0).data.get_shape())): - shapeof_node.in_port(0).disconnect() - shape_name = shapeof_node.soft_get('name', shapeof_node.id) - shape_value = shapeof_node.out_port(0).data.get_value() - shape_const_node = Const(graph, {'name': shape_name + '/ExecutionConstValue', - 'value': shape_value}).create_node() - shapeof_node.out_port(0).get_connection().set_source(shape_const_node.out_port(0)) - rename_nodes([(shapeof_node, 
shape_name + '/TBD'), (shape_const_node, shape_name)]) diff --git a/tools/mo/openvino/tools/mo/back/ShuffleChannelPatternOptimization.py b/tools/mo/openvino/tools/mo/back/ShuffleChannelPatternOptimization.py deleted file mode 100644 index 069c6a9db2c337..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ShuffleChannelPatternOptimization.py +++ /dev/null @@ -1,266 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.back.FuseTransposesSequence import FuseTransposesSequence -from openvino.tools.mo.ops.depth_to_space import DepthToSpaceOp -from openvino.tools.mo.ops.shufflechannel import ShuffleChannels -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph - - -class ShuffleChannelPatternOptimization(BackReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - return [FuseTransposesSequence] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('t_start_order', {'type': 'Const'}), - ('t_start_order_d', - {'value': lambda v: v is not None and np.all(np.array_equal(v, [0, 2, 3, 1]))}), - ('t_start', {'type': 'Transpose'}), - ('t_start_d', {}), - - ('reshape_dim', {'type': 'Const'}), - ('reshape_dim_d', - {'value': lambda v: v is not None and v.size == 5 and np.all(v[0] == -1)}), - ('reshape_start', {'type': 'Reshape'}), - ('reshape_start_d', {}), - - ('t_5d_order', {'type': 'Const'}), - ('t_5d_order_d', {'value': lambda v: v is not None and np.all(np.array_equal(v, [0, 1, 2, 4, 3]))}), - ('t_5d', {'type': 'Transpose'}), - ('t_5d_d', {}), - - ('reshape_1_dim', {'type': 'Const'}), - ('reshape_1_dim_d', {'value': lambda v: v is not None and v.size == 4 and np.all(v[0] == -1)}), - ('reshape_end', {'type': 'Reshape'}), - ('reshape_end_d', {}), - - ('t_end_order', {'type': 'Const'}), - ('t_end_order_d', {'value': lambda v: v is not None and np.all(np.array_equal(v, [0, 3, 1, 2]))}), - ('t_end', {'type': 'Transpose'}), - ], - edges=[ - ('t_start_order', 't_start_order_d'), - ('t_start_order_d', 't_start', {'in': 1}), - ('t_start', 't_start_d'), - - ('reshape_dim', 'reshape_dim_d'), - ('t_start_d', 'reshape_start', {'in': 0}), - ('reshape_dim_d', 'reshape_start', {'in': 1}), - ('reshape_start', 'reshape_start_d'), - - ('t_5d_order', 't_5d_order_d'), - ('reshape_start_d', 't_5d', {'in': 0}), - ('t_5d_order_d', 't_5d', {'in': 1}), - ('t_5d', 't_5d_d'), - - ('reshape_1_dim', 'reshape_1_dim_d'), - ('t_5d_d', 'reshape_end', {'in': 0}), - ('reshape_1_dim_d', 'reshape_end', {'in': 1}), - ('reshape_end', 'reshape_end_d'), - - ('t_end_order', 't_end_order_d'), - ('reshape_end_d', 't_end', {'in': 0}), - ('t_end_order_d', 't_end', {'in': 1}), - ], - ) - - @staticmethod - def feature_dim_splitted(short_shape, long_shape): - return all([short_shape[i] == long_shape[i] for i in range(len(short_shape) - 1)]) and \ - short_shape[-1] == long_shape[-1] * long_shape[-2] - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - reshape_5d = match['reshape_start'] - if not ShuffleChannelPatternOptimization.feature_dim_splitted( - short_shape=reshape_5d.in_port(0).data.get_shape(), long_shape=reshape_5d.out_port(0).data.get_shape()): - return - - reshape_4d = match['reshape_end'] - if not ShuffleChannelPatternOptimization.feature_dim_splitted( - short_shape=reshape_4d.out_port(0).data.get_shape(), 
long_shape=reshape_4d.in_port(0).data.get_shape()): - return - - start = match['t_start'] - end = match['t_end'] - - new_start = match['reshape_start'] - new_end = match['reshape_end'] - - start_source = start.in_port(0).get_connection().get_source() - end_connection = end.out_port(0).get_connection() - - new_end.out_port(0).disconnect() - end_connection.set_source(new_end.out_port(0)) - - start.in_port(0).disconnect() - new_start.in_port(0).disconnect() - - new_start.in_port(0).connect(start_source) - - match['reshape_dim']['value'] = int64_array(np.take(new_start.in_port(1).data.get_value(), [0, 3, 4, 1, 2])) - match['reshape_dim'].infer(match['reshape_dim']) - new_start.infer(new_start) - - match['t_5d_order']['value'] = int64_array([0, 2, 1, 3, 4]) - match['t_5d_order'].infer(match['t_5d_order']) - match['t_5d'].infer(match['t_5d']) - - match['reshape_1_dim']['value'] = int64_array(np.take(new_end.in_port(1).data.get_value(), [0, 3, 1, 2])) - match['reshape_1_dim'].infer(match['reshape_1_dim']) - - -class ShuffleChannelFusion(BackReplacementPattern): - """ - FUSION: Reshape->Transpose->Reshape to ShuffleChannel - We are able to perform the fusion if the pattern satisfies the conditions: - 1. Pattern input 4D shape is the same as pattern output 4D shape - 2. First Reshape splits channel dimension (1 axis) into two dimensions - 3. Transpose permutes only split dimensions - 4. Second Reshape pack them back - - Fixes original models reshape-ability (Smart reshape) - """ - enabled = True - force_clean_up = True - - def run_after(self): - return [FuseTransposesSequence] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('reshape_0_pattern', dict(type='Const')), - ('reshape_0_pattern_d', dict(value=lambda v: v is not None and v.size == 5 and np.all(v > 0))), - ('reshape_0', dict(type='Reshape')), - ('reshape_0_d', dict()), - - ('order', dict(type='Const')), - ('order_d', dict(value=lambda v: v is not None and np.array_equal([0, 2, 1, 3, 4], v))), - ('transpose', dict(type='Transpose')), - ('transpose_d', {}), - - ('reshape_1_pattern', dict(type='Const')), - ('reshape_1_pattern_d', dict(value=lambda v: v is not None and v.size == 4 and np.all(v > 0))), - ('reshape_1', dict(type='Reshape')), - ], - edges=[ - ('reshape_0_pattern', 'reshape_0_pattern_d'), - ('reshape_0_pattern_d', 'reshape_0'), - ('reshape_0', 'reshape_0_d'), - ('reshape_0_d', 'transpose'), - ('order', 'order_d'), - ('order_d', 'transpose'), - ('transpose', 'transpose_d'), - ('transpose_d', 'reshape_1'), - ('reshape_1_pattern', 'reshape_1_pattern_d'), - ('reshape_1_pattern_d', 'reshape_1'), - ] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - channel_splitting_reshape = match['reshape_0'] - channel_concating_reshape = match['reshape_1'] - - initial_shape = channel_splitting_reshape.in_port(0).data.get_shape() - resulting_shape = channel_concating_reshape.in_port(1).data.get_value() - if not np.array_equal(initial_shape, resulting_shape): - return - - channel_splitted_out_shape = channel_splitting_reshape.in_port(1).data.get_value() - if not all([initial_shape[i] == channel_splitted_out_shape[j] for i, j in {0: 0, 2: 3, 3: 4}.items()]): - return - - name = channel_concating_reshape.soft_get('name', channel_concating_reshape.id) - group = channel_splitted_out_shape[1] - shuffle_channel = ShuffleChannels(graph, {'name': name, 'group': group}).create_node() - channel_concating_reshape.out_port(0).get_connection().set_source(shuffle_channel.out_port(0)) - 
shuffle_channel.in_port(0).connect(channel_splitting_reshape.in_port(0).get_source()) - - -class DepthToSpaceFusion(BackReplacementPattern): - """ - FUSION: Reshape->Transpose->Reshape to DepthToSpace - We are able to perform the fusion if the pattern satisfies the conditions: - 1. Pattern has 6D input and 4D output - 2. First Reshape splits channel dimension (1 axis) into three dimensions [new_depth, block_size, block_size] - 3. Transpose permutes split dimensions with spatial ones - 4. Second Reshape pack block size together with spatial dimension - - Fixes original models reshape-ability (Smart reshape) - """ - enabled = True - force_clean_up = True - - def run_after(self): - return [FuseTransposesSequence] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('reshape_0_pattern', dict(type='Const')), - ('reshape_0_pattern_d', dict(value=lambda v: v is not None and v.size == 6 and np.all(v > 0))), - ('reshape_0', dict(type='Reshape')), - ('reshape_0_d', dict()), - - ('order', dict(type='Const')), - ('order_d', dict(value=lambda v: v is not None and np.array_equal([0, 1, 4, 2, 5, 3], v))), - ('transpose', dict(type='Transpose')), - ('transpose_d', {}), - - ('reshape_1_pattern', dict(type='Const')), - ('reshape_1_pattern_d', dict(value=lambda v: v is not None and v.size == 4 and np.all(v > 0))), - ('reshape_1', dict(type='Reshape')), - ], - edges=[ - ('reshape_0_pattern', 'reshape_0_pattern_d'), - ('reshape_0_pattern_d', 'reshape_0'), - ('reshape_0', 'reshape_0_d'), - ('reshape_0_d', 'transpose'), - ('order', 'order_d'), - ('order_d', 'transpose'), - ('transpose', 'transpose_d'), - ('transpose_d', 'reshape_1'), - ('reshape_1_pattern', 'reshape_1_pattern_d'), - ('reshape_1_pattern_d', 'reshape_1'), - ] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - channel_splitting_reshape = match['reshape_0'] - channel_concating_reshape = match['reshape_1'] - - initial_shape = channel_splitting_reshape.in_port(0).data.get_shape() - resulting_shape = channel_concating_reshape.in_port(1).data.get_value() - if initial_shape[0] != resulting_shape[0]: - return - - channel_splitted_out_shape = channel_splitting_reshape.in_port(1).data.get_value() - if not all([initial_shape[i] == channel_splitted_out_shape[j] for i, j in {0: 0, 2: 4, 3: 5}.items()]) or \ - channel_splitted_out_shape[1] != channel_splitted_out_shape[2]: - return - block_size = channel_splitted_out_shape[2] - expected_output_shape = [initial_shape[0], initial_shape[1] // (block_size * block_size), - initial_shape[2] * block_size, initial_shape[3] * block_size] - if not np.array_equal(expected_output_shape, resulting_shape): - return - - name = channel_concating_reshape.soft_get('name', channel_concating_reshape.id) - depth_to_space = DepthToSpaceOp(graph, - {'name': name, 'block_size': block_size, 'mode': 'depth_first'}).create_node() - channel_concating_reshape.out_port(0).get_connection().set_source(depth_to_space.out_port(0)) - depth_to_space.in_port(0).connect(channel_splitting_reshape.in_port(0).get_source()) diff --git a/tools/mo/openvino/tools/mo/back/SpecialNodesFinalization.py b/tools/mo/openvino/tools/mo/back/SpecialNodesFinalization.py deleted file mode 100644 index bfbe8d2f08bbbb..00000000000000 --- a/tools/mo/openvino/tools/mo/back/SpecialNodesFinalization.py +++ /dev/null @@ -1,218 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import re -from collections import defaultdict - -import numpy as np - -from 
openvino.tools.mo.back.pass_separator import BackFinish -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.tensor_iterator import TensorIterator -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.runtime_info import RTInfo -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class RemoveConstOps(BackReplacementPattern): - enabled = False - - def run_after(self): - return [BackFinish] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(type='Const'): - graph.remove_edge(node.id, node.out_node().id) - graph.remove_node(node.id) - - -class CreateConstNodesReplacement(BackReplacementPattern): - enabled = False - - def run_before(self): - return [] - - def run_after(self): - return [RemoveConstOps] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('data', dict(kind='data')) - ], - edges=[] - ) - - @staticmethod - def _check_bin_attrs(node): - """Check that at least one output edge from node without 'bin' attribute.""" - out_edges = node.out_edges() - bin_in_out_ports = ['bin' in edge for edge in out_edges] - out_node = [node.has('op') and node.op == 'Result' for node in node.out_nodes()] - return np.any(out_node) or not np.all(bin_in_out_ports) - - @staticmethod - def _check_that_node_from_body(node): - """Check that all output edges from node have 'internal_port_id' - (that shows that this node is from TI body)""" - n_ports = len(node.out_edges()) - internal_port_in_out_ports = ['internal_port_id' in edge for edge in node.out_edges()] - return np.any(internal_port_in_out_ports) and n_ports - - def replace_pattern(self, graph: Graph, match: dict): - """ - Adds layers with type 'Const' that produce blob from 'bin' file. The pass finds data nodes with one output which - doesn't have edge with 'bin' attribute (or with two outputs and at least one output doesn't have 'bin' attr) - and generate Const op node before the node and data node before the Const node. The data node before 'Const' - node is needed because the op node dumps input tensors to bin file. - """ - node = match['data'] - if len(node.in_nodes()) > 0: - return - - if self._check_bin_attrs(node): - if node.has_valid('value'): - const_node_name = node.soft_get('name', node.id) - const_node_name = graph.unique_id(re.sub(r'\/Output_\d+\/Data_(.?)+', '', const_node_name)) - log.debug("Added Const node '{}'".format(const_node_name)) - const_node = Const(graph, {'name': const_node_name, 'value': node.value, - 'force_shape': node.soft_get('force_shape', None), - 'override_output_shape': node.has_valid('force_shape'), - 'force_type': node.soft_get('force_type', None), - 'correct_data_type': node.soft_get('correct_data_type', None), - 'rt_info': node.soft_get('rt_info', RTInfo()), - }).create_node() - const_node.add_input_port(0) - graph.add_edges_from([(const_node_name, node.id, {'out': 0})]) - - node_copy = node.copy_node() - const_node.type_infer(const_node) - graph.add_edges_from([(node_copy.id, const_node_name, {'in': 0, 'bin': 'custom'})]) - elif not self._check_that_node_from_body(node): - log.debug('node = {}'.format(node.graph.node[node.id])) - raise Error( - 'Discovered data node without inputs and value, node.name = {}, consumer.name = {}. 
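The re.sub call in CreateConstNodesReplacement strips the auto-generated '/Output_<n>/Data_<suffix>' tail from the data-node name before it is uniquified; a quick standalone illustration with a made-up name:

import re

name = "my_weights/Output_0/Data_1"                    # hypothetical data-node name
print(re.sub(r'\/Output_\d+\/Data_(.?)+', '', name))   # -> my_weights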
' + - refer_to_faq_msg(23), - node.soft_get('name'), - node.out_node().soft_get('name') if len(node.out_nodes()) else "" - ) - - -class NormalizeTI(BackReplacementPattern): - """ - Transformation changes linking mechanism of TensorIterator outer graph with inner graph - from linking outer graph node ports with inner Parameter and Result operations - to linking outer graph node ports with functional operations and their input/output ports - - 1. Updating `input_port_map`, `output_port_map` and `back_edges` maps - 2. Removing Parameter/Input operation nodes - - NOTE: Result operation will be removed by a separate transformation - """ - enabled = False - - @staticmethod - def maps_uniqueization(ti): - assert ti.has_valid('input_port_map') - assert ti.has_valid('output_port_map') - assert ti.has_valid('back_edges') - - ti.input_port_map = [dict(unique_r) for unique_r in set([tuple(rec.items()) for rec in ti.input_port_map])] - ti.output_port_map = [dict(unique_r) for unique_r in set([tuple(rec.items()) for rec in ti.output_port_map])] - ti.back_edges = [dict(unique_rec) for unique_rec in set([tuple(rec.items()) for rec in ti.back_edges])] - - @staticmethod - def external_nodes_normalization(ti): - """ - TensorIterator external ports may have several internal layer connections. - - Current transformation does the following: - - normalizes port maps (eliminating duplicated records) - - replicates external input/output port for each internal Parameter/Result it is connected to - - updates input and output port maps according to previous step replications - """ - - def update_external_port_id(ti, port_type, old_external_port_id, new_external_port_id, internal_layer_id): - assert port_type in ['in', 'out'] - - port_map = ti.input_port_map if port_type == 'in' else ti.output_port_map - for record in port_map: - if record['external_port_id'] == old_external_port_id and \ - record['internal_layer_id'] == internal_layer_id: - record['external_port_id'] = new_external_port_id - - NormalizeTI.maps_uniqueization(ti) - - body = ti.body - - external_input_ports = defaultdict(list) - for record in ti.input_port_map: - assert 'external_port_id' in record - external_input_ports[record['external_port_id']].append(record) - - for external_port_id, record_list in external_input_ports.items(): - if len(record_list) == 1: - continue - - real_external_port_id = TensorIterator.special_port_to_real_port(ti, external_port_id, 'in') - source = ti.in_port(real_external_port_id).get_source() - - for record in record_list[1:]: - assert 'internal_layer_id' in record - - new_real_input_port_id = max(map(int, ti.in_ports().keys())) + 1 - new_external_port_id = max([int(d['external_port_id']) for d in - list(ti.in_edges().values()) + list(ti.out_edges().values())]) + 1 - - ti.add_input_port(new_real_input_port_id) - source.connect(ti.in_port(new_real_input_port_id)) - - ti.in_edge(new_real_input_port_id)['external_port_id'] = new_external_port_id - update_external_port_id(ti, 'in', external_port_id, new_external_port_id, record['internal_layer_id']) - - external_output_ports = defaultdict(list) - for record in ti.output_port_map: - assert 'external_port_id' in record - external_output_ports[record['external_port_id']].append(record) - - for external_port_id, record_list in external_output_ports.items(): - if len(record_list) == 1: - continue - - real_external_port_id = TensorIterator.special_port_to_real_port(ti, external_port_id, 'out') - dsts = ti.out_port(real_external_port_id).get_destinations() - - for record in 
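maps_uniqueization above de-duplicates port-map records by converting each dict into a tuple of its items; a minimal sketch with plain dictionaries:

records = [{'external_port_id': 1, 'internal_layer_id': 3},
           {'external_port_id': 1, 'internal_layer_id': 3},
           {'external_port_id': 2, 'internal_layer_id': 5}]
unique = [dict(t) for t in set(tuple(r.items()) for r in records)]
assert len(unique) == 2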
record_list[1:]: - assert 'internal_layer_id' in record - - new_real_output_port_id = max(map(int, ti.out_ports().keys())) + 1 - new_external_port_id = max([int(d['external_port_id']) for d in - list(ti.in_edges().values()) + list(ti.out_edges().values())]) + 1 - - ti.add_output_port(new_real_output_port_id) - for dst in dsts: - ti.out_port(new_real_output_port_id).connect(dst) - - update_external_port_id(ti, 'out', external_port_id, new_external_port_id, record['internal_layer_id']) - - body.clean_up() - - def find_and_replace_pattern(self, graph: Graph): - for ti in graph.get_op_nodes(type='TensorIterator'): - self.external_nodes_normalization(ti) - - if len([record for record in ti.input_port_map if record.get('axis') is not None]) == 0: - for record in ti.output_port_map: - if record.get('axis') is not None: - record['start'] = 0 - real_output_port = TensorIterator.special_port_to_real_port(ti, record['external_port_id'], 'out') - output_shape = ti.out_port(real_output_port).data.get_shape() - assert output_shape is not None - record['end'] = output_shape[record['axis']] diff --git a/tools/mo/openvino/tools/mo/back/StridedSliceMasksNormalizer.py b/tools/mo/openvino/tools/mo/back/StridedSliceMasksNormalizer.py deleted file mode 100644 index 44354347a39bfe..00000000000000 --- a/tools/mo/openvino/tools/mo/back/StridedSliceMasksNormalizer.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph - - -class StridedSliceMasksNormalizer(BackReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.back.ConvolutionNormalizer import DeconvolutionNormalizer - from openvino.tools.mo.back.CropToStridedSlice import CropToStridedSlice - return [CropToStridedSlice, DeconvolutionNormalizer] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(type='StridedSlice'): - assert node.has_valid('begin_mask') - assert node.has_valid('end_mask') - node.begin_mask = int64_array([1 - i for i in node.begin_mask]) - node.end_mask = int64_array([1 - i for i in node.end_mask]) diff --git a/tools/mo/openvino/tools/mo/back/TopKNormalizer.py b/tools/mo/openvino/tools/mo/back/TopKNormalizer.py deleted file mode 100644 index 8d2aa371700368..00000000000000 --- a/tools/mo/openvino/tools/mo/back/TopKNormalizer.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.result import Result - - -class TopKNormalizer(BackReplacementPattern): - """ - The transformation converts the second input to the TopK layer from 0D to 1D. - - Also the transformation adds the Result Op if there are no consumers of TopK outputs. However the Result for output - with values is not added if the node has attribute 'remove_values_output' which is set to True for Caffe models - where ArgMax does not have separate output with values. 
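For StridedSliceMasksNormalizer above, the whole transformation is a per-element flip of the begin/end masks, switching between the two opposite mask conventions; for example:

begin_mask = [1, 0, 1, 1]
end_mask = [0, 0, 1, 0]
print([1 - i for i in begin_mask])   # -> [0, 1, 0, 0]
print([1 - i for i in end_mask])     # -> [1, 1, 0, 1]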
- - TODO this pass should be removed when OV supports 0D tensors. - """ - enabled = True - - @staticmethod - def pattern(): - return dict( - nodes=[('result', {'type': 'TopK'})], - edges=[], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['result'] - - reshape = create_op_node_with_second_input(graph, Reshape, int64_array([]), {'override_output_shape': True}) - node.in_port(1).get_connection().insert_node(reshape) - - TopKNormalizer.normalize_outputs(node) - - @staticmethod - def normalize_outputs(node: Node): - """ - This function adds missed outputs for TopK node. - """ - if node.out_port(0).disconnected(): - output = Result(node.graph, {'name': node.name + '/Result_port_0/', - 'keep_output_port': node.has_and_set('remove_values_output')}).create_node() - node.out_port(0).get_connection().set_destination(output.in_port(0)) - if node.out_port(1).disconnected(): - output = Result(node.graph, {'name': node.name + '/Result_port_1/'}).create_node() - node.out_port(1).get_connection().set_destination(output.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/back/TransposeDFT.py b/tools/mo/openvino/tools/mo/back/TransposeDFT.py deleted file mode 100644 index 7eb89c34fe37d8..00000000000000 --- a/tools/mo/openvino/tools/mo/back/TransposeDFT.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class TransposeDFT(BackReplacementPattern): - """ - In TF models, operation (I)FFTxD has some input shape, [N_0, ..., N_{r - 1}]. - - After the transformation SSliceComplexRolledFFTPackBlockReplacement, we have an input shape [N_0, ..., N_{r - 1}, 2] - for operation DFT or IDFT. - - If the input rank in the TF model was greater than 2, we have [N_0, 2, N_1, ..., N_{r - 1}] as the input shape of - (I)DFT after the layout conversion. - - But, generally speaking, according to DFT and IDFT specifications, the input shape [N_0, 2, N_1, ..., N_{r - 1}] - is not correct input shape for DFT and IDFT. Hence, we need to insert Transpose operations before and after (I)DFT - in such cases. - - This transformation inserts such Transpose nodes, when the source model was the TF model, (I)DFT node has the - attribute 'need_insert_transposes_for_dft', and this attribute is True. 
- """ - enabled = True - force_shape_inference = True - graph_condition = [lambda graph: graph.graph['fw'] == 'tf'] - - def find_and_replace_pattern(self, graph: Graph): - import openvino.tools.mo.middle.InsertLayoutPropagationTransposes as InsertTransposes - for dft in graph.get_op_nodes(need_insert_transposes_for_dft=True): - InsertTransposes.insert_transpose(graph, dft.in_port(0), before_input=True) - InsertTransposes.insert_transpose(graph, dft.out_port(0), before_input=False) diff --git a/tools/mo/openvino/tools/mo/back/TransposeReduceFusing.py b/tools/mo/openvino/tools/mo/back/TransposeReduceFusing.py deleted file mode 100644 index af791795cc6087..00000000000000 --- a/tools/mo/openvino/tools/mo/back/TransposeReduceFusing.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from typing import Dict - -import numpy as np - -from openvino.tools.mo.back.FuseTransposesSequence import FuseTransposesSequence -from openvino.tools.mo.back.ReduceMerge import ReduceMerge -from openvino.tools.mo.ops.ReduceOps import reduce_map -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node - - -class TransposeReduce(BackReplacementPattern): - """ - Fuse Transpose--->Reduce to Reduce with correct reduce axis input - """ - # TODO: Make another implementation, this is a temporary solution for one case - enabled = True - force_clean_up = True - - def run_before(self): - return [ReduceMerge] - - def run_after(self): - return [FuseTransposesSequence] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('transpose_const', dict(kind='op', type='Const', value=lambda v: v is not None and - np.array_equal(v, int64_array([0, 2, 3, 1])))), - ('transpose_const_data', dict(kind='data')), - ('transpose', dict(kind='op', type='Transpose')), - ('transpose_data', dict(kind='data')), - ('reduce_const', dict(kind='op', type='Const', value=lambda v: v is not None and - np.array_equal(v, int64_array([1, 2])))), - ('reduce_const_data', dict(kind='data')), - ('reduce', dict(kind='op', type=lambda t: t in reduce_map.keys(), keep_dims=False)) - ], - edges=[ - ('transpose_const', 'transpose_const_data'), - ('transpose_const_data', 'transpose', {'in': 1}), - ('transpose', 'transpose_data'), - ('transpose_data', 'reduce', {'in': 0}), - ('reduce_const', 'reduce_const_data'), - ('reduce_const_data', 'reduce', {'in': 1}) - ] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: Dict[str, Node]): - transpose = match['transpose'] - reduce = match['reduce'] - gather = create_op_with_const_inputs(graph, op=Gather, port_value_dict={2: int64_array(0)}, - op_attrs={'name': reduce.name + 'Gather'}) - - transpose.in_port(1).get_connection().set_destination(gather.in_port(0)) - reduce.in_port(1).get_connection().set_destination(gather.in_port(1)) - - gather.out_port(0).connect(reduce.in_port(1)) - transpose.out_port(0).disconnect() - transpose.in_port(0).get_connection().set_destination(reduce.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/back/UselessConcatRemoval.py b/tools/mo/openvino/tools/mo/back/UselessConcatRemoval.py deleted file mode 100644 index 3c0e980dbb4406..00000000000000 --- a/tools/mo/openvino/tools/mo/back/UselessConcatRemoval.py +++ /dev/null @@ -1,34 +0,0 @@ -# 
Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.back.ResultNormalizer import ResultNormalizer -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class UselessConcatRemoval(BackReplacementPattern): - """ - Transformation looks for the Concat nodes with just one input and remove them from the graph. - """ - enabled = True - run_not_recursively = True - - def run_before(self): - return [ResultNormalizer] - - @staticmethod - def pattern(): - return dict( - nodes=[('concat', {'kind': 'op', 'type': 'Concat'})], - edges=[], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - concat_node = match['concat'] - connected_ports = [port for port in concat_node.in_ports().values() if not port.disconnected()] - if len(connected_ports) == 1: - log.debug('Concat node {} has just one input. Removing it.'.format(concat_node.name)) - concat_node.out_port(0).get_connection().set_source(connected_ports[0].get_connection().get_source()) diff --git a/tools/mo/openvino/tools/mo/back/__init__.py b/tools/mo/openvino/tools/mo/back/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/back/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/back/add_outputs_recursive.py b/tools/mo/openvino/tools/mo/back/add_outputs_recursive.py deleted file mode 100644 index d7db36d170f012..00000000000000 --- a/tools/mo/openvino/tools/mo/back/add_outputs_recursive.py +++ /dev/null @@ -1,324 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from math import ceil -import numpy as np - -from openvino.tools.mo.ops.If import If -from openvino.tools.mo.ops.loop import Loop -from openvino.tools.mo.ops.tensor_iterator import TensorIterator -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.result import Result -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -def ti_set_output_port_shape(cycle_node, internal_id, port_num, iterations_count, axis): - int_node_name = TensorIterator.find_internal_layer_id(cycle_node.body, internal_id) - int_node = Node(cycle_node.body, int_node_name) - assert int_node.op == 'Result' - out_shape = int_node.in_port(0).data.get_shape().copy() - # inside cycle node Unsqueeze was added to have the first dimension for concatenating results along it - assert len(out_shape) >= 1 - if axis is not None: - out_shape[axis] = iterations_count - - assert port_num in cycle_node.out_ports() - cycle_node.out_port(port_num).data.set_shape(out_shape) - - -def get_iterations_count_from_output_record(output_rec): - def check_field(record, field): - return field in record and record[field] is not None - - # 1. check if we need to concatenate iteration results for given output - if not check_field(output_rec, 'axis'): - # in this case we do not concatenate outputs, so iterations count is not needed really - return None - - # 2. 
check if given output record contains values for 'end', so iterations count can be calculated from this record - if check_field(output_rec, 'end') and check_field(output_rec, 'start') and \ - ((output_rec['start'] >= 0 and output_rec['end'] >= 0) or - (output_rec['start'] < 0 and output_rec['end'] < 0)): - stride = output_rec['stride'] if check_field(output_rec, 'stride') else 1 - # get iterations count from output record - iterations_count = ceil((output_rec['end'] - output_rec['start']) / stride) - return iterations_count - - return dynamic_dimension_value - - -# shape inference for TensorIterator -# copy shapes from internal nodes + insert correct iterations count where needed -def ti_infer(step_node, port_num): - out_port_map = step_node.output_port_map - port_num = port_num + len(step_node.in_ports()) - # find out which internal layer maps to port_num - found_rec = None - for record in out_port_map: - if record['external_port_id'] == port_num: - found_rec = record - break - assert found_rec is not None, \ - "External port {} is not connected with body in node {}".format(port_num, - step_node.soft_get('name', step_node.id)) - - port_num = port_num - len(step_node.in_ports()) - - # find out iterations count for TensorIterator to set output shape correctly - - iterations_count = get_iterations_count_from_output_record(found_rec) - if iterations_count is dynamic_dimension_value: - iterations_count = TensorIterator.find_iterations_count_for_output(step_node) - - ti_set_output_port_shape(step_node, found_rec['internal_layer_id'], port_num, iterations_count, - found_rec['axis']) - - -# shape inference for Loop -# copy shapes from internal nodes + insert correct iterations count where needed -# iterations count always in the first dimension -def loop_infer(step_node, port_num): - out_port_map = step_node.output_port_map - int_layer_id = None - iterations_count = Loop.iterations_count(step_node) - for record in out_port_map: - if record['external_port_id'] == port_num: - int_layer_id = record['internal_layer_id'] - - ti_set_output_port_shape(step_node, int_layer_id, port_num, iterations_count, 0) - - -def max_internal_layer_id(graph): - max_int_layer_id = 0 - for n in graph.get_op_nodes(): - if n.has_and_set('internal_layer_id') and n.internal_layer_id > max_int_layer_id: - max_int_layer_id = n.internal_layer_id - return max_int_layer_id - - -# Add Result (and Unsqueeze is add_unsqueeze=True) to node port port_num in graph cur_graph. -# New nodes will have internal id equal to cur_max_layer_id + 1 (and cur_max_layer_id + 2 if 2 nodes were added) -# New nodes will be inserted in tracks on position i. 
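get_iterations_count_from_output_record boils down to ceil((end - start) / stride) when both bounds are known and concatenation is requested; a simplified standalone sketch (field names follow the port-map records, the fallback string stands in for dynamic_dimension_value):

from math import ceil

def iterations_count(record):
    if record.get('axis') is None:
        return None                      # results are not concatenated, count is irrelevant
    start, end = record.get('start'), record.get('end')
    if start is not None and end is not None:
        stride = record.get('stride') or 1
        return ceil((end - start) / stride)
    return 'dynamic'                     # stand-in for dynamic_dimension_value

assert iterations_count({'axis': 0, 'start': 0, 'end': 10, 'stride': 2}) == 5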
-def add_output_in_body(node, port_num, cur_graph, cur_max_layer_id, tracks, track_index, add_unsqueeze=True): - port = node.out_port(port_num) - if add_unsqueeze: - unsq_name = port.node.soft_get('name', port.node.id) + "/Unsqueeze" - unsq_node = create_op_node_with_second_input(cur_graph, Unsqueeze, - int64_array([0]), - {'name': unsq_name}) - port.connect(unsq_node.in_port(0)) - unsq_node['internal_layer_id'] = cur_max_layer_id + 1 - cur_max_layer_id += 1 - tracks.insert(track_index, {'node': unsq_node, 'graph': cur_graph}) - port = unsq_node.out_port(0) - - out_name = port.node.soft_get('name', port.node.id) + ":" + str(port_num) - res_node = Result(cur_graph, {'name': out_name}).create_node() - port.connect(res_node.in_port(0)) - res_node['internal_layer_id'] = cur_max_layer_id + 1 - cur_max_layer_id += 1 - tracks.insert(track_index, {'node': res_node, 'graph': cur_graph}) - - return res_node, tracks, cur_max_layer_id - - -class AddOutputRecursive(BackReplacementPattern): - """ - Add output to node inside loops. Path to node set in 'additional_outputs' attribute of graph. - Path structure: [node_loop_1, loop_2_in_loop_1,.., if_node, [then_list, else_list]] - After if operation should be sub-list with 2 elements then_list and else_list where each is one node or list of - nodes in according path - For cycles results from all iterations will be concatenated along 0 dimension. - """ - enabled = False - run_not_recursively = True - - @staticmethod - def add_output_for_path(graphs_nodes_path): - # add output to nodes according to path - step_node = graphs_nodes_path[-1]['node'] - cur_graph = graphs_nodes_path[-1]['graph'] - - ports_to_add_nodes = [] - for o_p in step_node.out_ports(): - ports_to_add_nodes.append(o_p) - - # update internal_layer_id for new Results - for i in range(len(graphs_nodes_path)-1, 0, -1): - cur_max_layer_id = max_internal_layer_id(cur_graph) + 1 - cur_loop_node = graphs_nodes_path[i-1]['node'] - new_out_ports = [] - if cur_loop_node.op is not 'If': - # add Unsqueeze and Result for TensorIterator and Loop and update output_port_map - for p_num in ports_to_add_nodes: - res_node, graphs_nodes_path, cur_max_layer_id = add_output_in_body(step_node, p_num, cur_graph, - cur_max_layer_id, - graphs_nodes_path, i) - - # IR reader fix output port map for Loop, but have not change for TensorIterator - new_port_id = len(cur_loop_node.out_ports()) - if cur_loop_node.op == 'TensorIterator': - new_port_id = new_port_id + len(cur_loop_node.in_ports()) - cur_loop_node.output_port_map.append({'axis': 0, 'stride': 1, 'part_size': 1, 'start': 0, - 'end': -1, 'external_port_id': new_port_id, - 'internal_layer_id': res_node['internal_layer_id']}) - port_id = new_port_id - if cur_loop_node.op == 'TensorIterator': - port_id = port_id - len(cur_loop_node.in_ports()) - - new_out_ports.append(port_id) - cur_loop_node.add_output_port(port_id) - else: - # add Result nodes for If and update output_id - for p_num in ports_to_add_nodes: - res_node, graphs_nodes_path, cur_max_layer_id = add_output_in_body(step_node, p_num, cur_graph, - cur_max_layer_id, - graphs_nodes_path, i, - add_unsqueeze=False) - - if cur_loop_node.then_graph == cur_graph: - new_port_id = len(cur_loop_node.out_ports()) - res_node['output_id'] = new_port_id - cur_loop_node.add_output_port(new_port_id) - new_out_ports.append(new_port_id) - else: - res_node['output_id'] = list(cur_loop_node.out_ports().keys())[-1] - ports_to_add_nodes = new_out_ports - step_node = cur_loop_node - cur_graph = graphs_nodes_path[i-1]['graph'] - - i = 
0 - for p_num in ports_to_add_nodes: - port = step_node.out_port(p_num) - out_name = step_node.soft_get('name', step_node.id) + "." + str(p_num) - res_node = Result(cur_graph, {'name': out_name}).create_node() - port.connect(res_node.in_port(0)) - # add name of Result to fw_tensor_debug_info to avoid renaming - if step_node.out_nodes()[p_num].has_and_set('fw_tensor_debug_info'): - step_node.out_nodes()[p_num]['fw_tensor_debug_info'].append(out_name) - else: - step_node.out_nodes()[p_num]['fw_tensor_debug_info'] = [[out_name, out_name]] - if step_node.op == 'TensorIterator': - step_node.out_edges()[len(step_node.out_edges())-1]['external_port_id'] = p_num + \ - len(step_node.in_ports()) - graphs_nodes_path.insert(0, {'node': res_node, 'graph': cur_graph}) - i += 1 - return graphs_nodes_path - - @staticmethod - def infer_shapes_of_nodes_in_path(graphs_nodes_path): - # update shape for new or updated nodes - for i in range(len(graphs_nodes_path) - 1, -1, -1): - step_node = graphs_nodes_path[i]['node'] - # update shapes for Loop, TI, If, Unsqueeze - # Result to end node in path added to existing port with already calculated shapes - for p_num in step_node.out_ports(): - if not step_node.out_port(p_num).disconnected(): - if step_node.op == 'TensorIterator': - ti_infer(step_node, p_num) - elif step_node.op == 'Loop': - loop_infer(step_node, p_num) - elif step_node.op == 'Unsqueeze': - assert step_node.in_port(1).get_source().node.has('value') - axis = step_node.in_port(1).get_source().node.value[0] - out_shape = list(step_node.in_port(0).get_source().data.get_shape()) - out_shape.insert(axis, 1) - step_node.out_port(p_num).data.set_shape(out_shape) - elif step_node.op == 'If': - If.update_if_output_ports_shape(step_node) - - @staticmethod - def split_path_to_simple_tracks(graph, path): - # Split complex path into simple linear tracks. - # In path after If node list with 2 sub-lists should be. In this function such path is split into 2 tracks: - # one for each sublist with linear structure. - # Number of tracks got from path is 2 * number of If operations in path. 
- # Track is looks like list of paths with 2 fields : list of nodes on current path and list of according graphs - # Example: - # input path : [loop_1, loop_2, if_1, [[loop3_1, node_1], [node_2]]] - # output track: [{'nodes': [loop_1, loop_2, if_1, loop3_1, node_1], - # 'graphs':[graph, loop_1.body, loop_2.body, if.then_graph, loop3_1.body]}, - # {'nodes': [loop_1, loop_2, if_1, node_2], - # 'graphs':[graph, loop_1.body, loop_2.body, if.else_graph]}] - - # structure to save tracks - # list with tracks, each track is list of pairs {'node', 'graph'} - paths_nodes_graphs = list() - paths_nodes_graphs.append([]) - # stack for sub-graphs that will be traversed in future - future_graphs_stack = [graph] - # index for track that we currently fill - track_idx = 0 - # save lists that were started but not finished during processing - lists_stack = [{'list': path, 'pos': -1}] - while len(lists_stack) != 0: - cur_list_pos = lists_stack.pop(-1) - # current list to process - cur_list = cur_list_pos['list'] - # index in current list/sub-list - list_idx = cur_list_pos['pos'] + 1 - while list_idx < len(cur_list): - el = cur_list[list_idx] - if isinstance(el, (list, np.ndarray)): - lists_stack.append({'list': cur_list, 'pos': list_idx}) - # if we have previous node non-list then current sublist is for If node - # and new tracks should be added for sub-graphs (the first subgraph will continue current track) - if list_idx != 0 and isinstance(cur_list[list_idx - 1], str): - for i in range(len(el) - 1): - # copy all nodes from existing track to new one - paths_nodes_graphs.append(paths_nodes_graphs[-1][:]) - # new sublist started, so reset index - cur_list = el - list_idx = 0 - else: - assert isinstance(el, str) - cur_graph = future_graphs_stack.pop(-1) - step_node = Node(cur_graph, el) - paths_nodes_graphs[track_idx].append({'node': step_node, 'graph': cur_graph}) - - # if node is not last, check that next node will be on current track or not - if list_idx != len(cur_list) - 1: - # so detect if we are in sublist with branches for If - # then in stack sublist is not the first node of list - # and have previous node with If operation name - if len(lists_stack) != 0 and lists_stack[-1]['pos'] != 0 and \ - isinstance(lists_stack[-1]['list'][lists_stack[-1]['pos']-1], str): - # switch to next track - if list_idx != len(cur_list) - 1: - track_idx += 1 - else: - assert step_node.has_and_set('sub_graphs'), "Node without sub-graphs is not last in path" - # the first graph should be first in traverse - for sub_graphs_name in reversed(step_node['sub_graphs']): - future_graphs_stack.append(step_node[sub_graphs_name]) - list_idx += 1 - - return paths_nodes_graphs - - def find_and_replace_pattern(self, graph: Graph): - - if 'additional_outputs' not in graph.graph: - return - - path = graph.graph['additional_outputs'] - paths_nodes_graphs = self.split_path_to_simple_tracks(graph, path) - - paths_nodes_graphs_old = [] - for i in range(len(paths_nodes_graphs)): - paths_nodes_graphs_old.append(paths_nodes_graphs[i][:]) - paths_nodes_graphs[i] = self.add_output_for_path(paths_nodes_graphs[i]) - - for i in range(len(paths_nodes_graphs)): - self.infer_shapes_of_nodes_in_path(paths_nodes_graphs[i]) - - new_nodes = [] - for i in range(len(paths_nodes_graphs)): - # new Result added to main graph should be on last place - k = 0 - while paths_nodes_graphs_old[i][0]['node'] != paths_nodes_graphs[i][k]['node']: - new_nodes.append(paths_nodes_graphs[i][k]['node']) - k += 1 - - return new_nodes diff --git 
a/tools/mo/openvino/tools/mo/back/blob_normalizer.py b/tools/mo/openvino/tools/mo/back/blob_normalizer.py deleted file mode 100644 index 448089694e9475..00000000000000 --- a/tools/mo/openvino/tools/mo/back/blob_normalizer.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.op_versioning import OpVersioning -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class BlobNormalizer(BackReplacementPattern): - """ - This pass affects Convolution and FullyConnected weights and biases form in IR. - Old version of those layers included weights and biases as blobs: - - ... - - - - - - - New version (after BlobNormalizer execution) weighs and biases are represented - as inputs to Convolution/FullyConnected layer - """ - enabled = True - - def run_before(self): - return [] - - def run_after(self): - from openvino.tools.mo.back.pass_separator import BackFinish - return [BackFinish] - - @staticmethod - def pattern(): - return dict( - nodes=[('conv', dict(type=lambda type: type in ['Convolution', 'Deconvolution', 'FullyConnected', 'DeformableConvolution']))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - conv = match['conv'] - for i in [1, 2]: - if i in conv.in_edges() and conv.in_edges()[i] and 'bin' in conv.in_edges()[i]: - del conv.in_edges()[i]['bin'] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(): - if node.soft_get('type').lower() not in OpVersioning.opset_1_types and \ - not node.soft_get('version') in ["opset2", "opset3", "opset4", "opset8"]: - continue - - for _, d in node.in_edges().items(): - if 'bin' in d: - del d['bin'] - - for node in graph.get_data_nodes(): - for d in node.in_edges(): - if 'bin' in d: - del d['bin'] diff --git a/tools/mo/openvino/tools/mo/back/compress_quantized_weights.py b/tools/mo/openvino/tools/mo/back/compress_quantized_weights.py deleted file mode 100644 index ca73f20d34d00d..00000000000000 --- a/tools/mo/openvino/tools/mo/back/compress_quantized_weights.py +++ /dev/null @@ -1,340 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from typing import Dict - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.elementwise import Sub, Div, Mul, Equal -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.select import Select -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np, np_data_type_to_destination_type, packed_I4 -from openvino.tools.mo.middle.pattern_match import apply_pattern -from openvino.tools.mo.ops.const import Const - - -class CompressQuantizeWeights(BackReplacementPattern): - r""" - Compress weights transformation goal is to pre-quantize data to minimize runtime calculations with constant data. - To achieve this goal we perform FakeQuantize decomposition to separate quantization from dequantization in it. 
- - FakeQuantize: - -[src_dtype]-> FakeQuantize -[src_dtype]-> - is an operation that could be represented as: - -[src_dtype]-> Quantize -[quantized_dtype]-> Dequantize -[src_dtype]-> - - Quantize and Dequantize operations are not present in OpenVINO supported opsets, but can be easily expressed - through supported ones. Transformation algorithm doesn't contain all the steps described - below (some of them are optimized). Steps are presented only to show the idea in details. - - Step 1: FQ decomposition - -[src_dtype]-> Quantize -[quantized_dtype]-> Dequantize -[src_dtype]-> - - Step 2: Representing Quantize and Dequantize through FakeQuantize and Convert operations - Simplified view: - -[src_dtype]-> FakeQuantize -[src_dtype]-> Convert -[quantized_dtype]-> Convert -[src_dtype]-> FakeQuantize -[quantized_dtype]-> - - Detailed view: - initial_input_low initial_input_high initial_output_low initial_output_high - \ / | / - (in: 1) (in: 2) (in: 3) (in: 4) - V V V V - Constant -> FakeQuantize` --> Convert --> Convert --> initial FakeQuantize --> - ^ ^ (quant_dtype) (src_dtype) ^ ^ - | | (in: 1) (in: 2) - (in: 3) (in: 4) | | - | \________________ _________________| | - | \ / | - new_output_low new_output_high | - -(levels // 2) (levels + new_output_low - 1) | - |__________________________________________________________________| - - Step 3: All inputs of initial FQ are Constants and we haven't added dynamic dependencies. Means we can const-fold - sub-graph we already have, but as our goal is to have quantized data, we should mark nodes to be folded. - - -[src_dtype]-> FakeQuantize -[src_dtype]-> Convert -[quantized_dtype]-> Convert -[src_dtype]-> FakeQuantize -[src_dtype]-> - |-------------------------Const Folding-------------------------------|----------------------Stays----------------------------| - - Resulting graph: - Constant -[quantized_dtype]-> Convert -[src_dtype]-> FakeQuantize -[src_dtype]-> - - Step 4: We reduced heavy manipulations with constant data in runtime, but we can go even further. - At this stage FakeQuantize node is playing dequantization role. It means it only shifts and scales the data. - No rounding is performed by this FakeQuantize as data was fully quantized earlier. - Also, runtime calculates this shift (zero point) and scale during low precision transformation. - It means we can pre-calculate even this information for them by simply decomposing FakeQuantize that plays - dequantization role to Subtract-Multiply sequence so resulting graph would be: - Constant -[quantized_dtype]-> Convert -[src_dtype]-> Subtract (zero_point) -> Multiply (scale) -[src_dtype]-> - - Where: - scale = (output_high - output_low) / (input_high - input_low) - WARNING: division by zero imposes restriction -- input_high can not be equal to input_low - zero_point = input_low - output_low / scale - NOTE: if scale == 0 than zero_point is equal to zero too (achieved through Select operation) - - BENEFITS: - Such constant data packing reduces IR size (.bin file size) - Also, transformation prepares quantized constant data for Low Precision pipeline. - With that we can skip same calculations in the runtime and make loading of such sub-graphs to the plugin faster. 
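A small numeric check of the scale / zero_point formulas from the docstring above, for signed int8 with levels=256 (the float range is an example; the dequantizing FakeQuantize has the quantized range on its inputs and the original float range on its outputs):

levels = 256
input_low, input_high = -(levels // 2), levels // 2 - 1     # quantized range: -128 .. 127
output_low, output_high = -1.0, 1.0                         # original float range (example)

scale = (output_high - output_low) / (input_high - input_low)
zero_point = input_low - output_low / scale if scale != 0 else 0.0

# Dequantize(q) == (q - zero_point) * scale maps the int limits back onto the float limits
assert abs((input_low - zero_point) * scale - output_low) < 1e-6
assert abs((input_high - zero_point) * scale - output_high) < 1e-6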
- """ - - enabled = True - - force_clean_up = True - - QUANTIZATION_MAP = { - # max_levels: (np_dtype, quantization_mode) - 256: (np.int8, "signed"), - 16: (packed_I4, "signed"), - } - - def pattern1(self): - return dict( - nodes=[ - ('const', dict(type='Const')), - ('const_d', dict()), - ('fake_quantize', dict(type='FakeQuantize', levels=lambda x: x is not None and 2 < x <= 256)), - ], - edges=[ - ('const', 'const_d'), - ('const_d', 'fake_quantize', {'in': 0}), - ] - ) - - def pattern2(self): - return dict( - nodes=[ - ('const', dict(type='Const')), - ('const_d', dict()), - ('convert', dict(type='Convert')), - ('convert_d', dict()), - ('fake_quantize', dict(type='FakeQuantize', levels=lambda x: x is not None and 2 < x <= 256)), - ], - edges=[ - ('const', 'const_d'), - ('const_d', 'convert'), - ('convert', 'convert_d'), - ('convert_d', 'fake_quantize', {'in': 0}), - ] - ) - - def find_and_replace_pattern(self, graph: Graph): - apply_pattern(graph, **self.pattern1(), action=self.replace_pattern) # pylint: disable=no-member - apply_pattern(graph, **self.pattern2(), action=self.replace_pattern) # pylint: disable=no-member - - @staticmethod - def quantize_data(fake_quantize: Node, dst_type: type, quantized_type: type, mode: str): - graph = fake_quantize.graph - name = fake_quantize.soft_get('name', fake_quantize.id) - levels = fake_quantize.levels - - quantize = fake_quantize.copy_node(dict(name=name + '/Copy', stop_value_propagation=False), graph) - fake_quantize.in_port(0).get_connection().set_destination(quantize.in_port(0)) - - # inherit input limits - fake_quantize.in_port(1).get_connection().set_destination(quantize.in_port(1)) - fake_quantize.in_port(2).get_connection().set_destination(quantize.in_port(2)) - - # calculate output limits for quantized weights - assert mode in ["signed", "unsigned"] - i_min_value = -(levels // 2) if mode == "signed" else 0 - - i_min = mo_array(i_min_value, dtype=dst_type) if not quantize.in_node(0).shape.size else mo_array([i_min_value], dtype=dst_type) - i_max = mo_array(levels + i_min - 1, dtype=dst_type) - - assert i_max - i_min == levels - 1 - out_low = Const(graph, dict(name=name + '/Copy/out_low', value=i_min)).create_node() - out_high = Const(graph, dict(name=name + '/Copy/out_high', value=i_max)).create_node() - - out_low.out_port(0).connect(quantize.in_port(3)) - out_high.out_port(0).connect(quantize.in_port(4)) - out_low.out_port(0).connect(fake_quantize.in_port(1)) - out_high.out_port(0).connect(fake_quantize.in_port(2)) - - original_const = quantize.in_port(0).get_source().node - quantized_data_name = original_const.soft_get('name', original_const.id) + '/quantized' - cast = Cast(graph, dict(name=quantized_data_name, dst_type=quantized_type, - stop_value_propagation=False)).create_node() - - quantize.out_port(0).connect(cast.in_port(0)) - - cast.out_port(0).connect(fake_quantize.in_port(0)) - - @staticmethod - def dequantize_data(fake_quantize: Node, dst_type: type, quantized_type: type) -> Node: - graph = fake_quantize.graph - quantized_data = fake_quantize.in_port(0).get_source().node - name = fake_quantize.soft_get('name', fake_quantize.id) - - assert quantized_data.soft_get('type') == 'Convert' and quantized_data.dst_type == quantized_type, \ - 'Weights aren`t compressed as expected for node {}'.format(fake_quantize.soft_get('name', fake_quantize.id)) - - dequantizing_cast = Cast(graph, dict( - name=quantized_data.name + "/to_{}".format(np_data_type_to_destination_type(dst_type)), - dst_type=dst_type, 
stop_value_propagation=True)).create_node() - fake_quantize.in_port(0).get_connection().set_destination(dequantizing_cast.in_port(0)) - - # limits of dequantize - in_low = fake_quantize.in_port(1).get_source() - in_high = fake_quantize.in_port(2).get_source() - out_low = fake_quantize.in_port(3).get_source() - out_high = fake_quantize.in_port(4).get_source() - - need_cast_to_f32 = fake_quantize.out_port(0).is_data_type_defined() and fake_quantize.out_port(0).get_data_type() < np.float32 - if need_cast_to_f32: - in_low_cast = Cast(graph, {'name': name + '/in_low/convert_to_f32', 'dst_type': np.float32}).create_node() - in_low_cast.in_port(0).connect(in_low) - in_low = in_low_cast.out_port(0) - - in_high_cast = Cast(graph, {'name': name + '/in_high/convert_to_f32', 'dst_type': np.float32}).create_node() - in_high_cast.in_port(0).connect(in_high) - in_high = in_high_cast.out_port(0) - - out_low_cast = Cast(graph, {'name': name + '/out_low/convert_to_f32', 'dst_type': np.float32}).create_node() - out_low_cast.in_port(0).connect(out_low) - out_low = out_low_cast.out_port(0) - - out_high_cast = Cast(graph, {'name': name + '/out_high/convert_to_f32', 'dst_type': np.float32}).create_node() - out_high_cast.in_port(0).connect(out_high) - out_high = out_high_cast.out_port(0) - - # scale calculation - output_range = Sub(graph, {'name': name + '/output_range'}).create_node() - output_range.in_port(0).connect(out_high) - output_range.in_port(1).connect(out_low) - - input_range = Sub(graph, {'name': name + '/input_range'}).create_node() - input_range.in_port(0).connect(in_high) - input_range.in_port(1).connect(in_low) - - scale = Div(graph, {'name': name + '/scale'}).create_node() - scale.in_port(0).connect(output_range.out_port(0)) - scale.in_port(1).connect(input_range.out_port(0)) - - # shift calculation - descaled_output_low = Div(graph, {'name': name + '/descaled_output_low'}).create_node() - descaled_output_low.in_port(0).connect(out_low) - descaled_output_low.in_port(1).connect(scale.out_port(0)) - - shift = Sub(graph, {'name': name + '/shift'}).create_node() - shift.in_port(0).connect(in_low) - shift.in_port(1).connect(descaled_output_low.out_port(0)) - - zero = Const(graph, {'name': name + '/zero', 'value': mo_array(0, dtype=dst_type)}).create_node() - scale_eq_zero = Equal(graph, {'name': name + '/scale_eq_zero'}).create_node() - scale_eq_zero.in_port(0).connect(scale.out_port(0)) - scale_eq_zero.in_port(1).connect(zero.out_port(0)) - - zero_point = Select(graph, {'name': name + '/zero_point'}).create_node() - zero_point.in_port(0).connect(scale_eq_zero.out_port(0)) - zero_point.in_port(1).connect(zero.out_port(0)) - zero_point.in_port(2).connect(shift.out_port(0)) - - if need_cast_to_f32: - fq_dtype = fake_quantize.out_port(0).get_data_type() - scale_cast = Cast(graph, {'name': name + '/scale/convert_back', 'dst_type': fq_dtype}).create_node() - scale_cast.in_port(0).connect(scale.out_port(0)) - scale = scale_cast - zero_point_cast = Cast(graph, {'name': name + '/zero_point/convert_back', 'dst_type': fq_dtype}).create_node() - zero_point_cast.in_port(0).connect(zero_point.out_port(0)) - zero_point = zero_point_cast - - # DeQuantize(x) == Mul(Sub(x, zero_point), scale) - sub_zp = Sub(graph, {'name': name + '/minus_zp'}).create_node() - sub_zp.in_port(0).connect(dequantizing_cast.out_port(0)) - sub_zp.in_port(1).connect(zero_point.out_port(0)) - - mul_scale = Mul(graph, {'name': name + '/mulpiply_by_scale'}).create_node() - mul_scale.in_port(0).connect(sub_zp.out_port(0)) - 
mul_scale.in_port(1).connect(scale.out_port(0)) - - fake_quantize.out_port(0).get_connection().set_source(mul_scale.out_port(0)) - - graph.remove_nodes_from([fake_quantize.id, fake_quantize.out_node(0)]) - - def replace_pattern(self, graph: Graph, match: Dict[str, Node]): - fake_quantize = match['fake_quantize'] - - if fake_quantize.has_and_set('stop_compression'): - return - - if 'convert' in match: - dst_type = match['convert'].dst_type - match['convert']['stop_value_propagation'] = False - Cast.infer(match['convert']) - else: - dst_type = match['const'].value.dtype - - quantized_type, mode = None, None - for quantization_levels in sorted(self.QUANTIZATION_MAP): - if quantization_levels >= fake_quantize.levels: - quantized_type, mode = self.QUANTIZATION_MAP[quantization_levels] - break - - self.quantize_data(fake_quantize, dst_type, quantized_type, mode) - self.dequantize_data(fake_quantize, dst_type, quantized_type) - - -class ZeroPointOptimizer(BackReplacementPattern): - r""" - Step 1: Having zero_point == 0 is really beneficial for performance, so we try to fuse Subtract up to the Constant. - It is not always possible because of the quantized_dtype possible range of values. - - Step 2: From the nature of Subtract operation it may be optimized out if zero_point == 0 - """ - enabled = True - force_clean_up = True - - def run_after(self): - return [CompressQuantizeWeights] - - def pattern(self): - return dict( - nodes=[ - ('const', dict(type='Const')), - ('const_d', dict()), - ('convert', dict(type='Convert')), - ('convert_d', dict()), - ('const_zp', dict(type='Const')), - ('const_zp_d', dict()), - ('sub', dict(type='Subtract')), - ], - edges=[ - ('const', 'const_d'), - ('const_d', 'convert'), - ('convert', 'convert_d'), - ('convert_d', 'sub', {'in': 0}), - ('const_zp', 'const_zp_d'), - ('const_zp_d', 'sub', {'in': 1}), - ] - ) - - def replace_pattern(self, graph: Graph, match: Dict[str, Node]): - zero_point = match['const_zp'].out_port(0).data.get_value() - assert zero_point is not None - convert = match['convert'] - sub = match['sub'] - if np.allclose(zero_point, 0): - sub.out_port(0).get_connection().set_source(convert.out_port(0)) - return - - weights = match['const'].out_port(0).data.get_value() - if weights is None or weights.dtype != np.int8: - return - dst_type = convert.dst_type - - int8_zero_point = np.round(zero_point).astype(np.int8) - adj_zero_point = (zero_point - int8_zero_point).astype(dst_type) - - original = weights.astype(dst_type) - zero_point - transformed = (weights - int8_zero_point).astype(np.int8) - adj_zero_point - - if not np.allclose(original, transformed) or not np.allclose(adj_zero_point, 0, atol=1.e-04): - return - - match['const_d']['value'] = (weights - int8_zero_point).astype(np.int8) - sub.out_port(0).get_connection().set_source(convert.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/back/ie_ir_ver_2/__init__.py b/tools/mo/openvino/tools/mo/back/ie_ir_ver_2/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ie_ir_ver_2/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/back/ie_ir_ver_2/emitter.py b/tools/mo/openvino/tools/mo/back/ie_ir_ver_2/emitter.py deleted file mode 100644 index 357c654e43902b..00000000000000 --- a/tools/mo/openvino/tools/mo/back/ie_ir_ver_2/emitter.py +++ /dev/null @@ -1,640 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# 
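ZeroPointOptimizer above only folds the zero point into the int8 weights when the fold is lossless; a NumPy sketch of that check with made-up values:

import numpy as np

weights = np.array([-10, 0, 42], dtype=np.int8)
zero_point = np.array(3.0)

int8_zp = np.round(zero_point).astype(np.int8)
adj_zp = (zero_point - int8_zp).astype(np.float32)
original = weights.astype(np.float32) - zero_point
transformed = (weights - int8_zp).astype(np.int8) - adj_zp

# folding is allowed: the adjusted zero point is ~0 and the dequantized values are unchanged
assert np.allclose(original, transformed) and np.allclose(adj_zp, 0, atol=1.e-04)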
SPDX-License-Identifier: Apache-2.0 - -import hashlib - -import defusedxml.ElementTree as ET -from defusedxml import defuse_stdlib -from defusedxml.minidom import parseString - -from openvino.tools.mo.front.common.partial_infer.utils import unmask_shape, is_fully_defined -from openvino.tools.mo.graph.graph import * -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_precision -from openvino.tools.mo.utils.unsupported_ops import UnsupportedOps -from openvino.tools.mo.utils.utils import refer_to_faq_msg -from openvino.tools.mo.utils.version import get_version - -# defuse_stdlib provide patched version of xml.etree.ElementTree which allows to use objects from xml.etree.ElementTree -# in a safe manner without including unsafe xml.etree.ElementTree -ET_defused = defuse_stdlib()[ET] -Element = ET_defused.Element -SubElement = ET_defused.SubElement -tostring = ET_defused.tostring - -elements_to_skip_during_serializing = ['inputs_list'] - - -def serialize_constants(graph: Graph, bin_file_name: str, data_type=np.float32): - """ - Found all data constants that has output edges with 'bin' attribute. - Serialize content for such constants to a binary file with name bin_file_name in - raw format. Save offset and length of serialized area in the file as 'offset' and 'size' - attributes of data node. - - Args: - @graph: input graph with op and data nodes - @bin_file_name: path to file to write blobs to - @data_type: numpy data type to convert all blob elements to - - """ - bin_hashes = {} - with open(bin_file_name, 'wb') as bin_file: - serialize_constants_recursively(graph, bin_file, data_type, bin_hashes) - - -def update_offset_size_in_const_node(node: Node): - assert node.kind == 'data' - for consumer in node.out_nodes(): - if consumer.type != 'Const': - continue - assert not consumer.has_valid('offset') - assert not consumer.has_valid('size') - consumer['offset'] = node.offset - consumer['size'] = node.size - - -def serialize_constants_recursively(graph: Graph, bin_file, data_type, bin_hashes): - nodes = sorted(graph.nodes()) - for node in nodes: - node = Node(graph, node) - - if node.kind == 'data' and node.value is not None and \ - any('bin' in d for u, v, d in graph.out_edges(node.node, data=True)): - # avoid array copying while taking hash - blob = node.value if node.value.ndim > 0 else node.value.reshape((1)) - assert is_fully_defined(blob), 'The constant value cannot contain dynamic values' - if isinstance(blob, np.ma.masked_array): - blob = np.ma.getdata(blob) - blob_hash = hashlib.sha512(np.ascontiguousarray(blob).view(np.uint8)).hexdigest() - - if blob_hash in bin_hashes and np.array_equal(blob, bin_hashes[blob_hash]['blob']): - graph.node[node.node]['offset'] = bin_hashes[blob_hash]['offset'] - graph.node[node.node]['size'] = bin_hashes[blob_hash]['size'] - graph.node[node.node]['blob_precision'] = np_data_type_to_precision(blob.dtype) - update_offset_size_in_const_node(node) - else: - start = bin_file.tell() - blob.tofile(bin_file) - end = bin_file.tell() - - graph.node[node.node]['offset'] = start - graph.node[node.node]['size'] = end - start - graph.node[node.node]['blob_precision'] = np_data_type_to_precision(blob.dtype) - - bin_hashes[blob_hash] = {'offset': graph.node[node.node]['offset'], - 'size': graph.node[node.node]['size'], 'blob': blob} - update_offset_size_in_const_node(node) - - assert (blob.dtype.itemsize * np.prod(node.shape) == end - start) or \ - node.has_valid('force_shape'), node.attrs() - - log.debug( - "Detected binary for graph: '{}', node: 
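serialize_constants_recursively de-duplicates identical blobs via a SHA-512 digest of their raw bytes so they can share one offset/size record in the .bin file; a standalone sketch of the hashing step:

import hashlib
import numpy as np

def blob_digest(blob):
    # same hashing scheme as above: the contiguous array's raw bytes viewed as uint8
    return hashlib.sha512(np.ascontiguousarray(blob).view(np.uint8)).hexdigest()

a = np.arange(10, dtype=np.float32)
b = np.arange(10, dtype=np.float32)
assert blob_digest(a) == blob_digest(b)      # equal content -> shared bin-file record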
'{}', id: {}, shape: '{}', offset: '{}', size: '{}'".format( - graph, node.soft_get('name'), node.id, node.shape, node.offset, node.size)) - - # separate loop for sub-graph to dump them after all blobs for more natural blob offset ordering - # TODO: implement strict order for all blobs in entier IR - for node in nodes: - node = Node(graph, node) - # Dump blobs recursively if sub-graphs are present in the node - if node.has_valid('sub_graphs'): - for sub_graph_attr_name in node.sub_graphs: - sub_graph = node[sub_graph_attr_name] - serialize_constants_recursively(sub_graph, bin_file, data_type, bin_hashes) - - -def serialize_mean_image(bin_file_name: str, mean_data=[]): - with open(bin_file_name, 'ab') as bin_file: - mean_offset = [] - mean_size = [] - for x in range(len(mean_data)): - start = bin_file.tell() - bin_file.write(mean_data[x][:]) - end = bin_file.tell() - mean_offset.append(start) - mean_size.append(end - start) - - return mean_offset, mean_size - - -def xml_shape(shape: np.ndarray, element: Element): - for d in unmask_shape(shape): - if d < -1: - raise Error('The value "{}" for shape is not valid value.'.format(d)) - dim = SubElement(element, 'dim') - if int(d) != d: - raise Error('The value "{}" for shape is not integer.'.format(d)) - if not isinstance(d, np.int64): - log.warning('The element of shape is not np.int64 value. Converting the value "{}" to integer'.format(d)) - d = int(d) - dim.text = str(d) - - -def xml_ports(node: Node, element: Element, edges: Element): - # input ports - inputs = None # will create input section only if at least one input is available - for u, d in node.get_sorted_inputs(): - if 'bin' not in d and ('xml_skip' not in d or not d['xml_skip']): - if inputs is None: - inputs = SubElement(element, 'input') - port = SubElement(inputs, 'port') - port.set('id', str(d['in'])) - assert node.graph.node[u]['shape'] is not None, 'Input shape is not calculated properly for node {}'.format( - node.id) - xml_shape(node.graph.node[u]['shape'], port) - - # support saving rt_info passed from IR Reader - port_id = d['in'] - if node.has('restored_input_ports') and port_id in node.restored_input_ports: - port_rt_info_value = node.restored_input_ports[port_id][2] - if port_rt_info_value != {}: - port_rt_info = SubElement(port, 'rt_info') - for (name, version), info_elem in port_rt_info_value.items(): - attribute = SubElement(port_rt_info, 'attribute') - attribute.set('name', name) - attribute.set('version', str(version)) - params = info_elem.serialize(node) if not isinstance(info_elem, dict) else info_elem - for key, value in params.items(): - attribute.set(key, value) - - # u is a data node that has a single producer, let's find it - assert (node.graph.node[u]['kind'] == 'data') - in_nodes = list(node.graph.in_edges(u, data=True)) - assert (len(in_nodes) <= 1) - if len(in_nodes) == 1: - src, _, out_attrs = in_nodes[0] - edge = SubElement(edges, 'edge') - edge.set('from-layer', str(src)) - edge.set('from-port', str(out_attrs['out'])) - edge.set('to-layer', str(node.node)) - edge.set('to-port', str(d['in'])) - # port.set('precision', np_data_type_to_precision(node['_in_port_precision'][d['in']])) - - # output ports - outputs = None - for v, d in node.get_sorted_outputs(): - if 'xml_skip' not in d or not d['xml_skip']: - if outputs is None: - outputs = SubElement(element, 'output') - port = SubElement(outputs, 'port') - port.set('id', str(d['out'])) - # we need to check operation type, if it is const op, we don't renumber out ports - # because they are already counted 
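xml_shape / xml_ports above emit one <dim> child per shape element under a <port> node; a minimal sketch using the standard ElementTree API (the deleted module used the defusedxml-patched equivalents):

from xml.etree.ElementTree import Element, SubElement, tostring

port = Element('port', {'id': '0'})
for d in (1, 3, 224, 224):                   # example static shape
    SubElement(port, 'dim').text = str(d)
print(tostring(port).decode())               # <port id="0"><dim>1</dim>...<dim>224</dim></port>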
from zero - port_id = d['out'] - len(node.in_nodes()) if node.type != 'Const' else d['out'] - data_type = node.out_port(port_id).get_data_type() - assert data_type is not None, 'The precision is not defined for the output port {} of node {}' \ - ''.format(port_id, node.soft_get('name')) - - port.set('precision', node.soft_get('force_type', np_data_type_to_precision(data_type))) - assert node.graph.node[v]['shape'] is not None, 'Output shape is not calculated properly for node {}' \ - ''.format(node.id) - tensor_names = node.out_port(port_id).get_tensor_names(port_renumber=True) - if tensor_names: - port.set('names', ','.join(tensor_names)) - xml_shape(node.graph.node[v]['shape'], port) - - # support saving rt_info passed from IR Reader - if node.has('ports') and port_id in node.ports: - port_rt_info_value = node.ports[port_id][2] - if port_rt_info_value != []: - port_rt_info = SubElement(port, 'rt_info') - for (name, version), info_elem in port_rt_info_value.items(): - attribute = SubElement(port_rt_info, 'attribute') - attribute.set('name', name) - attribute.set('version', str(version)) - params = info_elem.serialize(node) if not isinstance(info_elem, dict) else info_elem - for key, value in params.items(): - attribute.set(key, value) - -def xml_consts(graph: Graph, node: Node, element: Element): - blobs = None # sub-element that will be created on-demand - for u, d in node.get_sorted_inputs(): - if 'bin' in d and (node.type != 'Const'): - if not blobs: - blobs = SubElement(element, 'blobs') - const = SubElement(blobs, d['bin']) - try: - const.set('offset', str(graph.node[u]['offset'])) - const.set('size', str(graph.node[u]['size'])) - const.set('precision', graph.node[u]['blob_precision']) - except Exception as e: - raise Error('Unable to access binary attributes ("offset" and/or "size") for blobs for node {}. ' - 'Details: {}'.format(node.soft_get('name'), e)) - - -def soft_get(node, attr): - """ If node has soft_get callable member, returns node.soft_get(attr), else return """ - return node.soft_get(attr) if hasattr(node, 'soft_get') and callable(node.soft_get) else '' - - -def serialize_element( - graph: Graph, - node, - schema: list, - parent_element: Element, - edges: Element, - unsupported): - name, attrs, subelements = schema - element = SubElement(parent_element, name) - for attr in attrs: - if isinstance(attr, tuple): - key = attr[0] - try: - if callable(attr[1]): - value = attr[1](node) - else: - value = node[attr[1]] if attr[1] in node else None - except TypeError as e: - raise Error('Unable to extract {} from layer {}', key, soft_get(node, 'name')) from e - except Exception as e: - raise Error( - 'Cannot emit value for attribute {} for layer {}. 
' - 'Internal attribute template: {}.', - key, - soft_get(node, 'name'), - attr - ) from e - elif isinstance(attr, dict): - node_attrs = node.graph.node[node.id] if isinstance(node, Node) else node - for key in attr.keys(): - if key in node_attrs: - for k, v in node_attrs[key].items(): - element.set(k, str(v)) - continue - else: - key = attr - value = node[attr] if attr in node else None - if value is not None: - element.set(key, str(value)) - serialize_node_attributes(graph, node, subelements, element, edges, unsupported) - if len(element.attrib) == 0 and len(list(element)) == 0: - parent_element.remove(element) - - -def serialize_meta_list(graph, node, schema, element, edges, unsupported): - _, list_accessor, sub_schema = schema - items = list_accessor(node) # this is a list of dictionary-like objects - for item in items: - serialize_node_attributes(graph, item, [sub_schema], element, edges, unsupported) - - -def serialize_runtime_info(node, parent_element: Element): - if 'rt_info' not in node: - return - rt_info = SubElement(parent_element, 'rt_info') - - for (name, version), info_elem in node.rt_info.info.items(): - attribute = SubElement(rt_info, 'attribute') - attribute.set('name', name) - attribute.set('version', str(version)) - params = info_elem.serialize(node) if not isinstance(info_elem, dict) else info_elem - for key, value in params.items(): - attribute.set(key, value) - if len(rt_info.attrib) == 0 and len(list(rt_info)) == 0: - parent_element.remove(rt_info) - - -def serialize_node_attributes( - graph: Graph, # the current network graph - node, # dictionary-like object that should be serialized - schema: list, - parent_element: Element, - edges: Element, - unsupported): - # the Result op may be marked so it should not appear in the IR. For example, refer to transformation - # openvino/tools/mo/back/TopKNormalizer.py - if isinstance(node, Node) and node.soft_get('type') == 'Result' and node.has_and_set('keep_output_port'): - return - try: - for s in schema: - if not isinstance(s, tuple): - if s == '@ports': - try: - # TODO make sure that edges are generated regardless of the existence of @ports - xml_ports(node, parent_element, edges) - except Exception as e: - raise Error(('Unable to create ports for node with id {}. ' + - refer_to_faq_msg(3)).format(node.id)) from e - elif s == '@consts': - xml_consts(graph, node, parent_element) - elif s == '@runtime_info': - serialize_runtime_info(node, parent_element) - else: - log.warning('Unknown xml schema tag: {}'.format(s)) - else: - name = s[0] - if name == '@list': - serialize_meta_list(graph, node, s, parent_element, edges, unsupported) - elif name == '@network': - serialize_network(node[s[1]], parent_element, unsupported) - else: - serialize_element(graph, node, s, parent_element, edges, unsupported) - except Exception as e: - raise Error( - 'Error while emitting attributes for layer {} (id = {}). 
It usually means that there is unsupported ' - 'pattern around this node or unsupported combination of attributes.', - soft_get(node, 'name'), - node.id - ) from e - - -def create_pre_process_block_for_image(net: Element, ref_layer_names: list, mean_offset: tuple, - mean_size: tuple): - pre_process = SubElement(net, 'pre-process') - pre_process.set('mean-precision', 'FP32') # TODO: to think about need to output FP16 mean values - # TODO: extend it for several inputs - pre_process.set('reference-layer-name', ref_layer_names[0]) - for idx in range(len(mean_size)): - channel_xml = SubElement(pre_process, 'channel') - channel_xml.set('id', str(idx)) - mean_xml = SubElement(channel_xml, 'mean') - mean_xml.set('offset', str(mean_offset[idx])) - mean_xml.set('size', str(mean_size[idx])) - - -def create_pre_process_block(net, ref_layer_name, means, scales=None): - """ - Generates the pre-process block for the IR XML - Args: - net: root XML element - ref_layer_name: name of the layer where it is referenced to - means: tuple of values - scales: tuple of values - - Returns: - pre-process XML element - """ - pre_process = SubElement(net, 'pre-process') - pre_process.set('reference-layer-name', ref_layer_name) - - for idx in range(len(means)): - channel_xml = SubElement(pre_process, 'channel') - channel_xml.set('id', str(idx)) - - mean_xml = SubElement(channel_xml, 'mean') - mean_xml.set('value', str(means[idx])) - - if scales: - scale_xml = SubElement(channel_xml, 'scale') - scale_xml.set('value', str(scales[idx])) - - return pre_process - - -def add_quantization_statistics(graph, net_element): - if 'statistics' in graph.graph: - stats = SubElement(net_element, 'statistics') - for tensor, interval in graph.graph['statistics'].items(): - layer = SubElement(stats, 'layer') - name = SubElement(layer, 'name') - name.text = tensor - min = SubElement(layer, 'min') - min.text = interval['min'] - max = SubElement(layer, 'max') - max.text = interval['max'] - log.info('Statistics were inserted to IR') - - -def add_quantization_info_section(net: Element, meta_info: dict): - if 'quantization_parameters' in meta_info: - parameters = meta_info['quantization_parameters'] - quant_params = SubElement(net, 'quantization_parameters') - - config = SubElement(quant_params, 'config') - config.text = parameters['config'] - - version = SubElement(quant_params, 'version') - version.set('value', parameters['version']) - - cli_params = SubElement(quant_params, 'cli_params') - cli_params.set('value', parameters['cli_params']) - - -def add_meta_data_elem(meta: Element, key, value): - if isinstance(value, dict): - sub_elem = SubElement(meta, key) - for sub_key, sub_value in sorted(value.items()): - if sub_value in elements_to_skip_during_serializing: - continue - add_meta_data_elem(sub_elem, sub_key, sub_value) - else: - SubElement(meta, key).set('value', str(value)) - - -def add_net_rt_info(net: Element, meta_info: dict): - if meta_info == {}: - log.warning('`meta_info` is not provided, IR will not contain appropriate section.') - else: - meta = SubElement(net, 'rt_info') - for key, value in meta_info.items(): - if isinstance(value, dict) and value == {}: - continue - add_meta_data_elem(meta, key, value) - - -def serialize_node(graph: Graph, node: Node, layers: SubElement, edges: SubElement, unsupported: UnsupportedOps): - if node.kind == 'op' and (not node.has('type') or node.type is None): - unsupported.add(node) - return - if not node.has('IE'): - return - try: - serialize_node_attributes(graph, node, node.IE, layers, edges, 
unsupported) - except Error as e: - raise Error(str(e).replace('', '{} (id = {})'.format(node.soft_get('name'), node.id))) from e - - -def get_tensor_names_of_result_node(graph): - result_nodes = graph.get_op_nodes(type='Result') - result_names_to_tensor_names = {} - for res_node in result_nodes: - - # After port renumbering port/connection API is not applicable - assert len(res_node.in_nodes()) > 0, \ - "Result node with name {} has no input node.".format(res_node.soft_get('name')) - res_data_node = res_node.in_node(0) - assert len(res_data_node.in_nodes()) > 0, \ - "Data node of Result with name {} has no input node.".format(res_node.soft_get('name')) - res_in_node = res_data_node.in_node(0) - - # We cannot use out_ports() after port renumbering - for v, d in res_in_node.get_sorted_outputs(): - port_id = d['out'] - len(res_in_node.in_nodes()) if res_in_node.type != 'Const' else d['out'] - tensor_names = res_in_node.out_port(port_id).get_tensor_names(port_renumber=True) - result_names_to_tensor_names[res_node.soft_get('name')] = tensor_names - return result_names_to_tensor_names - - -def find_result_node_by_name(output_name, result_nodes, result_names_to_tensor_names): - for res_node in result_nodes: - res_name = res_node.soft_get('name') - tensor_names = result_names_to_tensor_names[res_name] - if output_name in tensor_names: - # In this case output tensor name is in tensor names list of previous op - return res_name - - return None - -def check_and_add_result_name(result_name:str, ordered_results:list): - if result_name in ordered_results: - log.warning("Result node with name {} has at least two tensor names corresponding " - "to different original results.".format(result_name)) - else: - ordered_results.append(result_name) - -def serialize_network(graph, net_element, unsupported): - layers = SubElement(net_element, 'layers') - edges = SubElement(net_element, 'edges') - if graph is None: - return - nodes = sorted(graph.nodes()) - - result_nodes = graph.get_op_nodes(type='Result') - result_names_to_tensor_names = get_tensor_names_of_result_node(graph) - - ordered_results = [] - for output_name in graph.outputs_order: - node = graph.get_op_nodes(name=output_name) - - if len(node) == 0: - # As graph does not contain node with name=output_name - # in the following code we look for output_name among tensor names - # incoming to Result nodes - found_result_name = find_result_node_by_name(output_name, result_nodes, result_names_to_tensor_names) - - if found_result_name is not None: - check_and_add_result_name(found_result_name, ordered_results) - else: - log.warning("Output node with name {} is not found in graph.".format(output_name)) - continue - node = node[0] - - # In this case Result node has the same name as output tensor - if node.soft_get('type') == 'Result': - check_and_add_result_name(node.soft_get('name'), ordered_results) - continue - - # Here output data node count is checked. Output Op nodes must have at least one data node - assert len(node.out_nodes()) >= 1, "Incorrect graph. Non-Result node with name {} " \ - "has no output data node.".format(output_name) - - # After port renumbering port/connection API is not applicable, and output port numbering - # starts from len(node.in_nodes()). But it not applicable to Constant operations, they have only one output - # port with number 0. 
- if node.type == 'Const': - data_node = node.out_node(0) - else: - data_node = node.out_node(len(node.in_nodes())) - - found_result = False - for op_node in data_node.out_nodes(): - if op_node.soft_get('type') == 'Result': - found_result = True - check_and_add_result_name(op_node.soft_get('name'), ordered_results) - break - - if not found_result: - log.warning("Node that expected to be output with name {} is not connected with Result node.".format(output_name)) - - param_nodes = graph.get_op_nodes(type='Parameter') - serialized_inputs = [] - for input_name in graph.inputs_order: - node = graph.get_op_nodes(name=input_name) - if len(node) != 0: - serialize_node(graph, node[0], layers, edges, unsupported) - serialized_inputs.append(input_name) - continue - found_tensor_name = False - for param_node in param_nodes: - param_name = param_node.soft_get('name') - if not param_node.is_out_port_connected(0): - continue - tensor_names = param_node.out_port(0).get_tensor_names(port_renumber=True) - if input_name in tensor_names: - # In this case input name is in tensor names list of Parameter op - serialize_node(graph, param_node, layers, edges, unsupported) - serialized_inputs.append(param_name) - found_tensor_name = True - break - - if not found_tensor_name: - log.warning("Input node with name {} is not found in graph.".format(param_name)) - - for node in nodes: - node = Node(graph, node) - if node.soft_get('name') in serialized_inputs: - continue - if node.soft_get('name') in ordered_results: - continue - serialize_node(graph, node, layers, edges, unsupported) - - for output_name in ordered_results: - node = graph.get_op_nodes(name=output_name) - assert len(node) == 1, "Output node with name {} is not found in graph.".format(output_name) - serialize_node(graph, node[0], layers, edges, unsupported) - - -def generate_ie_ir(graph: Graph, file_name: str, input_names: tuple = (), mean_offset: tuple = (), - mean_size: tuple = (), meta_info: dict = dict()): - """ - Extracts OV/IR attributes from kind='op' nodes in three ways: - (1) node.OV xml scheme that sets correspondence from existing attributes to generated xml elements - (2) input/output edges that don't have 'bin' attributes are transformed to input/output ports - (3) input edges that has 'bin' attributes are handled in special way like weights/biases - - Args: - graph: nx graph with FW-independent model - file_name: name of the resulting IR - input_names: names of input layers of the topology to add mean file to - input_name: name of the layer which is referenced from pre-processing block if any - mean_values: tuple of mean values for channels in RGB order - scale_values: tuple of mean values for channels in RGB order - mean_offset: offset in binary file, where mean file values start - mean_size: size of the mean file - """ - net = Element('net') - net.set('name', graph.name) - net.set('version', str((graph.graph['ir_version']))) - - if mean_size or mean_offset: - create_pre_process_block_for_image(net, input_names, mean_offset, mean_size) - - if 'mean_values' in graph.graph.keys(): - for input_name, values in graph.graph['mean_values'].items(): - create_pre_process_block(net, input_name, values) - - unsupported = UnsupportedOps(graph) - - serialize_network(graph, net, unsupported) - - #TODO: Remove this line when POT updates to using of rt_info - add_quantization_statistics(graph, net) - - add_net_rt_info(net, meta_info) - - #TODO: Remove this line when POT updates to using of rt_info - add_quantization_info_section(net, meta_info) - - 
xml_string = tostring(net) - xml_doc = parseString(xml_string) - pretty_xml_as_string = xml_doc.toprettyxml() - if len(unsupported.unsupported): - log.debug('Partially correct IR XML:\n{}'.format(pretty_xml_as_string)) - unsupported.report(log.error, "List of operations that cannot be converted to OpenVINO IR:") - raise Error('Part of the nodes was not converted to IR. Stopped. ' + - refer_to_faq_msg(24)) - with open(file_name, 'wb') as file: - file.write(bytes(pretty_xml_as_string, "UTF-8")) - - -def port_renumber(graph: Graph): - for node in graph.get_op_nodes(): - base = 0 - # we need to check operation type, if it is const op, we don't renumber out ports to count them from zero - if node.soft_get('type') != 'Const': - for u, d in node.get_sorted_inputs(): - d['in'] = base - base += 1 - for v, d in node.get_sorted_outputs(): - d['out'] = base - base += 1 diff --git a/tools/mo/openvino/tools/mo/back/insert_compatibility_l2normalization.py b/tools/mo/openvino/tools/mo/back/insert_compatibility_l2normalization.py deleted file mode 100644 index ec34657cae828f..00000000000000 --- a/tools/mo/openvino/tools/mo/back/insert_compatibility_l2normalization.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class CompatibilityL2NormalizationPattern(BackReplacementPattern): - force_clean_up = True - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('l2_normalization', dict(op='Normalize')) - ], - edges=[]) - - def replace_pattern(self, graph: Graph, match: dict): - """ - Adds Normalize layer weights, which are required by OpenVINO, - but do not always exist in MXNet model. - - L2Normalization is mapped to Normalize layer - so we need to generate Normalize weights filled with ones. - - Parameters - ---------- - graph : Graph - Graph with loaded model. - match : dict - Patterns which were found in graph structure. 
- """ - l2_normalization_node = match['l2_normalization'] - if len(l2_normalization_node.in_nodes()) < 2: - value = np.full([l2_normalization_node.in_node(0).shape[1]], 1.0, dtype=np.float32) - weights_node = Const(graph, dict(name=l2_normalization_node['name'] + '_weights', value=value)).create_node() - l2_normalization_node.add_input_port(1) - l2_normalization_node.in_port(1).connect(weights_node.out_port(0)) - l2_normalization_node.in_port(1).bin = 'weights' diff --git a/tools/mo/openvino/tools/mo/back/kaldi_remove_memory_output.py b/tools/mo/openvino/tools/mo/back/kaldi_remove_memory_output.py deleted file mode 100644 index d9bbcbaca00ba1..00000000000000 --- a/tools/mo/openvino/tools/mo/back/kaldi_remove_memory_output.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class KaldiRemoveMemoryOutputBackReplacementPattern(BackReplacementPattern): - enabled = True - - def run_after(self): - from openvino.tools.mo.back.pass_separator import BackFinish - return [BackFinish] - - def run_before(self): - from openvino.tools.mo.back.SpecialNodesFinalization import CreateConstNodesReplacement - return [CreateConstNodesReplacement] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('memory_node', dict(op='Assign')), - ('data_node', dict(kind='data')), - ('op_output', dict(op='Result')) - ], - edges=[ - ('memory_node', 'data_node'), - ('data_node', 'op_output') - ] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - """ - Need to find the pattern: Memory -> Data -> Result - - It is needed to make Memory nodes appear in IR, - but they are output nodes by default and we remove the Result node after each output memory. - - DO NOT use graph clean up after it - otherwise Memory nodes would be removed as they are not on the path from input to output - - Parameters - ---------- - graph : Graph - Graph with loaded model. - match : dict - Patterns which were found in graph structure. 
- """ - memory = match['memory_node'] - data = match['data_node'] - op_output = match['op_output'] - - graph.remove_edge(memory.id, data.id) - graph.remove_node(data.id) - graph.remove_node(op_output.id) diff --git a/tools/mo/openvino/tools/mo/back/names_uniqueness_check.py b/tools/mo/openvino/tools/mo/back/names_uniqueness_check.py deleted file mode 100644 index ddc8b868e2ac80..00000000000000 --- a/tools/mo/openvino/tools/mo/back/names_uniqueness_check.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from collections import defaultdict -from openvino.tools.mo.back.pass_separator import BackFinish -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph, rename_node - - -def nodes_with_equal_names(graph: Graph): - """ - :param graph: Graph to operate on - :return: Dictionary with node names as keys and a list of their corresponding nodes as values - """ - names_dict = defaultdict(list) - for node in graph.get_op_nodes(): - node_name = node.soft_get('name', node.id) - names_dict[node_name].append(node) - return names_dict - - -def make_node_names_unique(nodes: list, node_names: set): - """ - :param nodes: List with nodes matching a specific name - :param node_names: Set with all node names contained in the graph - :return: None - - Result nodes will be renamed only when it is absolutely necessary(if there are several Result nodes with the same name). - Function finds a position of Result nodes in the "nodes" list, take the first and rename all other nodes. - If the "nodes" list does not contain Result nodes, then all nodes starting from the second one will be renamed. - All new names are added to the "node_names" set. - """ - results_pos = [idx for idx, node in enumerate(nodes) if node.op == 'Result'] - node_position_to_keep = 0 - if len(results_pos) != 0: - node_position_to_keep = results_pos[0] - for idx, node in enumerate(nodes): - if idx != node_position_to_keep: - new_node_name = node.soft_get('name', node.id) + '_' + str(idx) - # preparing a new unique name for the node - while new_node_name in node_names: - new_node_name += '_' + str(idx) - node_names.add(new_node_name) - rename_node(node, new_node_name) - - -class NamesUniquenessCheck(BackReplacementPattern): - """ - If there are several layers with the same name in the original model and they are saved in the IR, OV will fail with - the invalid IR error. OV checks the uniqueness of the names and, if it is not true, throws an exception. The way how - to fix it on the MO side is to rename this nodes (one node will remain with the original name). Since we prefer to - save framework names for the output nodes, nodes with op=Result will not be renamed, except the case when there are - several Result nodes with the same name. 
- """ - enabled = True - - def run_after(self): - return [BackFinish] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - names_to_nodes = nodes_with_equal_names(graph) - node_names = set(names_to_nodes.keys()) - for nodes in names_to_nodes.values(): - if len(nodes) > 1: - make_node_names_unique(nodes, node_names) diff --git a/tools/mo/openvino/tools/mo/back/offline_transformations.py b/tools/mo/openvino/tools/mo/back/offline_transformations.py deleted file mode 100644 index 221e9b0a5fef08..00000000000000 --- a/tools/mo/openvino/tools/mo/back/offline_transformations.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -from typing import List - -from openvino.tools.mo.utils.cli_parser import parse_transform -from openvino.tools.mo.utils.error import Error -from openvino.runtime import Model - - -def get_new_placeholder_name(node_id: str, is_out_port: bool = False, port: int = 0): - """ - Forms a name of new placeholder created by cutting a graph - :param node_id: a node name that is cut - :param is_out_port: it is True iff output port is cut - :param port: a port number - :return: a name of new placeholder created by cutting a graph - """ - port_type = '_out' if is_out_port else '' - return '{}/placeholder{}_port_{}'.format(node_id, port_type, port) - - -def create_params_with_custom_types(packed_user_shapes: [None, dict]): - """ - Compute a list of placeholder names for which an user specifies custom type - :param packed_user_shapes: packed data that contains input node names, - their port numbers, shapes and data types - :return: a list of placeholder names for which an user specifies custom type - Example of packed_user_shapes dictionary: - packed_user_shapes = - { - 'node_ID': - [ - {'shape': None, 'in': 0}, - {'shape': None, 'in': 1}, - ], - 'node_1_ID': - [ - {'shape': [1, 227, 227, 3], 'port': None, 'data_type': np.int32} - ], - 'node_2_ID': - [ - {'shape': None, 'out': 3} - ] - } - For which the function returns a list ['node_1_ID'] because this node only has custom data type - """ - if packed_user_shapes is None: - return [] - - params_with_custom_types = [] - for input_name in packed_user_shapes: - for desc in packed_user_shapes[input_name]: - p_name = input_name - if 'port' in desc and desc['port'] is None: # neither input nor output port specified - user_defined_type = desc.get('data_type', None) - else: # need to check the particular port the Parameter was created for - p_name = get_new_placeholder_name(input_name, 'out' in desc, - desc['out'] if 'out' in desc else desc['in']) - user_defined_type = desc.get('data_type', None) - if user_defined_type is not None: - params_with_custom_types.append(p_name) - return params_with_custom_types - -def get_available_transformations(): - try: - from openvino._offline_transformations import apply_low_latency_transformation # pylint: disable=import-error,no-name-in-module - from openvino._offline_transformations import apply_make_stateful_transformation # pylint: disable=import-error,no-name-in-module - from openvino._offline_transformations import apply_pruning_transformation # pylint: disable=import-error,no-name-in-module - return { - 'MakeStateful': apply_make_stateful_transformation, - 'LowLatency2': apply_low_latency_transformation, - 'Pruning': apply_pruning_transformation, - } - except Exception as e: - return {} - - -# net should be openvino.runtime.Model type, but OV is still optional dependency -def 
apply_user_transformations(func: object, transforms: list): - available_transformations = get_available_transformations() - - for name, args in transforms: - if name not in available_transformations.keys(): - raise Error("Transformation {} is not available.".format(name)) - - available_transformations[name](func, **args) - - -def apply_moc_transformations(func: object): - from openvino._offline_transformations import apply_moc_transformations # pylint: disable=import-error,no-name-in-module - apply_moc_transformations(func, cf=False, smart_reshape=True) - - -def apply_moc_legacy_transformations(func: object, params_with_custom_types: List[str]): - from openvino._offline_transformations import apply_moc_legacy_transformations # pylint: disable=import-error,no-name-in-module - apply_moc_legacy_transformations(func, params_with_custom_types) - - -def compress_model(func: object): - from openvino._offline_transformations import compress_model_transformation # pylint: disable=import-error,no-name-in-module - compress_model_transformation(func) - -def apply_fused_names_cleanup(func: object): - from openvino._offline_transformations import apply_fused_names_cleanup # pylint: disable=import-error,no-name-in-module - apply_fused_names_cleanup(func) - - -def apply_offline_transformations(func: Model, argv: argparse.Namespace): - from openvino.tools.mo.back.preprocessing import apply_preprocessing # pylint: disable=no-name-in-module,import-error - - # Apply preprocessing (mean/scale/reverse_channels/convert_layout/etc) - apply_preprocessing(ov_function=func, argv=argv) - - from openvino._offline_transformations import apply_moc_transformations as moc_transformations # pylint: disable=import-error,no-name-in-module - moc_transformations(func, cf=argv.static_shape, smart_reshape=True) - - params_with_custom_types = create_params_with_custom_types(argv.packed_user_shapes) - apply_moc_legacy_transformations(func, params_with_custom_types) - apply_user_transformations(func, parse_transform(argv.transform)) - - if "compress_to_fp16" in argv and argv.compress_to_fp16: - compress_model(func) - - apply_fused_names_cleanup(func) - - return func - diff --git a/tools/mo/openvino/tools/mo/back/op_versioning.py b/tools/mo/openvino/tools/mo/back/op_versioning.py deleted file mode 100644 index 3d827395f49eb3..00000000000000 --- a/tools/mo/openvino/tools/mo/back/op_versioning.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.error import Error - - -class OpVersioning(BackReplacementPattern): - enabled = False - - opset_1_types = set(map(lambda s: s.lower(), [ - "Abs", - "Acos", - "Add", - "Asin", - "Atan", - "AvgPool", - "BatchNormInference", - "BinaryConvolution", - "Broadcast", - "CTCGreedyDecoder", - "Ceiling", - "Clamp", - "Concat", - "Const", # leave this type not to change the whole IR reading infrastructure - "Constant", - "Convert", - "ConvertLike", - "Convolution", - "ConvolutionBackpropData", - "Cos", - "Cosh", - 'DeformableConvolution', - "DeformablePSROIPooling", - "DepthToSpace", - "DetectionOutput", - "Divide", - "Elu", - "Equal", - "Erf", - "Exp", - "Eye", - "FakeQuantize", - "Floor", - "FloorMod", - "GRN", - "Gather", - "GatherTree", - "Greater", - "GreaterEqual", - "GroupConvolution", - "GroupConvolutionBackpropData", - "HardSigmoid", - "Interpolate", - 
"LRN", - "LSTMCell", - "LSTMSequence", - "Less", - "LessEqual", - "Log", - "LogicalAnd", - "LogicalNot", - "LogicalOr", - "LogicalXor", - #"MVN", # not really included into opset1 - "MatMul", - "MaxPool", - "Maximum", - "Minimum", - "Mod", - "Multiply", - "Negative", - "NonMaxSuppression", - "NormalizeL2", - "NotEqual", - "OneHot", - "PReLU", - "PSROIPooling", - "Pad", - "Parameter", - "Power", - "PriorBox", - "PriorBoxClustered", - "Proposal", - #"ROIPooling", # not really included into opset1 - "Range", - "ReLU", - "ReduceLogicalAnd", - "ReduceLogicalOr", - "ReduceMax", - "ReduceMean", - "ReduceMin", - "ReduceProd", - "ReduceSum", - "RegionYolo", - #"ReorgYolo", # not really included into opset1 - "Reshape", - "Result", - "ReverseSequence", - "Select", - "Selu", - "ShapeOf", - "ShuffleChannels", - "Sigmoid", - "Sign", - "Sin", - "Sinh", - "Softmax", - "SpaceToDepth", - "Split", - "Sqrt", - "SquaredDifference", - "Squeeze", - "StridedSlice", - "Subtract", - "Tan", - "Tanh", - "TensorIterator", - "Tile", - "TopK", - "Transpose", - "Unsqueeze", - "VariadicSplit", - ])) - - opset_1_experimental_ops = set(map(lambda s: s.lower(), [ - "ExperimentalDetectronGenerateProposalsSingleImage", - "ExperimentalDetectronTopKROIs", - "ExperimentalDetectronROIFeatureExtractor", - "ExperimentalDetectronDetectionOutput", - "ExperimentalDetectronPriorGridGenerator", - ])) - - # Several ops were added to opset1 by mistake, now they are marked as belonging to opset2 - opset_2_legacy_ops = set(map(lambda s: s.lower(), [ - "MVN", - "ReorgYolo", - "ROIPooling", - ])) - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(): - node_type = node.soft_get('type').lower() - name = node.soft_get('name', node.id) - - if node.soft_get('version', None) == 'opset1' and node_type not in self.opset_1_types \ - and node_type not in self.opset_2_legacy_ops: - raise Error('Node {} has `version` attribute set to `{}`, but it is a reserved word, ' - 'please use another'.format(name, node.version)) - - if not node.has_valid('version'): - if node_type in self.opset_1_types: - node['version'] = 'opset1' - elif node_type in self.opset_1_experimental_ops: - node['version'] = 'experimental' - elif node_type in self.opset_2_legacy_ops: - node['version'] = 'opset2' - else: - node['version'] = 'extension' - log.error('Please set `version` attribute for node {} with type={}' - ''.format(name, node.soft_get('type')), extra={'is_warning': True}) diff --git a/tools/mo/openvino/tools/mo/back/pass_separator.py b/tools/mo/openvino/tools/mo/back/pass_separator.py deleted file mode 100644 index 309b262bcececb..00000000000000 --- a/tools/mo/openvino/tools/mo/back/pass_separator.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class BackStart(BackReplacementPattern): - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.ApplyPermutations import ApplyPermutation - return [ApplyPermutation] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - pass - - -class BackFinish(BackReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - return [] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - pass diff --git a/tools/mo/openvino/tools/mo/back/preprocessing.py 
b/tools/mo/openvino/tools/mo/back/preprocessing.py deleted file mode 100644 index 79483b9679892d..00000000000000 --- a/tools/mo/openvino/tools/mo/back/preprocessing.py +++ /dev/null @@ -1,446 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import logging as log -from copy import copy - -from openvino.preprocess import PrePostProcessor # pylint: disable=no-name-in-module,import-error -# pylint: disable=no-name-in-module,import-error -from openvino.runtime import Model, Layout, PartialShape, layout_helpers - -from openvino.tools.mo.moc_frontend.layout_utils import update_layout_to_dict -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def update_mean_scale_to_dict(input_nodes: list, mean_scale_val, scale): - """ - Internal function. Updates mean/scale values from array to dictionary - :param: input_nodes Inputs of model - :param: mean_scale_val Parsed 'mean_scale_val' object from command line arguments - :param: scale Global scale factor for all inputs from scale command line arguments - """ - if not isinstance(mean_scale_val, dict): - if len(mean_scale_val) != len(input_nodes): - raise Error('Numbers of inputs and mean/scale values do not match. ' + refer_to_faq_msg(61)) - data = copy(mean_scale_val) - mean_scale_val = {} - for idx, node in enumerate(input_nodes): - names_list = list(node.get_tensor().get_names()) - names_list.sort() - if not names_list: - continue - node_name = names_list[0] - mean_scale_val.update( - { - node_name: { - 'mean': data[idx][0], - 'scale': data[idx][1] - } - } - ) - - if scale: - for node in input_nodes: - names_list = list(node.get_tensor().get_names()) - names_list.sort() - if not names_list: - continue - node_name = names_list[0] - old_val = mean_scale_val[node_name] if node_name in mean_scale_val else None - mean_scale_val.update( - { - node_name: { - 'mean': old_val['mean'] if old_val and 'mean' in old_val else None, - 'scale': scale - } - } - ) - return mean_scale_val - - -def check_keys_valid(ov_function: Model, dict_to_validate: dict, search_outputs: bool): - """ - Internal function: checks if keys from cmd line arguments correspond to ov_function's inputs/outputs - Throws if some key is not found - Throws if some different keys point to the same actual input/output - """ - nodes_used = {} - nodes = ov_function.inputs - if search_outputs: - nodes += ov_function.outputs - - # We need to replace all node names from dict to tensor names - rename_dict = {} - # Find names for replacing - for name in dict_to_validate.keys(): - for ov_node in nodes: - if name in ov_node.get_tensor().get_names(): - break - elif name == ov_node.get_node().get_friendly_name(): - assert len(ov_node.get_tensor().get_names()) > 0, 'Node must have at least one tensor name' - new_name = list(ov_node.get_tensor().get_names())[0] - rename_dict[name] = new_name - break - - # Replace found node names with tensor names - for name, new_name in rename_dict.items(): - assert name in dict_to_validate, 'Key {} is not in initial dict'.format(name) - assert new_name not in dict_to_validate, 'Key {} is already in initial dict'.format(new_name) - dict_to_validate[new_name] = dict_to_validate[name] - del dict_to_validate[name] - - # validate the dict - for name in dict_to_validate.keys(): - node_found = False - for ov_node in nodes: - if name in ov_node.get_tensor().get_names(): - if ov_node in nodes_used: - raise Error('Key for {} and {} point to same model input/output.' 
- .format(name, nodes_used[ov_node])) - nodes_used[ov_node] = name - node_found = True - break - - if not node_found: - if not search_outputs: - raise Error('Input with name {} wasn\'t found! {}'.format(name, refer_to_faq_msg(83))) - else: - raise Error('Input/Output with name {} wasn\'t found! {}'.format(name, refer_to_faq_msg(83))) - - -def update_layout_is_input_flag(ov_function: Model, layout_values: dict): - """ - Internal function: updates layout_values with flag whether each layout belongs to input or to output - """ - for name, layout_value in layout_values.items(): - layout_value['is_input'] = False - for ov_input in ov_function.inputs: - if name in ov_input.get_tensor().get_names(): - layout_value['is_input'] = True - break - return layout_values - - -def find_channels_dimension(shape: PartialShape, num_channels: int, name: str, layout_values): - """ - Internal function. Finds dimension index matching with expected channels number - Raises error if there is no candidates or number of candidates is > 1 - :param: shape Parameter's partial shape - :param: num_channels Number of channels to find in shape - :param: name Parameter's name, used for Error-handling purposes - :param: layout_values Existing source/target layout items specified by user - :return: updated layout items with guessed layouts - """ - if shape.rank.is_dynamic: - raise Error('Can\'t determine channels dimension for dynamic shape for parameter {}.' - .format(name)) - - dim_idx_found = -1 - for dim_idx in range(shape.rank.get_length()): - dim = shape.get_dimension(dim_idx) - if dim.is_static and dim.get_length() == num_channels: - if dim_idx_found >= 0: - raise Error('Can\'t determine channels dimension for {}. ' - 'Input shape is {}, needed channels {}. ' - 'Conflicting dimensions: {} and {}. Please specify layout manually.' - .format(name, shape, num_channels, dim_idx_found, dim_idx)) - dim_idx_found = dim_idx - if dim_idx_found < 0: - raise Error('Can\'t determine channels dimension for {}. ' - 'Input shape is {}, needed channels {}' - .format(name, shape, num_channels)) - - # Restrict guessed channels index to particular position depending on tensor shape(3d, 4d, 5d) - if shape.rank.get_length() == 3: - # CHW or HWC, possible channels index is 0 or 2 - if dim_idx_found != 0 and dim_idx_found != 2: - raise Error('Can\'t determine channels dimension for 3D input {} (CHW or HWC) with shape {}. ' - 'Please specify layout containing \'C\' channels manually.'.format(name, shape)) - elif shape.rank.get_length() == 4: - # NCHW or NHWC, possible channels index is 1 or 3 - if dim_idx_found != 1 and dim_idx_found != 3: - raise Error('Can\'t determine channels dimension for 4D input {} (NCHW or NHWC) with shape {}. ' - 'Please specify layout containing \'C\' channels manually.'.format(name, shape)) - elif shape.rank.get_length() == 5: - # NCDHW or NDHWC, possible channels index is 1 or 4 - if dim_idx_found != 1 and dim_idx_found != 4: - raise Error('Can\'t determine channels dimension for 5D input {} (NCDHW or NDHWC) with shape {}. ' - 'Please specify layout containing \'C\' channels manually.'.format(name, shape)) - else: - raise Error('Can\'t determine channels dimension for {}D input {} with shape {}.' - 'Please specify layout containing \'C\' channels manually.' - .format(shape.rank.get_length(), name, shape)) - - layout_str = "?" 
* shape.rank.get_length() - layout_str = layout_str[:dim_idx_found] + 'C' + layout_str[dim_idx_found + 1:] - layout_values[name] = { - 'source_layout': layout_str, - 'target_layout': None, - 'source_guessed': True, - 'is_input': True - } - return layout_values - - -def guess_source_layouts_by_mean_scale(ov_function: Model, layout_values, mean_scale_values: dict): - """ - Internal function. Try to guess source layout for input by its shape and/or framework - :param: ov_function Original model - :param: layout_values Existing source/target layout items specified by user - :param: mean_scale_values Dictionary with mean/scale values defined for each argument - :return: updated layout items with guessed layouts - """ - for ms_name, mean_scale in mean_scale_values.items(): - num_channels_mean = len(mean_scale['mean']) if mean_scale['mean'] is not None else 0 - num_channels_scale = len(mean_scale['scale']) if hasattr(mean_scale['scale'], '__len__') else 0 - - if num_channels_mean > 1 and \ - num_channels_scale > 1 and \ - num_channels_mean is not num_channels_scale: - raise Error('Mean/Scale values for {} have different sizes: {} {}' - .format(ms_name, num_channels_mean, num_channels_scale)) - - need_guess_channels = num_channels_mean > 1 or num_channels_scale > 1 - if not need_guess_channels: # Mean/scale is complex and needs 'channels' specified in layout - continue - - num_channels = num_channels_mean if num_channels_mean > 1 else num_channels_scale - - for i in range(0, len(ov_function.inputs)): - ov_input = ov_function.input(i) - - if not ov_function.get_parameters()[i].layout.empty: - continue - - if ms_name not in ov_input.get_tensor().get_names(): - continue - - layout_item = None - for name in ov_input.get_tensor().get_names(): - if name in layout_values: - layout_item = layout_values[name] - break - - if layout_item is not None: - # User specified some layout, skip guessing - continue - - # Guess layout is applicable only when number of channels is '3' - if num_channels != 3: - raise Error('Can\'t determine channels dimension for {}. ' - 'When number of mean/scale values is {} (not 3), ' - 'please specify layout for input manually'.format(ms_name, num_channels)) - - layout_values = find_channels_dimension(shape=ov_input.get_partial_shape(), - num_channels=num_channels, - name=ms_name, - layout_values=layout_values) - return layout_values - - -def check_suitable_for_reverse(layout: Layout, ov_input): - """ - Internal function. Checks if input with layout is suitable for reversing channels - :param: layout Existing source/target layout items specified by user - :param: ov_input Model's input - :return: True if reverse channels can be applied to input - """ - if not layout_helpers.has_channels(layout): - return False - if ov_input.get_partial_shape().rank.is_dynamic: - return False - - c_idx = layout_helpers.channels_idx(layout) - rank = ov_input.get_partial_shape().rank.get_length() - if c_idx < 0: - c_idx += rank - if c_idx >= rank: - raise Error('Layout {} for input {} is inconsistent with shape {}'.format( - layout, ov_input.get_tensor().get_any_name(), ov_input.get_partial_shape())) - c_num = ov_input.get_partial_shape()[c_idx] - return c_num.is_dynamic or c_num.get_length() == 3 - - -def guess_source_layouts_for_reverse_channels(ov_function: Model, layout_values): - """ - Internal function. 
Try to guess source layout for input by finding dimension with size=3 (RGB/BGR) - Additionally checks existing layouts and detects suitable inputs for reversing of input channels - :param: ov_function Original model - :param: layout_values Existing source/target layout items specified by user - :return: array with suitable parameters for reversing of input channels - """ - all_params = [] - suitable_params = [] - for i in range(0, len(ov_function.inputs)): - ov_input = ov_function.input(i) - param_info = [ov_input.get_tensor().get_any_name(), ov_input.get_partial_shape()] - all_params.append(param_info) - - if not ov_function.get_parameters()[i].layout.empty: - if check_suitable_for_reverse(ov_function.get_parameters()[i].layout, ov_input): - suitable_params.append(param_info) - continue - - layout_item = None - first_name = ov_input.get_tensor().get_any_name() - for name in ov_input.get_tensor().get_names(): - if name in layout_values: - layout_item = layout_values[name] - break - - if layout_item is not None: - # RIC transformation is applied before changing layout so only source_layout - # should be checked (even is target_layout is also provided) - if layout_item.get('source_layout'): - if check_suitable_for_reverse(Layout(layout_item['source_layout']), ov_input): - suitable_params.append(param_info) - continue - - try: - layout_values = find_channels_dimension(shape=ov_input.get_partial_shape(), - num_channels=3, - name=first_name, - layout_values=layout_values) - except Error as e: - log.debug('Reverse input channels guess did not succeed {}'.format(e)) - else: - layout = layout_values[first_name].get('source_layout') - if layout and check_suitable_for_reverse(Layout(layout), ov_input): - suitable_params.append(param_info) - - if not len(suitable_params): - raise Error('Network has {} inputs overall, but none of them are suitable for input channels reversing.\n' - 'Suitable for input channel reversing inputs are 4-dimensional with 3 channels (in case of dynamic ' - 'dimensions C channel must be provided in a layout for this input)\nAll inputs: {}'.format( - len(all_params), all_params)) - elif len(suitable_params) < len(all_params): - log.error('Network has {} inputs overall, but only {} of them are suitable for input channels reversing.\n' - 'Suitable for input channel reversing inputs are 4-dimensional with 3 channels (in case of dynamic ' - 'dimensions C channel must be provided in a layout for this input)\nAll inputs: {}\n' - 'Suitable inputs {}'.format(len(all_params), len(suitable_params), all_params, suitable_params), - extra={'is_warning': True}) - return suitable_params - - -def update_tensor_names_to_first_in_sorted_list(values_dict: dict, ov_function: Model): - if not isinstance(values_dict, dict): - return values_dict - updated_dict = {} - used_nodes = {} - for name, value in values_dict.items(): - input_found = False - for input in ov_function.inputs: - tensor_names = list(input.names) - tensor_names.sort() - if not (name in tensor_names or name == input.node.get_friendly_name()): - continue - if input in used_nodes: - raise Error("Tensor names {} and {} refer to the same node.".format(name, used_nodes[input])) - used_nodes.update({input: name}) - updated_dict[tensor_names[0]] = value - input_found = True - break - if not input_found: - raise Error('Input with name {} wasn\'t found! 
{}'.format(name, refer_to_faq_msg(83))) - - return updated_dict - - -def apply_preprocessing(ov_function: Model, argv: argparse.Namespace): - """ - Applies pre-processing of model inputs by adding appropriate operations - On return, 'ov_function' object will be updated - Expected 'argv.mean_scale_values' formats examples: - a) Dict: {'inputName': {'mean': [1., 2., 3.], 'scale': [2., 4., 8.]}} - b) List: list(np.array([(np.array([1., 2., 3.]), np.array([2., 4., 6.])), - (np.array([7., 8., 9.]), np.array([5., 6., 7.]))) - Expected 'argv.layout_values' format examples: - a) Specific layouts for inputs and outputs - { 'input1': { - 'source_layout': 'nchw', - 'target_layout': 'nhwc' - }, - 'output2': { - 'source_layout': 'nhwc' - } - } - b) Layout for single input: {'': {'source_layout': 'nchw'}} - :param: ov_function OV function for applying mean/scale pre-processing - :param: argv Parsed command line arguments - """ - prep = PrePostProcessor(ov_function) - - if 'mean_scale_values' in argv and argv.mean_scale_values: - mean_scale_values = argv.mean_scale_values - else: - mean_scale_values = {} - - # mean_scale_values stores mean/scale values from command line with names which were set by user. - # For models with single input scale or mean may be unnamed, so name is set by first tensor name from - # names list. This may lead to different naming of preprocessing params for a single node and lead to error. - # To make naming for mean/scale values unified, names provided by user are renamed here - # by the first tensor name from sorted names list. - mean_scale_values = update_tensor_names_to_first_in_sorted_list(mean_scale_values, ov_function) - mean_scale_values = update_mean_scale_to_dict(input_nodes=ov_function.inputs, - mean_scale_val=mean_scale_values, - scale=argv.scale) - # On return, mean_scale_values is a dictionary with input names as key and mean/scale pair as value - # {'inputName': {'mean': [1., 2., 3.], 'scale': [2.]}} - - layout_values = {} - if 'layout_values' in argv and argv.layout_values: - layout_values = update_layout_to_dict(ov_function.inputs, argv.layout_values, - lambda ov_input: ov_input.get_tensor().get_names()) - - check_keys_valid(ov_function=ov_function, dict_to_validate=mean_scale_values, search_outputs=False) - check_keys_valid(ov_function=ov_function, dict_to_validate=layout_values, search_outputs=True) - - layout_values = update_layout_is_input_flag(ov_function, layout_values) - layout_values = guess_source_layouts_by_mean_scale(ov_function, layout_values, mean_scale_values) - need_reverse = 'reverse_input_channels' in argv and argv.reverse_input_channels - suitable_params_ric = [] - if need_reverse: - suitable_params_ric = guess_source_layouts_for_reverse_channels(ov_function=ov_function, - layout_values=layout_values) - - for node_name, layout_value in layout_values.items(): - if layout_value.get('source_layout'): - if layout_value.get('is_input'): - prep.input(node_name).model().set_layout(Layout(layout_value['source_layout'])) - else: - prep.output(node_name).model().set_layout(Layout(layout_value['source_layout'])) - if layout_value.get('target_layout'): - if layout_value.get('is_input'): - prep.input(node_name).tensor().set_layout(Layout(layout_value['target_layout'])) - else: - prep.output(node_name).tensor().set_layout(Layout(layout_value['target_layout'])) - - # Apply reverse_input_channels - if need_reverse: - for name, _ in suitable_params_ric: - prep.input(name).preprocess().reverse_channels() - log.debug('reverse_input_channels pre-processing applied 
to {}'.format(name)) - - for node_name, node_mean_scale_values in mean_scale_values.items(): - # Apply mean first, then scale - if node_mean_scale_values['mean'] is not None: - prep.input(node_name).preprocess().mean(node_mean_scale_values['mean']) - if node_mean_scale_values['scale'] is not None: - prep.input(node_name).preprocess().scale(node_mean_scale_values['scale']) - log.debug('Mean/Scale pre-processing applied to {}'.format(node_name)) - - # Apply pre-processing builder to a function - ov_function = prep.build() - - # Remove guessed layout values from ov_function (these values shall not be serialized to IR - for node_name, layout_value in layout_values.items(): - if layout_value.get('source_guessed') and \ - not layout_value.get('target_layout'): - # search for parameter object - for idx, ov_input in enumerate(ov_function.inputs): - if node_name in ov_input.get_tensor().get_names(): - log.debug('Clearing guessed layout {} for {}' - .format(layout_value['source_layout'], node_name)) - ov_function.get_parameters()[idx].layout = Layout() diff --git a/tools/mo/openvino/tools/mo/back/priorbox_mutation.py b/tools/mo/openvino/tools/mo/back/priorbox_mutation.py deleted file mode 100644 index f64a8654f963d7..00000000000000 --- a/tools/mo/openvino/tools/mo/back/priorbox_mutation.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.back.ForceStrictPrecision import ForceStrictPrecision -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class PriorboxMutation(BackReplacementPattern): - enabled = True - force_shape_inference = True - - def run_before(self): - return [ForceStrictPrecision] - - def pattern(self): - return dict( - nodes=[ - ('pb', {'type': lambda node_type: node_type in ['PriorBox', 'PriorBoxClustered']}) - ], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['pb'] - name = node.soft_get('name', node.id) - - graph.graph['cmd_params'].static_shape = False - - assert len(node.in_ports()) == 2 - - begin = Const(graph, {'value': mo_array([2], dtype=np.int32), 'name': name + '/ss_begin'}).create_node() - end = Const(graph, {'value': mo_array([4], dtype=np.int32), 'name': name + '/ss_end'}).create_node() - stride = Const(graph, {'value': mo_array([1], dtype=np.int32), 'name': name + '/ss_stride'}).create_node() - - shape_0 = Shape(graph, {'name': name + '/0_port'}).create_node() - ss_0 = StridedSlice(graph, {'name': name + '/ss_0_port', - 'begin_mask': mo_array([1], dtype=np.int32), - 'end_mask': mo_array([0], dtype=np.int32), - 'new_axis_mask': mo_array([0], dtype=np.int32), - 'shrink_axis_mask': mo_array([0], dtype=np.int32), - 'ellipsis_mask': mo_array([0], dtype=np.int32)}).create_node() - - shape_0.out_port(0).connect(ss_0.in_port(0)) - begin.out_port(0).connect(ss_0.in_port(1)) - end.out_port(0).connect(ss_0.in_port(2)) - stride.out_port(0).connect(ss_0.in_port(3)) - - source = 
node.in_port(0).get_connection().get_source() - node.in_port(0).disconnect() - source.connect(shape_0.in_port(0)) - ss_0.out_port(0).connect(node.in_port(0)) - - shape_1 = Shape(graph, {'name': name + '/1_port'}).create_node() - ss_1 = StridedSlice(graph, {'name': name + '/ss_1_port', - 'begin_mask': mo_array([1], dtype=np.int32), - 'end_mask': mo_array([0], dtype=np.int32), - 'new_axis_mask': mo_array([0], dtype=np.int32), - 'shrink_axis_mask': mo_array([0], dtype=np.int32), - 'ellipsis_mask': mo_array([0], dtype=np.int32)}).create_node() - - shape_1.out_port(0).connect(ss_1.in_port(0)) - begin.out_port(0).connect(ss_1.in_port(1)) - end.out_port(0).connect(ss_1.in_port(2)) - stride.out_port(0).connect(ss_1.in_port(3)) - - source = node.in_port(1).get_connection().get_source() - node.in_port(1).disconnect() - source.connect(shape_1.in_port(0)) - ss_1.out_port(0).connect(node.in_port(1)) - - ss_0['force_precision_in_ports'] = {1: 'int64', 2: 'int64', 3: 'int64'} - ss_1['force_precision_in_ports'] = {1: 'int64', 2: 'int64', 3: 'int64'} - - node['need_shape_inference'] = True - node['override_output_shape'] = True - node['V10_infer'] = True - unsqueeze = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]), {'name': name + '/unsqueeze'}) - naked_priorbox_name = name + '/naked_not_unsqueezed' - rename_nodes([(node, naked_priorbox_name), (unsqueeze, name)]) - - node.out_port(0).get_connection().set_source(unsqueeze.out_port(0)) - node.out_port(0).connect(unsqueeze.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/back/remove_last_softmax_pattern.py b/tools/mo/openvino/tools/mo/back/remove_last_softmax_pattern.py deleted file mode 100644 index d554bf7b4b5a8c..00000000000000 --- a/tools/mo/openvino/tools/mo/back/remove_last_softmax_pattern.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.eliminate import remove_op_node_with_data_node - - -class RemoveLastSoftMaxPattern(BackReplacementPattern): - enabled = True - graph_condition = [lambda graph: graph.graph['fw'] == 'kaldi' and graph.graph['cmd_params'].remove_output_softmax] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('softmax_node', dict(op='SoftMax')), - ('softmax_data', dict(kind='data')), - ('op_output', dict(op='Result')) - ], - edges=[ - ('softmax_node', 'softmax_data'), - ('softmax_data', 'op_output') - ] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - """ - Removes output SoftMax layer - :param graph: graph to operate on - :param match: dictionary with matched nodes - """ - if len(match['softmax_data'].out_nodes()) == 1: - remove_op_node_with_data_node(graph, match['softmax_node']) - else: - log.error("SoftMax is not last layer, so can't be removed", extra={'is_warning': True}) - - -class RemoveLastLogSoftMaxPattern(BackReplacementPattern): - enabled = True - graph_condition = [lambda graph: graph.graph['fw'] == 'kaldi' and graph.graph['cmd_params'].remove_output_softmax] - force_clean_up = True - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('input_data', {'kind': 'data'}), - ('sub_node', {'kind': 'op', 'op': 'Sub'}), - ('reduce_max_node', {'kind': 'op', 'op': 'ReduceMax'}), - ('reduce_max_node_data', {'kind': 'data'}), - 
('sub_node_data', {'kind': 'data'}), - ('exp', {'kind': 'op', 'op': 'Exp'}), - ('exp_data', {'kind': 'data'}), - ('reduce_sum_node', {'kind': 'op', 'op': 'ReduceSum'}), - ('reduce_sum_node_data', {'kind': 'data'}), - ('reduce_sum_axis', {'kind': 'op', 'op': 'Const'}), - ('reduce_sum_axis_data', {'kind': 'data'}), - ('log', {'kind': 'op', 'op': 'Log'}), - ('log_data', {'kind': 'data'}), - ('last_sub', {'kind': 'op', 'op': 'Sub'}), - ('last_sub_data', {'kind': 'data'}), - ('op_output', {'kind': 'op', 'op': 'Result'}), - ], - edges=[ - ('input_data', 'sub_node', {'in': 0}), - ('input_data', 'reduce_max_node', {'in': 0}), - ('reduce_max_node', 'reduce_max_node_data'), - ('reduce_max_node_data', 'sub_node', {'in': 1}), - ('sub_node', 'sub_node_data'), - ('sub_node_data', 'exp', {'out': 0, 'in': 0}), - ('exp', 'exp_data'), - ('exp_data', 'reduce_sum_node', {'in': 0}), - ('reduce_sum_node', 'reduce_sum_node_data'), - ('reduce_sum_axis', 'reduce_sum_axis_data'), - ('reduce_sum_axis_data', 'reduce_sum_node', {'in': 1}), - ('reduce_sum_node_data', 'log'), - ('log', 'log_data'), - ('log_data', 'last_sub', {'in': 1}), - ('last_sub', 'last_sub_data'), - ('sub_node_data', 'last_sub', {'out': 0, 'in': 0}), - ('last_sub_data', 'op_output'), - ] - ) - - expected_number_of_outputs = { - 'reduce_max_node': 1, 'reduce_sum_node': 1, 'exp': 1, 'log': 1, 'sub_node': 2, 'last_sub': 1 - } - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - """ - Removes output LogSoftMax layer - :param graph: graph to operate on - :param match: dictionary with matched nodes - """ - reduce_max_node = match['reduce_max_node'] - second_input_of_reduce_max = reduce_max_node.in_port(1).get_connection().get_source().node - if not second_input_of_reduce_max.has_valid('value') or len(second_input_of_reduce_max.value) != 1: - return - - reduce_sum_node = match['reduce_sum_node'] - second_input_of_reduce_sum = reduce_sum_node.in_port(1).get_connection().get_source().node - if not second_input_of_reduce_sum.has_valid('value') or len(second_input_of_reduce_sum.value) != 1: - return - if second_input_of_reduce_max.value[0] != second_input_of_reduce_sum.value[0]: - return - - for name, number in RemoveLastLogSoftMaxPattern.expected_number_of_outputs.items(): - if len(match[name].out_port(0).get_destinations()) != number: - return - - match['op_output'].in_port(0).get_connection().set_source(match['sub_node'].in_port(0).get_source()) diff --git a/tools/mo/openvino/tools/mo/back/replacement.py b/tools/mo/openvino/tools/mo/back/replacement.py deleted file mode 100644 index 0b13565718c52c..00000000000000 --- a/tools/mo/openvino/tools/mo/back/replacement.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils import class_registration -from openvino.tools.mo.utils.replacement_pattern import ReplacementPattern - - -class BackReplacementPattern(ReplacementPattern): - registered_ops = {} - registered_cls = [] - - def run_after(self): - from openvino.tools.mo.back.pass_separator import BackStart - return [BackStart] - - def run_before(self): - from openvino.tools.mo.back.pass_separator import BackFinish - return [BackFinish] - - @classmethod - def class_type(cls): - return class_registration.ClassType.BACK_REPLACER - - -ReplacementPattern.excluded_replacers.append(BackReplacementPattern) diff --git a/tools/mo/openvino/tools/mo/convert.py b/tools/mo/openvino/tools/mo/convert.py deleted file mode 100644 index d9bad0d11b1ae4..00000000000000 --- 
a/tools/mo/openvino/tools/mo/convert.py +++ /dev/null @@ -1,340 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import os -import pathlib -from collections import namedtuple -from typing import Any - -from openvino.runtime import PartialShape, Shape, Layout, Model -from openvino.tools.mo.convert_impl import _convert -from openvino.tools.mo.utils.cli_parser import get_all_cli_parser # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.utils.logger import get_logger_state, restore_logger_state # pylint: disable=no-name-in-module,import-error - -LayoutMap = namedtuple("LayoutMap", ["source_layout", "target_layout"], defaults=[None, None]) -InputCutInfo = namedtuple("InputInfo", ["name", "shape", "type", "value"], defaults=[None, None, None, None]) - - -def convert_model( - input_model: [str, pathlib.Path, Any] = None, - - # Optional parameters - help: bool = False, - framework: [str] = None, - - # Framework-agnostic parameters - input: [str, list, tuple, InputCutInfo] = None, - output: [str, list] = None, - input_shape: [str, PartialShape, Shape, list] = None, - example_input: Any = None, - batch: int = None, - mean_values: [str, dict, list] = (), - scale_values: [str, dict, list] = (), - scale: [str, float] = None, - reverse_input_channels: bool = False, - source_layout: [str, Layout, dict] = (), - target_layout: [str, Layout, dict] = (), - layout: [str, Layout, LayoutMap, list, dict] = (), - compress_to_fp16: bool = False, - extensions: [str, pathlib.Path, list, Any] = None, - transform: [str, list, tuple] = "", - transformations_config: [str, pathlib.Path] = None, - silent: bool = True, - log_level: str = 'ERROR', - version: bool = None, - progress: bool = False, - stream_output: bool = False, - share_weights: bool = False, - - # PaddlePaddle-specific parameters: - example_output: Any = None, - - # TensorFlow*-specific parameters - input_model_is_text: bool = None, - input_checkpoint: [str, pathlib.Path] = None, - input_meta_graph: [str, pathlib.Path] = None, - saved_model_dir: [str, pathlib.Path] = None, - saved_model_tags: [str, list] = None, - tensorflow_custom_operations_config_update: [str, pathlib.Path] = None, - tensorflow_object_detection_api_pipeline_config: [str, pathlib.Path] = None, - tensorboard_logdir: [str, pathlib.Path] = None, - tensorflow_custom_layer_libraries: [str, pathlib.Path] = None, - - # Caffe*-specific parameters: - input_proto: [str, pathlib.Path] = None, - caffe_parser_path: [str, pathlib.Path] = None, - k: [str, pathlib.Path] = None, - disable_omitting_optional: bool = False, - enable_flattening_nested_params: bool = False, - - # Kaldi-specific parameters: - counts: [str, pathlib.Path] = None, - remove_output_softmax: bool = False, - remove_memory: bool = False, - - **args -) -> Model: - """ - Converts the model from original framework to OpenVino Model. - - Args: - :param help: - Print available parameters. - :param framework: - Name of the framework used to train the input model. - - Framework-agnostic parameters: - :param input_model: - Model object in original framework (PyTorch, Tensorflow) or path to model file. - Tensorflow*: a file with a pre-trained model (binary or text .pb file after freezing). 
- Caffe*: a model proto file with model weights - - Supported formats of input model: - - PaddlePaddle - paddle.hapi.model.Model - paddle.fluid.dygraph.layers.Layer - paddle.fluid.executor.Executor - - PyTorch - torch.nn.Module - torch.jit.ScriptModule - torch.jit.ScriptFunction - - TF - tf.compat.v1.Graph - tf.compat.v1.GraphDef - tf.compat.v1.wrap_function - tf.compat.v1.session - - TF2 / Keras - tf.keras.Model - tf.keras.layers.Layer - tf.function - tf.Module - tf.train.checkpoint - - :param input: - Input can be set by passing a list of InputCutInfo objects or by a list - of tuples. Each tuple can contain optionally input name, input - type or input shape. Example: input=("op_name", PartialShape([-1, - 3, 100, 100]), Type(np.float32)). Alternatively input can be set by - a string or list of strings of the following format. Quoted list of comma-separated - input nodes names with shapes, data types, and values for freezing. - If operation names are specified, the order of inputs in converted - model will be the same as order of specified operation names (applicable for TF2, ONNX). - The shape and value are specified as comma-separated lists. The data type of input node is specified - in braces and can have one of the values: f64 (float64), f32 (float32), f16 (float16), i64 - (int64), i32 (int32), u8 (uint8), boolean (bool). Data type is optional. - If it's not specified explicitly then there are two options: if input - node is a parameter, data type is taken from the original node dtype, - if input node is not a parameter, data type is set to f32. Example, to set - `input_1` with shape [1,100], and Parameter node `sequence_len` with - scalar input with value `150`, and boolean input `is_training` with - `False` value use the following format: "input_1[1,100],sequence_len->150,is_training->False". - Another example, use the following format to set input port 0 of the node - `node_name1` with the shape [3,4] as an input node and freeze output - port 1 of the node `node_name2` with the value [20,15] of the int32 type - and shape [2]: "0:node_name1[3,4],node_name2:1[2]{i32}->[20,15]". - :param output: - The name of the output operation of the model or list of names. For TensorFlow*, - do not add :0 to this name.The order of outputs in converted model is the - same as order of specified operation names. - :param input_shape: - Input shape(s) that should be fed to an input node(s) of the model. Input - shapes can be defined by passing a list of objects of type PartialShape, - Shape, [Dimension, ...] or [int, ...] or by a string of the following - format. Shape is defined as a comma-separated list of integer numbers - enclosed in parentheses or square brackets, for example [1,3,227,227] - or (1,227,227,3), where the order of dimensions depends on the framework - input layout of the model. For example, [N,C,H,W] is used for ONNX* models - and [N,H,W,C] for TensorFlow* models. The shape can contain undefined - dimensions (? or -1) and should fit the dimensions defined in the input - operation of the graph. Boundaries of undefined dimension can be specified - with ellipsis, for example [1,1..10,128,128]. One boundary can be - undefined, for example [1,..100] or [1,3,1..,1..]. If there are multiple - inputs in the model, --input_shape should contain definition of shape - for each input separated by a comma, for example: [1,3,227,227],[2,4] - for a model with two inputs with 4D and 2D shapes. Alternatively, specify - shapes with the --input option. 
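The string and object forms of `input` documented above are easiest to follow with a concrete call. The sketch below is illustrative only: the model path and node names (`input_1`, `sequence_len`, `is_training`) are hypothetical, and it targets the legacy `openvino.tools.mo.convert_model` API that this change removes.

# Hedged usage sketch of the removed convert_model() API; file and node names are hypothetical.
import numpy as np
from openvino.runtime import PartialShape, Type
from openvino.tools.mo import convert_model

# String form: fix the shape of input_1, freeze sequence_len to 150 and is_training to False.
ov_model = convert_model(
    "model.pb",
    input="input_1[1,100],sequence_len->150,is_training->False",
)

# Equivalent object form for the shape/type part, matching the example in the docstring above.
ov_model = convert_model(
    "model.pb",
    input=[("input_1", PartialShape([1, 100]), Type(np.float32))],
)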
- :param example_input: - Sample of model input in original framework. - For PyTorch it can be torch.Tensor. - For Tensorflow it can be tf.Tensor or numpy.ndarray. - For PaddlePaddle it can be Paddle Variable. - :param batch: - Set batch size. It applies to 1D or higher dimension inputs. - The default dimension index for the batch is zero. - Use a label 'n' in --layout or --source_layout option to set the batch dimension. - For example, "x(hwnc)" defines the third dimension to be the batch. - :param mean_values: - Mean values to be used for the input image per channel. Mean values can - be set by passing a dictionary, where key is input name and value is mean - value. For example mean_values={'data':[255,255,255],'info':[255,255,255]}. - Or mean values can be set by a string of the following format. Values to - be provided in the (R,G,B) or [R,G,B] format. Can be defined for desired - input of the model, for example: "--mean_values data[255,255,255],info[255,255,255]". - The exact meaning and order of channels depend on how the original model - was trained. - :param scale_values: - Scale values to be used for the input image per channel. Scale values - can be set by passing a dictionary, where key is input name and value is - scale value. For example scale_values={'data':[255,255,255],'info':[255,255,255]}. - Or scale values can be set by a string of the following format. Values - are provided in the (R,G,B) or [R,G,B] format. Can be defined for desired - input of the model, for example: "--scale_values data[255,255,255],info[255,255,255]". - The exact meaning and order of channels depend on how the original model - was trained. If both --mean_values and --scale_values are specified, - the mean is subtracted first and then scale is applied regardless of - the order of options in command line. - :param scale: - All input values coming from original network inputs will be divided - by this value. When a list of inputs is overridden by the --input parameter, - this scale is not applied for any input that does not match with the original - input of the model. If both --mean_values and --scale are specified, - the mean is subtracted first and then scale is applied regardless of - the order of options in command line. - :param reverse_input_channels: - Switch the input channels order from RGB to BGR (or vice versa). Applied - to original inputs of the model if and only if a number of channels equals - 3. When --mean_values/--scale_values are also specified, reversing - of channels will be applied to user's input data first, so that numbers - in --mean_values and --scale_values go in the order of channels used - in the original model. In other words, if both options are specified, - then the data flow in the model looks as following: Parameter -> ReverseInputChannels - -> Mean apply-> Scale apply -> the original body of the model. - :param source_layout: - Layout of the input or output of the model in the framework. Layout can - be set by passing a dictionary, where key is input name and value is LayoutMap - object. Or layout can be set by string of the following format. Layout - can be specified in the short form, e.g. nhwc, or in complex form, e.g. - "[n,h,w,c]". Example for many names: "in_name1([n,h,w,c]),in_name2(nc),out_name1(n),out_name2(nc)". - Layout can be partially defined, "?" can be used to specify undefined - layout for one dimension, "..." can be used to specify undefined layout - for multiple dimensions, for example "?c??", "nc...", "n...c", etc. 
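Similarly, the preprocessing-related parameters described above (mean_values, scale_values, reverse_input_channels, source_layout) compose as in this hedged sketch; the file name and the 'data' input name are illustrative, not taken from a real model.

# Hedged preprocessing sketch for the removed convert_model() API; names are illustrative.
from openvino.tools.mo import convert_model

ov_model = convert_model(
    "model.onnx",
    mean_values={'data': [255, 255, 255]},         # mean is subtracted first ...
    scale_values={'data': [127.5, 127.5, 127.5]},  # ... then values are divided by scale
    reverse_input_channels=True,                   # RGB<->BGR swap, applied only to 3-channel inputs
    source_layout="nchw",                          # short-form layout string
)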
- :param target_layout: - Same as --source_layout, but specifies target layout that will be in - the model after processing by ModelOptimizer. - :param layout: - Combination of --source_layout and --target_layout. Can't be used - with either of them. If model has one input it is sufficient to specify - layout of this input, for example --layout nhwc. To specify layouts - of many tensors, names must be provided, for example: --layout "name1(nchw),name2(nc)". - It is possible to instruct ModelOptimizer to change layout, for example: - --layout "name1(nhwc->nchw),name2(cn->nc)". - Also "*" in long layout form can be used to fuse dimensions, for example "[n,c,...]->[n*c,...]". - :param compress_to_fp16: - If the original model has FP32 weights or biases, they are compressed - to FP16. All intermediate data is kept in original precision. Option - can be specified alone as "--compress_to_fp16", or explicit True/False - values can be set, for example: "--compress_to_fp16=False", or "--compress_to_fp16=True" - :param extensions: - Paths to libraries (.so or .dll) with extensions, comma-separated - list of paths, objects derived from BaseExtension class or lists of - objects. For the legacy MO path (if `--use_legacy_frontend` is used), - a directory or a comma-separated list of directories with extensions - are supported. To disable all extensions including those that are placed - at the default location, pass an empty string. - :param transform: - Apply additional transformations. 'transform' can be set by a list - of tuples, where the first element is transform name and the second element - is transform parameters. For example: [('LowLatency2', {{'use_const_initializer': - False}}), ...]"--transform transformation_name1[args],transformation_name2..." - where [args] is key=value pairs separated by semicolon. Examples: - "--transform LowLatency2" or - "--transform Pruning" or - "--transform LowLatency2[use_const_initializer=False]" or - "--transform "MakeStateful[param_res_names= - {'input_name_1':'output_name_1','input_name_2':'output_name_2'}]"" - Available transformations: "LowLatency2", "MakeStateful", "Pruning" - :param transformations_config: - Use the configuration file with transformations description or pass - object derived from BaseExtension class. Transformations file can - be specified as relative path from the current directory, as absolute - path or as relative path from the mo root directory. - :param silent: - Prevent any output messages except those that correspond to log level - equals ERROR, that can be set with the following option: --log_level. - By default, log level is already ERROR. - :param log_level: - Logger level of logging massages from MO. - Expected one of ['CRITICAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']. - :param version: - Version of Model Optimizer - :param progress: - Enable model conversion progress display. - :param stream_output: - Switch model conversion progress display to a multiline mode. - :param share_weights: - Map memory of weights instead reading files or share memory from input model. - Currently, mapping feature is provided only for ONNX models - that do not require fallback to the legacy ONNX frontend for the conversion. - - PaddlePaddle-specific parameters: - :param example_output: - Sample of model output in original framework. For PaddlePaddle it can be Paddle Variable. - - TensorFlow*-specific parameters: - :param input_model_is_text: - TensorFlow*: treat the input model file as a text protobuf format. 
If - not specified, the Model Optimizer treats it as a binary file by default. - :param input_checkpoint: - TensorFlow*: variables file to load. - :param input_meta_graph: - Tensorflow*: a file with a meta-graph of the model before freezing - :param saved_model_dir: - TensorFlow*: directory with a model in SavedModel format of TensorFlow - 1.x or 2.x version. - :param saved_model_tags: - Group of tag(s) of the MetaGraphDef to load, in string format, separated - by ','. For tag-set contains multiple tags, all tags must be passed in. - :param tensorflow_custom_operations_config_update: - TensorFlow*: update the configuration file with node name patterns - with input/output nodes information. - :param tensorflow_object_detection_api_pipeline_config: - TensorFlow*: path to the pipeline configuration file used to generate - model created with help of Object Detection API. - :param tensorboard_logdir: - TensorFlow*: dump the input graph to a given directory that should be - used with TensorBoard. - :param tensorflow_custom_layer_libraries: - TensorFlow*: comma separated list of shared libraries with TensorFlow* - custom operations implementation. - - Caffe*-specific parameters: - :param input_proto: - Deploy-ready prototxt file that contains a topology structure and - layer attributes - :param caffe_parser_path: - Path to Python Caffe* parser generated from caffe.proto - :param k: - Path to CustomLayersMapping.xml to register custom layers - :param disable_omitting_optional: - Disable omitting optional attributes to be used for custom layers. - Use this option if you want to transfer all attributes of a custom layer - to IR. Default behavior is to transfer the attributes with default values - and the attributes defined by the user to IR. - :param enable_flattening_nested_params: - Enable flattening optional params to be used for custom layers. Use - this option if you want to transfer attributes of a custom layer to IR - with flattened nested parameters. Default behavior is to transfer - the attributes without flattening nested parameters. 
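The transform and compress_to_fp16 options documented a few paragraphs above also read more clearly as code. A hedged sketch, again with a hypothetical model path:

# Hedged sketch of the 'transform' list-of-tuples form and FP16 compression described above.
from openvino.tools.mo import convert_model

ov_model = convert_model(
    "model.onnx",
    transform=[('LowLatency2', {'use_const_initializer': False})],
    compress_to_fp16=True,   # compress FP32 weights/biases to FP16 in the produced IR
)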
- - Kaldi-specific parameters: - :param counts: - Path to the counts file - :param remove_output_softmax: - Removes the SoftMax layer that is the output layer - :param remove_memory: - Removes the Memory layer and use additional inputs outputs instead - - Returns: - openvino.runtime.Model - """ - params = locals() - logger_state = get_logger_state() - del params['args'] - params.update(args) - cli_parser = get_all_cli_parser() - ov_model, _ = _convert(cli_parser, framework, params, True) - restore_logger_state(logger_state) - return ov_model diff --git a/tools/mo/openvino/tools/mo/convert_impl.py b/tools/mo/openvino/tools/mo/convert_impl.py deleted file mode 100644 index ae80e6a33064f5..00000000000000 --- a/tools/mo/openvino/tools/mo/convert_impl.py +++ /dev/null @@ -1,939 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import datetime -import logging as log -import os -import platform -import sys -import traceback -from collections import OrderedDict -from copy import deepcopy -from pathlib import Path - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm - -from openvino.tools.mo.back.SpecialNodesFinalization import RemoveConstOps, CreateConstNodesReplacement, NormalizeTI -from openvino.tools.mo.moc_frontend.check_config import legacy_transformations_config_used, \ - tensorflow_custom_operations_config_update_used, new_extensions_used # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.moc_frontend.pipeline import moc_pipeline # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.moc_frontend.moc_emit_ir import moc_emit_ir # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type -from openvino.tools.mo.pipeline.common import prepare_emit_ir -from openvino.tools.mo.pipeline.unified import unified_pipeline -from openvino.tools.mo.utils import import_extensions - -# pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.utils.cli_parser import check_available_transforms, \ - get_advanced_cli_options, get_available_front_ends, get_caffe_cli_options, \ - get_common_cli_options, get_freeze_placeholder_values, get_kaldi_cli_options, get_layout_values, \ - get_mean_scale_dictionary, get_onnx_cli_options, \ - get_placeholder_shapes, get_tf_cli_options, parse_transform, parse_tuple_pairs, \ - get_model_name_from_args, depersonalize, get_mo_convert_params, input_to_input_cut_info, \ - input_shape_to_input_cut_info, freeze_placeholder_to_input_cut_info - -from openvino.tools.mo.utils.error import Error, FrameworkError -from openvino.tools.mo.utils.get_ov_update_message import get_ov_update_message, \ - get_compression_message, get_ovc_message # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.utils.get_ov_update_message import get_try_legacy_fe_message -from openvino.tools.mo.utils.model_analysis import AnalysisResults -from openvino.tools.mo.utils.version import VersionChecker -from openvino.tools.mo.utils.guess_framework import deduce_legacy_frontend_by_namespace -from openvino.tools.mo.utils.logger import init_logger, progress_printer # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.utils.utils 
import refer_to_faq_msg, check_values_equal -from openvino.tools.mo.utils.telemetry_utils import send_params_info, send_framework_info, send_conversion_result, \ - init_mo_telemetry -from openvino.tools.mo.moc_frontend.check_config import legacy_extensions_used # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.moc_frontend.pytorch_frontend_utils import get_pytorch_decoder, extract_input_info_from_example # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.moc_frontend.paddle_frontend_utils import paddle_frontend_converter # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.moc_frontend.shape_utils import parse_input_shapes # pylint: disable=no-name-in-module,import-error - -# pylint: disable=no-name-in-module,import-error -from openvino.frontend import FrontEndManager, OpConversionFailure, ProgressReporterExtension, TelemetryExtension -from openvino.runtime import get_version as get_rt_version -from openvino.runtime import Type, PartialShape - -try: - from openvino.frontend.tensorflow.utils import type_supported_by_tf_fe, create_tf_graph_iterator, extract_model_graph # pylint: disable=no-name-in-module,import-error - tf_frontend_with_python_bindings_installed = True -except (ModuleNotFoundError, ImportError): - tf_frontend_with_python_bindings_installed = False - - -def load_extensions(argv: argparse.Namespace, is_tf: bool, is_caffe: bool, is_kaldi: bool, - is_onnx: bool): - extensions = None - if hasattr(argv, 'extensions') and argv.extensions and argv.extensions != '': - extensions = argv.extensions - if is_tf: - from openvino.tools.mo.front.tf.register_custom_ops import get_front_classes - import_extensions.load_dirs(argv.framework, extensions, get_front_classes) - elif is_caffe: - send_framework_info('caffe') - from openvino.tools.mo.front.caffe.register_custom_ops import get_front_classes - import_extensions.load_dirs(argv.framework, extensions, get_front_classes) - elif is_kaldi: - send_framework_info('kaldi') - from openvino.tools.mo.front.kaldi.register_custom_ops import get_front_classes - import_extensions.load_dirs(argv.framework, extensions, get_front_classes) - elif is_onnx: - send_framework_info('onnx') - from openvino.tools.mo.front.onnx.register_custom_ops import get_front_classes - import_extensions.load_dirs(argv.framework, extensions, get_front_classes) - - -def replace_ext(name: str, old: str, new: str): - base, ext = os.path.splitext(name) - log.debug("base: {}, ext: {}".format(base, ext)) - if ext == old: - return base + new - - -def print_argv(argv: argparse.Namespace, is_caffe: bool, is_tf: bool, is_kaldi: bool, is_onnx: bool, - model_name: str): - print('Model Optimizer arguments:') - props = OrderedDict() - props['common_args'] = get_common_cli_options(model_name) - props['advanced_args'] = get_advanced_cli_options() - if is_caffe: - props['caffe_args'] = get_caffe_cli_options() - if is_tf: - props['tf_args'] = get_tf_cli_options() - if is_kaldi: - props['kaldi_args'] = get_kaldi_cli_options() - if is_onnx: - props['onnx_args'] = get_onnx_cli_options() - - framework_specifics_map = { - 'common_args': 'Common parameters:', - 'advanced_args': 'Advanced parameters:', - 'caffe_args': 'Caffe specific parameters:', - 'tf_args': 'TensorFlow specific parameters:', - 'kaldi_args': 'Kaldi specific parameters:', - 'onnx_args': 'ONNX specific parameters:', - } - - lines = [] - for key in props: - lines.append(framework_specifics_map[key]) - for (op, desc) in props[key].items(): - if isinstance(desc, list): - 
lines.append('\t{}: \t{}'.format(desc[0], desc[1](getattr(argv, op, 'NONE')))) - else: - if op == 'k': - default_path = os.path.join(os.path.dirname(sys.argv[0]), - 'openvino/tools/mo/front/caffe/CustomLayersMapping.xml') - if getattr(argv, op, 'NONE') == default_path: - lines.append('\t{}: \t{}'.format(desc, 'Default')) - continue - lines.append('\t{}: \t{}'.format(desc, getattr(argv, op, 'NONE'))) - print('\n'.join(lines), flush=True) - - -def arguments_post_parsing(argv: argparse.Namespace): - use_legacy_frontend = argv.use_legacy_frontend - use_new_frontend = argv.use_new_frontend - if argv.extensions is None: - argv.extensions = [import_extensions.default_path()] - - if use_new_frontend and use_legacy_frontend: - raise Error('Options --use_new_frontend and --use_legacy_frontend must not be used simultaneously ' - 'in the Model Optimizer command-line') - - moc_front_end, available_moc_front_ends = get_moc_frontends(argv) - - if not moc_front_end and use_new_frontend: - raise Error('Option --use_new_frontend is specified but the Model Optimizer is unable to find new frontend. ' - 'Please ensure that your environment contains new frontend for the input model format or ' - 'try to convert the model without specifying --use_new_frontend option.') - - is_tf, is_caffe, is_kaldi, is_onnx = \ - deduce_legacy_frontend_by_namespace(argv) if not moc_front_end else [False, False, False, False] - - is_legacy_frontend = any([is_tf, is_caffe, is_kaldi, is_onnx]) - if not is_legacy_frontend and use_legacy_frontend: - raise Error('Option --use_legacy_frontend is specified but Model Optimizer does not have legacy frontend ' - 'for the input model format. Please try to convert the model without specifying --use_legacy_frontend option.') - - # handle a default case, i.e. use_new_frontend and use_legacy_frontend are not specified, when no frontend is found - if not is_legacy_frontend and not moc_front_end: - legacy_frameworks = ['tf', 'caffe', 'kaldi', 'onnx'] - frameworks = list(set(legacy_frameworks + available_moc_front_ends)) - if not argv.framework: - raise Error('Framework name can not be deduced from the given options: {}={}. ' - 'Please use --framework with one from the list: {}.', - '--input_model', argv.input_model, frameworks) - elif argv.framework not in frameworks: - if argv.framework == 'ir': - raise Error('OpenVINO IR is passed as input_model in convert_model/mo, the IR doesn\'t need ' - 'conversion, please use it in runtime for inference with read_model/compile_model.') - raise Error('Framework {} is not a valid target. Please use --framework with one from the list: {}. 
' + - refer_to_faq_msg(15), argv.framework, frameworks) - - if is_legacy_frontend: - if new_extensions_used(argv): - raise Error('New kind of extensions used on legacy path') - - if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph: - raise Error('Path to input model or saved model dir is required: use --input_model, --saved_model_dir or ' - '--input_meta_graph') - elif is_caffe and not argv.input_model and not argv.input_proto: - raise Error('Path to input model or input proto is required: use --input_model or --input_proto') - elif (is_kaldi or is_onnx) and not argv.input_model: - raise Error('Path to input model is required: use --input_model.') - - log.debug("Model Optimizer started") - - log.debug('Output model name would be {}{{.xml, .bin}}'.format(argv.model_name)) - - # if --input_proto is not provided, try to retrieve another one - # by suffix substitution from model file name - if is_caffe and not argv.input_proto: - argv.input_proto = replace_ext(argv.input_model, '.caffemodel', '.prototxt') - - if not argv.input_proto: - raise Error("Cannot find prototxt file: for Caffe please specify --input_proto - a " + - "protobuf file that stores topology and --input_model that stores " + - "pretrained weights. " + - refer_to_faq_msg(20)) - log.info('Deduced name for prototxt: {}'.format(argv.input_proto)) - - if not argv.silent: - print_argv(argv, is_caffe, is_tf, is_kaldi, is_onnx, argv.model_name) - - VersionChecker().check_runtime_dependencies(argv.silent) - - argv.data_type = 'FP32' # if compression was enabled will be restored back to 'FP16' after apply_offline_transformations - - # This is just to check that transform key is valid and transformations are available - check_available_transforms(parse_transform(argv.transform)) - - if argv.scale and argv.scale_values: - raise Error( - 'Both --scale and --scale_values are defined. Specify either scale factor or scale values per input ' + - 'channels. ' + refer_to_faq_msg(19)) - - if argv.scale and argv.scale < 1.0: - log.error("The scale value is less than 1.0. This is most probably an issue because the scale value specifies " - "floating point value which all input values will be *divided*.", extra={'is_warning': True}) - - if argv.input_model and (is_tf and argv.saved_model_dir): - raise Error('Both --input_model and --saved_model_dir are defined. ' - 'Specify either input model or saved model directory.') - if is_tf: - if argv.saved_model_tags is not None: - if ' ' in argv.saved_model_tags: - raise Error('Incorrect saved model tag was provided. 
Specify --saved_model_tags with no spaces in it') - argv.saved_model_tags = argv.saved_model_tags.split(',') - - if hasattr(argv, 'is_python_api_used') and argv.is_python_api_used: - python_api_params_parsing(argv) - else: - argv.inputs_list, argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes( - argv.input, argv.input_shape, argv.batch) - argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values( - argv.input, - argv.freeze_placeholder_with_value) - argv.unnamed_freeze_placeholder_with_value = {} - - argv.output = argv.output.split(',') if argv.output else None - argv.layout_values = get_layout_values(argv.layout, argv.source_layout, argv.target_layout) - mean_values = parse_tuple_pairs(argv.mean_values) - scale_values = parse_tuple_pairs(argv.scale_values) - mean_scale = get_mean_scale_dictionary(mean_values, scale_values, argv.input) - argv.mean_scale_values = mean_scale - - if not os.path.exists(argv.output_dir): - try: - os.makedirs(argv.output_dir) - except PermissionError as e: - raise Error("Failed to create directory {}. Permission denied! " + - refer_to_faq_msg(22), - argv.output_dir) from e - else: - if not os.access(argv.output_dir, os.W_OK): - raise Error("Output directory {} is not writable for current user. " + - refer_to_faq_msg(22), argv.output_dir) - - log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes)) - - load_extensions(argv, is_tf, is_caffe, is_kaldi, is_onnx) - - return argv - - -def check_fallback(argv: argparse.Namespace): - fallback_reasons = {} - - # Some frontend such as PDPD does not have legacy path so it has no reasons to fallback - if not any(deduce_legacy_frontend_by_namespace(argv)): - return fallback_reasons - - if argv.use_new_frontend: - return fallback_reasons - - fallback_reasons['extensions'] = legacy_extensions_used - fallback_reasons['transformations_config'] = legacy_transformations_config_used - fallback_reasons['tensorflow_custom_operations_config_update'] = tensorflow_custom_operations_config_update_used - - reasons = [reason for reason, is_applicable in fallback_reasons.items() if is_applicable(argv)] - return reasons - - -def update_fallback_with_conversion_error(use_new_frontend: bool, is_tf: bool, ex_msg: str, fallback_reasons: list): - import re - if not is_tf: - # this sort of fallback is only used by TensorFlow Frontend - return False - - if use_new_frontend: - # this option forces to use new TensorFlow Frontend - # so it is not possible for the fallback - return False - - # for TensorFlow FE we have a set of operations that should lead to the fallback to the legacy - conversion_error_re = r"^(\[TensorFlow\ Frontend\]\ Internal\ error\,\ no\ translator\ found\ for\ operation\(s\)\:\ )((\w+)(\,\ \w+)*)$" - conversion_error_match = re.findall(conversion_error_re, ex_msg, re.MULTILINE) - all_fallback_operations = [] - if len(conversion_error_match) < 1 or len(conversion_error_match[0]) != 4: - # no match for the fallback by unsupported operation - return False - - unsupported_operations = conversion_error_match[0][1].replace(" ", "").split(",") - fallback_operations = [operation for operation in unsupported_operations if operation in all_fallback_operations] - - if len(fallback_operations) == 0: - return False - - fallback_reasons.append("Fallback to the legacy TF FE due to operation(s): " + ', '.join(fallback_operations)) - return True - - -def get_default_frontends(): - # Set which frontend to use by default, values should be 'new' or 'legacy' - default_frontends = { - 
'onnx': 'new', - 'tf': 'new' - } - return default_frontends - - -def get_moc_frontends(argv: argparse.Namespace): - fem = argv.feManager - - # Read user flags: - use_legacy_frontend = argv.use_legacy_frontend - use_new_frontend = argv.use_new_frontend - - if not fem or use_legacy_frontend: - return None, [] - - available_moc_front_ends = get_available_front_ends(fem) - - if not argv.framework and argv.input_model: - moc_front_end = fem.load_by_model(argv.input_model) - if not moc_front_end: - return None, available_moc_front_ends - argv.framework = moc_front_end.get_name() - elif argv.framework in available_moc_front_ends: - moc_front_end = fem.load_by_framework(argv.framework) - else: - return None, [] - - default_frontends = get_default_frontends() - # Disable MOC frontend if default is set to legacy and no user override - if default_frontends.get(moc_front_end.get_name()) == 'legacy' and not use_new_frontend: - return None, available_moc_front_ends - - # This check as a workaround to skip IR frontend - if not moc_front_end.get_name() in available_moc_front_ends: - return None, available_moc_front_ends - - return moc_front_end, available_moc_front_ends - - -def prepare_ir(argv: argparse.Namespace): - # TODO: remove this workaround once new TensorFlow frontend supports non-frozen formats: checkpoint, MetaGraph, and SavedModel - # Now it converts all TensorFlow formats to the frozen .pb format in case new TensorFlow frontend - is_tf, _, _, _ = deduce_legacy_frontend_by_namespace(argv) - argv = arguments_post_parsing(argv) - t = tm.Telemetry() - - graph = None - ngraph_function = None - fallback_reasons = [] - moc_front_end, available_moc_front_ends = get_moc_frontends(argv) - if moc_front_end: - fallback_reasons = check_fallback(argv) - if len(fallback_reasons) == 0: - if is_tf and tf_frontend_with_python_bindings_installed and \ - type_supported_by_tf_fe(argv.input_model): - argv.input_model = create_tf_graph_iterator(argv.input_model, - argv.placeholder_shapes, - argv.placeholder_data_types, - getattr(argv, "example_input", None), - argv.share_weights) - try: - t.send_event("mo", "conversion_method", moc_front_end.get_name() + "_frontend") - moc_front_end.add_extension(TelemetryExtension("mo", t.send_event, t.send_error, t.send_stack_trace)) - moc_front_end.add_extension(ProgressReporterExtension(progress_printer(argv))) - if legacy_transformations_config_used(argv): - raise Error('Legacy extensions are not supported for the new frontend') - if legacy_extensions_used(argv): - raise Error('Legacy transformations configuration is not supported for the new frontend') - if tensorflow_custom_operations_config_update_used(argv) and is_tf: - raise Error('TensorFlow custom operation config is not supported for the new frontend') - if new_extensions_used(argv): - for extension in argv.extensions: - moc_front_end.add_extension(extension) - ngraph_function = moc_pipeline(argv, moc_front_end) - return graph, ngraph_function - except OpConversionFailure as ex: - # in some set of operations (TF1 While), we have to fallback to the Legacy TensorFlow Frontend - # this is the second attempt for the fallback - if not update_fallback_with_conversion_error(argv.use_new_frontend, is_tf, str(ex), fallback_reasons): - # re-throw exception for all frontends except TensorFlow FE - # and in case unexpected conversion failures - raise - - if len(fallback_reasons) > 0: - reasons_message = ", ".join(fallback_reasons) - load_extensions(argv, *list(deduce_legacy_frontend_by_namespace(argv))) - t.send_event("mo", 
"fallback_reason", reasons_message) - log.warning("The IR preparation was executed by the legacy MO path. " - "This is a fallback scenario applicable only for some specific cases. " - f"The detailed reason why fallback was executed: not supported {reasons_message} were used. " - "You can specify --use_new_frontend flag to force using the Frontend MO path to avoid additional checks. " + - refer_to_faq_msg(105)) - assert not hasattr(argv, 'is_fallback'), '`is_fallback` argument must not exist.' - argv.is_fallback = True - - t.send_event("mo", "conversion_method", "mo_legacy") - graph = unified_pipeline(argv) - - return graph, ngraph_function - - -def read_model(fem: FrontEndManager, path_to_xml: str): - # We have to separate fe object lifetime from fem to - # avoid segfault during object destruction. So fe must - # be destructed before fem object explicitly. - fe = fem.load_by_framework(framework="ir") - # *.xml/.*bin files are temporary created in the legacy scenario, so we cannot map the memory - share_weights = False - function = fe.convert(fe.load(path_to_xml, share_weights)) - return function - - -def emit_ir(graph: Graph, argv: argparse.Namespace, non_default_params: dict): - NormalizeTI().find_and_replace_pattern(graph) - for_graph_and_each_sub_graph_recursively(graph, RemoveConstOps().find_and_replace_pattern) - for_graph_and_each_sub_graph_recursively(graph, CreateConstNodesReplacement().find_and_replace_pattern) - - if 'feManager' in argv: - del argv.feManager - - mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None - input_names = deepcopy(graph.graph['input_names']) if 'input_names' in graph.graph else [] - - output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd() - orig_model_name = os.path.normpath(os.path.join(output_dir, argv.model_name)) - - def clear_tmp_ir_files(): - for suf in [".xml", ".bin", ".mapping"]: - # remove existing files - path_to_file = orig_model_name + "_tmp" + suf - if os.path.exists(path_to_file): - os.remove(path_to_file) - - try: - prepare_emit_ir(graph=graph, - data_type=graph.graph['cmd_params'].data_type, - output_dir=argv.output_dir, - output_model_name=argv.model_name, - mean_data=mean_data, - input_names=input_names, - meta_info=non_default_params, - use_temporary_path=True) - - fem = FrontEndManager() - func = read_model(fem, orig_model_name + "_tmp.xml") - except Exception as err: - raise Error('Exception occurred while serialization or reading of the temporary IR: {}'.format( - str(err), - )) from err - finally: - # This graph cleanup is required to avoid double memory consumption - graph.clear() - clear_tmp_ir_files() - - return_code = "not executed" - if not (argv.framework == 'tf' and argv.tensorflow_custom_operations_config_update): - try: - from openvino.tools.mo.back.offline_transformations import apply_offline_transformations # pylint: disable=no-name-in-module,import-error - func = apply_offline_transformations(func, argv) - if "compress_to_fp16" in argv and argv.compress_to_fp16: - # restore data_type cmd parameter - argv.data_type = 'FP16' - return_code = 0 - except Exception as e: - return_code = "failed" - log.error(e) - message = str(dict({ - "platform": platform.system(), - "mo_version": VersionChecker().get_mo_simplified_version(), - "ie_version": VersionChecker().get_ie_simplified_version(), - "python_version": sys.version, - "return_code": return_code - })) - t = tm.Telemetry() - t.send_event('mo', 'offline_transformations_status', message) - - if return_code != 0: - raise Error("offline 
transformations step has failed.") - - return func - - -def check_model_object(argv): - model = argv['input_model'] - if 'tensorflow' in sys.modules: - if tf_frontend_with_python_bindings_installed and extract_model_graph(argv): - return "tf" - if 'torch' in sys.modules: - import torch - if isinstance(model, (torch.nn.Module, torch.jit.ScriptFunction)): - return "pytorch" - try: - from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder - - if isinstance(model, TorchScriptPythonDecoder): - return "pytorch" - except Exception as e: - pass - - import io - if isinstance(model, io.BytesIO): - return 'onnx' - - if 'paddle' in sys.modules: - import paddle - if isinstance(model, paddle.hapi.model.Model) or isinstance(model, paddle.fluid.dygraph.layers.Layer) or isinstance(model, paddle.fluid.executor.Executor): - return "paddle" - - raise Error('Unknown model type: {}'.format(type(model))) - - -def driver(argv: argparse.Namespace, non_default_params: dict): - init_logger(argv.log_level.upper(), argv.silent) - - # Log dictionary with non-default cli parameters where complex classes are excluded. - log.debug(str(non_default_params)) - - start_time = datetime.datetime.now() - - graph, ngraph_function = prepare_ir(argv) - legacy_path = False - if graph is not None: - res_ngraph_function = emit_ir(graph, argv, non_default_params) - legacy_path = True - else: - res_ngraph_function = moc_emit_ir(ngraph_function, argv) - - if res_ngraph_function is None: - return res_ngraph_function - - if not argv.silent: - elapsed_time = datetime.datetime.now() - start_time - print('[ SUCCESS ] Total execution time: {:.2f} seconds. '.format(elapsed_time.total_seconds())) - try: - import resource - mem_usage = round(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024) - if sys.platform == 'darwin': - mem_usage = round(mem_usage / 1024) - print('[ SUCCESS ] Memory consumed: {} MB. '.format(mem_usage)) - except ImportError: - pass - - return res_ngraph_function, legacy_path - - -def args_dict_to_list(cli_parser, **kwargs): - # This method is needed to prepare args from convert_model() for args_parse(). - # The method will not be needed when cli_parser checks are moved from cli_parser to a separate pass. 
- import inspect - from openvino.tools.mo import convert_model - signature = inspect.signature(convert_model) - result = [] - for key, value in kwargs.items(): - if value is None: - continue - if key in signature.parameters and check_values_equal(signature.parameters[key].default, value): - continue - if check_values_equal(cli_parser.get_default(key), value): - continue - # skip parser checking for non str objects - if not isinstance(value, (str, bool)): - continue - result.append('--{}'.format(key)) - if not isinstance(value, bool): - result.append(value) - - return result - - -def get_non_default_params(argv, cli_parser): - import numbers - import inspect - from openvino.tools.mo import convert_model - - signature = inspect.signature(convert_model) - # make dictionary with parameters which have non-default values to be serialized in IR in rt_info - non_default_params = {} - for arg, arg_value in vars(argv).items(): - if arg in signature.parameters and check_values_equal(arg_value, signature.parameters[arg].default): - continue - if check_values_equal(arg_value, cli_parser.get_default(arg)): - continue - value = depersonalize(arg_value, arg) - # Skip complex classes in params to prevent - # serializing it to rt_info - if isinstance(value, (str, bool, numbers.Number)): - non_default_params[arg] = value - return non_default_params - - -def params_to_string(**kwargs): - all_params = {} - for key, value in get_mo_convert_params().items(): - all_params.update(value) - - for key, value in kwargs.items(): - if key in all_params: - param_data = all_params[key] - if param_data.to_string is not None: - kwargs[key] = param_data.to_string(value) - return kwargs - - -def add_line_breaks(text: str, char_num: int, line_break: str): - words = text.replace('\n', "\n ").split(" ") - cnt = 0 - for i, w in enumerate(words): - cnt += len(w) - if '\n' in w: - cnt = len(w) - w.find('\n') - 1 - if cnt > char_num: - if words[i][-1] not in ['\n', '\t']: - words[i] = w + '\n' - cnt = 0 - text = ' '.join(words).replace("\n ", "\n") - return line_break + text.replace("\n", line_break) - - -def show_mo_convert_help(): - mo_convert_params = get_mo_convert_params() - for group_name, group in mo_convert_params.items(): - print(group_name) - for param_name in group: - param_data = group[param_name] - text = param_data.description.replace(" ", '') - text = add_line_breaks(text, 56, "\n\t\t\t") - print(" --{} {}".format(param_name, text)) - print() - - -def input_model_is_object(argv): - # Input model can be set as object only for --input_model parameter. - # --saved_model_dir or meta specific options are only used to store paths to the input model. - if 'input_model' not in argv: - return False - if isinstance(argv['input_model'], (str, Path)): - return False - if argv['input_model'] is None: - return False - return True - - -def python_api_params_parsing(argv: argparse.Namespace): - """ - Parses params passed to convert_model and wraps resulting values into dictionaries or lists. - After working of this method following values are set in argv: - - argv.input, argv.inputs_list - list of input names. Both values are used in some parts of MO. - Could be good to refactor it and use only one of these values. - - argv.placeholder_shapes - dictionary where key is node name, value is PartialShape, - or list of PartialShape if node names were not set. - - argv.placeholder_data_types - dictionary where key is node name, value is node np.type, - or list of np.types if node names were not set. 
- - argv.freeze_placeholder_with_value - dictionary where key is node name, value is np.ndarray - - argv.unnamed_freeze_placeholder_with_value - list with np.ndarray - - :param argv: MO arguments - """ - # Parse input to list of InputCutInfo - inputs = input_to_input_cut_info(argv.input) - - # Make list of input names - input_names_list = [] - for inp in inputs: - if inp.name is not None: - input_names_list.append(inp.name) - if len(input_names_list) > 0: - assert len(input_names_list) == len(inputs), "--input parameter has unnamed inputs and named inputs. " \ - "Please either set names for all inputs, " \ - "or do not set names for all inputs." - argv.inputs_list = input_names_list - argv.input = ','.join(input_names_list) - - # Parse input_shape param and update InputCutInfo list - input_shape_to_input_cut_info(argv.input_shape, inputs) - - # Parse freeze_placeholder_with_value. - # values for freezing can be set both by named and unnamed approach if - # 'input' was used without names and 'freeze_placeholder_with_value' was used with names. - # So named and unnamed values are stored separately. - argv.freeze_placeholder_with_value, argv.unnamed_freeze_placeholder_with_value = \ - freeze_placeholder_to_input_cut_info(argv.freeze_placeholder_with_value, inputs) - - if len(input_names_list) > 0: - # Named inputs case - shape_dict = {} - data_type_dict = {} - for inp in inputs: - if inp.shape is not None: - # Wrap shape to PartialShape for uniformity of stored values - shape_dict[inp.name] = PartialShape(inp.shape) - else: - shape_dict[inp.name] = None - if inp.type is not None: - # Convert type to numpy type for uniformity of stored values - if isinstance(inp.type, str): - data_type_dict[inp.name] = destination_type_to_np_data_type(inp.type) - elif isinstance(inp.type, Type): - data_type_dict[inp.name] = inp.type.to_dtype().type - else: - data_type_dict[inp.name] = inp.type - argv.placeholder_shapes = shape_dict if shape_dict else None - argv.placeholder_data_types = data_type_dict if data_type_dict else {} - else: - # Unnamed inputs case - shape_list = [] - data_type_list = [] - for inp in inputs: - if inp.shape is not None: - # Wrap shape to PartialShape for uniformity of stored values - shape_list.append(PartialShape(inp.shape)) - if inp.type is not None: - # Convert type to numpy type for uniformity of stored values - if isinstance(inp.type, str): - data_type_list.append(destination_type_to_np_data_type(inp.type)) - elif isinstance(inp.type, Type): - data_type_list.append(inp.type.to_dtype().type) - else: - data_type_list.append(inp.type) - argv.placeholder_shapes = shape_list if shape_list else None - argv.placeholder_data_types = data_type_list if data_type_list else {} - - if argv.framework == "pytorch" and getattr(argv, "example_input", None) is not None: - extract_input_info_from_example(argv, inputs) - - -def pack_params_to_args_namespace(args: dict, cli_parser: argparse.ArgumentParser): - if len(args) > 0: - args_string = params_to_string(**args) - argv, _ = cli_parser.parse_known_args(args_dict_to_list(cli_parser, **args_string)) - - # get list of all available params for convert_model() - all_params = {} - for key, value in get_mo_convert_params().items(): - all_params.update(value) - - # check that there are no unknown params provided - for key, value in args_string.items(): - if key not in argv and key not in all_params.keys(): - raise Error("Unrecognized argument: {}".format(key)) - - # Non string params like input_model or extensions are ignored by parse_args() - # so we 
need to set them in argv separately - if value is not None and not check_values_equal(getattr(argv, key, None), value): - setattr(argv, key, value) - else: - argv = cli_parser.parse_args() - return argv - - -def update_args_for_saved_model_dir(args: dict): - """ - If directory is set in 'input_model' argument, the directory is considered as TF saved model. - In this case this method updates args and moves saved model directory to 'saved_model_dir' param. - :param args: dictionary with arguments from user - """ - if 'saved_model_dir' in args and args['saved_model_dir'] is not None and \ - 'input_model' in args and args['input_model'] is not None: - raise Error("Both --input_model and --saved_model_dir are defined. " - "Please specify either input_model or saved_model_dir directory.") - - if 'input_model' in args and isinstance(args['input_model'], (str, Path)) and os.path.isdir(args['input_model']): - args['saved_model_dir'] = args['input_model'] - args['input_model'] = None - - -def silent_is_false(argv: argparse.Namespace): - return argv is not None and hasattr(argv, 'silent') and argv.silent is False - - -def framework_is_tf(args, argv): - if input_model_is_object(args) and check_model_object(args) == "tf": - return True - if argv is not None: - is_tf, _, _, _ = deduce_legacy_frontend_by_namespace(argv) - return is_tf - return False - - -def _convert(cli_parser: argparse.ArgumentParser, framework, args, python_api_used): - if 'help' in args and args['help']: - show_mo_convert_help() - return None, None - ovc_message = get_ovc_message() - if ovc_message is not None: - print(ovc_message) - simplified_mo_version = VersionChecker().get_mo_simplified_version() - telemetry = init_mo_telemetry() - telemetry.start_session('mo') - telemetry.send_event('mo', 'version', simplified_mo_version) - # Initialize logger with 'ERROR' as default level to be able to form nice messages - # before arg parser deliver log_level requested by user - init_logger('ERROR', False) - argv = None - try: - model_framework = None - inp_model_is_object = input_model_is_object(args) - if inp_model_is_object: - model_framework = check_model_object(args) - if model_framework == "pytorch": - example_inputs = None - if 'example_input' in args and args['example_input'] is not None: - example_inputs = args['example_input'] - elif 'example_inputs' in args: - raise AssertionError("'example_inputs' argument is not recognized, maybe you meant to provide 'example_input'?") - - decoder = get_pytorch_decoder(args['input_model'], parse_input_shapes(args), example_inputs, args) - if model_framework == "paddle": - example_inputs = None - if 'example_input' in args and args['example_input'] is not None: - example_inputs = args['example_input'] - - example_outputs = None - if 'example_output' in args and args['example_output'] is not None: - example_outputs = args['example_output'] - paddle_runtime_converter = paddle_frontend_converter(args['input_model'], example_inputs, example_outputs) - pdmodel = paddle_runtime_converter.convert_paddle_to_pdmodel() - args['input_model'] = pdmodel - args['framework'] = model_framework - - update_args_for_saved_model_dir(args) - - argv = pack_params_to_args_namespace(args, cli_parser) - argv.is_python_api_used = python_api_used - - argv.feManager = FrontEndManager() - frameworks = list(set(['tf', 'caffe', 'kaldi', 'onnx'] + (get_available_front_ends(argv.feManager) - if argv.feManager else []))) - framework = argv.framework if hasattr(argv, 'framework') and argv.framework is not None else framework - if 
framework is not None: - assert framework in frameworks, "error: argument --framework: invalid choice: '{}'. " \ - "Expected one of {}.".format(framework, frameworks) - setattr(argv, 'framework', framework) - - # send telemetry with params info - send_params_info(argv, cli_parser) - - non_default_params = get_non_default_params(argv, cli_parser) - - if inp_model_is_object: - argv.model_name = "model" - if not hasattr(argv, "model_name") or argv.model_name is None: - argv.model_name = get_model_name_from_args(argv) - - if model_framework is not None: - if argv.framework is not None: - if argv.framework != model_framework: - raise Error("Provided model does not correspond to provided framework. The provided " - "framework is {}, the model type is {} which is expected to be {} framework.".format( - argv.framework, - type(argv.input_model), - model_framework)) - else: - argv.framework = model_framework - - ov_model, legacy_path = driver(argv, {"conversion_parameters": non_default_params}) - - if inp_model_is_object and model_framework == "paddle": - if paddle_runtime_converter: - paddle_runtime_converter.destroy() - - # add MO meta data to model - ov_model.set_rt_info(VersionChecker().get_mo_version(), "MO_version") - ov_model.set_rt_info(get_rt_version(), "Runtime_version") - ov_model.set_rt_info(str(legacy_path), "legacy_frontend") - for key, value in non_default_params.items(): - ov_model.set_rt_info(str(value), ["conversion_parameters", str(key)]) - - if silent_is_false(argv) or not python_api_used: - if 'compress_to_fp16' in argv and argv.compress_to_fp16: - print(get_compression_message()) - - ov_update_message = get_ov_update_message() - _, is_caffe, is_kaldi, _ = deduce_legacy_frontend_by_namespace(argv) - if ov_update_message is not None: - print(ov_update_message) - - send_conversion_result('success') - return ov_model, argv - - except Exception as e: - if silent_is_false(argv) or not python_api_used: - if isinstance(e, (FileNotFoundError, NotADirectoryError)): - log.error('File {} was not found'.format(str(e).split('No such file or directory:')[1])) - log.debug(traceback.format_exc()) - elif isinstance(e, Error): - analysis_results = AnalysisResults() - if analysis_results.get_messages() is not None: - for el in analysis_results.get_messages(): - log.error(el, extra={'analysis_info': True}) - log.error(e) - log.debug(traceback.format_exc()) - elif isinstance(e, FrameworkError): - log.error(e, extra={'framework_error': True}) - log.debug(traceback.format_exc()) - else: - log.error("-------------------------------------------------") - log.error("----------------- INTERNAL ERROR ----------------") - log.error("Unexpected exception happened.") - log.error("Please contact Model Optimizer developers and forward the following information:") - log.error(str(e)) - log.error(traceback.format_exc()) - log.error("---------------- END OF BUG REPORT --------------") - log.error("-------------------------------------------------") - is_fallback = getattr(argv, 'is_fallback', False) if argv is not None else False - if not argv.use_legacy_frontend and framework_is_tf(args, argv) and not is_fallback: - print(get_try_legacy_fe_message()) - - send_conversion_result('fail') - if python_api_used: - raise e.with_traceback(None) - else: - return None, argv diff --git a/tools/mo/openvino/tools/mo/front/ATenToEmbeddingBag.py b/tools/mo/openvino/tools/mo/front/ATenToEmbeddingBag.py deleted file mode 100644 index 1cda95e4edfcfd..00000000000000 --- a/tools/mo/openvino/tools/mo/front/ATenToEmbeddingBag.py +++ 
/dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.embedding_bag import EmbeddingBagOffsetsSum, EmbeddingBagPackedSum -from openvino.tools.mo.ops.rank import Rank -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_node -from openvino.tools.mo.ops.broadcast import Broadcast -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.unsqueeze import Unsqueeze -from openvino.tools.mo.utils.shape import node_to_get_shape_value_of_indices, get_canonical_axis_index_node, \ - get_shape_values_by_indices_node - - -class AtenToEmbeddingBag(FrontReplacementPattern): - """ - Converts the ATen layer to EmbeddingBag layer. - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='ATen', operator='embedding_bag'): - assert node.soft_get('mode') == 0, 'ATen::embedding_bag has unsupported mode, only "sum" ' \ - 'mode is supported for node {}.'.format(node.id) - node_name = node.soft_get('name', node.id) - rename_node(node, node_name + '/TBR') - is_packed = False - if len(node.in_ports()) < 3 or node.in_port(2).disconnected(): - is_packed = True - embedding_bag = EmbeddingBagPackedSum(graph, {'name': node_name}).create_node() - else: - embedding_bag = EmbeddingBagOffsetsSum(graph, {'name': node_name}).create_node() - node.in_port(2).get_connection().set_destination(embedding_bag.in_port(2)) - rename_node(embedding_bag, node_name) - node.in_port(0).get_connection().set_destination(embedding_bag.in_port(0)) - node.in_port(1).get_connection().set_destination(embedding_bag.in_port(1)) - node.out_port(0).get_connection().set_source(embedding_bag.out_port(0)) - if len(node.in_ports()) == 4 and not node.in_port(3).disconnected(): - if is_packed: - node.in_port(3).get_connection().set_destination(embedding_bag.in_port(2)) - else: - # connect per_sample_weights - node.in_port(3).get_connection().set_destination(embedding_bag.in_port(4)) - - weights_shape_node = Shape(graph, {'name': node_name + '/WeightsShape'}).create_node() - - weights_rank_node = Rank(graph, {'name': node_name + '/WeightsRank'}).create_node() - last_dim_node = get_canonical_axis_index_node(weights_rank_node, -1) - weights_last_dim = get_shape_values_by_indices_node(weights_shape_node, last_dim_node) - - weights_first_dim = node_to_get_shape_value_of_indices(weights_shape_node, [0]) - - zero_col_node = create_op_with_const_inputs(graph, Broadcast, {0: int64_array([0])}, - {'name': node_name + '/Broadcast'}) - zero_col_node.in_port(1).connect(weights_last_dim.out_port(0)) - - default_embeddings_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array(0)}, - {'name': node_name + '/Unsqueeze'}) - default_embeddings_node.in_port(0).connect(zero_col_node.out_port(0)) - - # expand embedding table with zeros - weights_concat = Concat(graph, {'axis': 0, 'in_ports_count': 2, - 'name': node_name + '/Concat'}).create_node() - embedding_bag.in_port(0).get_connection().set_destination(weights_concat.in_port(0)) - weights_concat.in_port(0).get_connection().add_destination(weights_shape_node.in_port(0)) - weights_concat.in_port(0).get_connection().add_destination(weights_rank_node.in_port(0)) - 
weights_concat.in_port(1).connect(default_embeddings_node.out_port(0)) - weights_concat.out_port(0).connect(embedding_bag.in_port(0)) - - # point default index to expanded part of embedding table - weights_first_dim.out_port(0).connect(embedding_bag.in_port(3)) diff --git a/tools/mo/openvino/tools/mo/front/ArgOpsSqueeze.py b/tools/mo/openvino/tools/mo/front/ArgOpsSqueeze.py deleted file mode 100644 index 465bcc39b0ce19..00000000000000 --- a/tools/mo/openvino/tools/mo/front/ArgOpsSqueeze.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.squeeze import Squeeze - - -class ArgOpsSqueeze(FrontReplacementSubgraph): - """ - In some frameworks ArgMax/ArgMin operation has keepdims attribute that indicates whether to stay a dimension - along which maximum is computed or not. In case of keepdims=0 this dimension should be removed but ArgMax/ArgMin - operation in IR format is not designed to cover this case. So we should additionally add Squeeze operation right - after ArgMax/ArgMin for this case. - """ - enabled = True - - def pattern(self): - return dict(nodes=[('node', dict(op=lambda x: x in ['ArgMax', 'ArgMin'], keepdims=0))], - edges=[]) - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['node'] - - connected_ports = [port for port in node.in_ports().values() if not port.disconnected()] - squeeze_node = Squeeze(graph, dict()).create_node([], dict(name=node.name + '/Squeeze')) - if len(connected_ports) == 2: - node.in_port(1).get_source().connect(squeeze_node.in_port(1)) - else: - axis_node = Const(graph, {'value': node.axis}).create_node() - squeeze_node.in_port(1).connect(axis_node.out_port(0)) - node.out_port(0).get_connection().set_source(squeeze_node.out_port(0)) - node.out_port(0).connect(squeeze_node.in_port(0)) - return [] diff --git a/tools/mo/openvino/tools/mo/front/AttributedClampNormalizer.py b/tools/mo/openvino/tools/mo/front/AttributedClampNormalizer.py deleted file mode 100644 index 5082585c3f7bfc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/AttributedClampNormalizer.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_node -from openvino.tools.mo.ops.clamp import Clamp - - -class AttributedClampNormalizer(FrontReplacementPattern): - """ - This transformation converts AttributedClamp operation (min/max are specified as attribute) to Clamp - operation. 
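# Hedged illustration (not part of the deleted MO sources): a minimal NumPy
# sketch of the equivalence behind the AttributedClamp -> Clamp rewrite
# described just above. An AttributedClamp node keeps 'min'/'max' as
# attributes; the rewritten Clamp receives the same bounds as constant inputs
# on ports 1 and 2, so the computed values match. Names below are illustrative,
# not MO API.
import numpy as np

def attributed_clamp(x, min_value, max_value):
    # bounds taken from node attributes
    return np.clip(x, min_value, max_value)

def clamp_with_const_inputs(x, min_const, max_const):
    # bounds arriving as separate Const inputs
    return np.clip(x, min_const, max_const)

x = np.linspace(-10.0, 10.0, 5, dtype=np.float32)
assert np.allclose(attributed_clamp(x, -1.0, 1.0),
                   clamp_with_const_inputs(x, np.float32(-1.0), np.float32(1.0)))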
- """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for attr_clamp in graph.get_op_nodes(op='AttributedClamp'): - original_name = attr_clamp.soft_get('name', attr_clamp.id) - - rename_node(attr_clamp, original_name + '/TBR') - min_value = attr_clamp.soft_get('min', np.finfo(np.float32).min) - max_value = attr_clamp.soft_get('max', np.finfo(np.float32).max) - new_clamp = create_op_with_const_inputs(graph, Clamp, - {1: float32_array(min_value), - 2: float32_array(max_value)}, - {'name': original_name}) - rename_node(new_clamp, original_name) - - attr_clamp.in_port(0).get_connection().set_destination(new_clamp.in_port(0)) - attr_clamp.out_port(0).get_connection().set_source(new_clamp.out_port(0)) - graph.remove_node(attr_clamp.id) diff --git a/tools/mo/openvino/tools/mo/front/AttributedGatherNormalizer.py b/tools/mo/openvino/tools/mo/front/AttributedGatherNormalizer.py deleted file mode 100644 index 7f468759575795..00000000000000 --- a/tools/mo/openvino/tools/mo/front/AttributedGatherNormalizer.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class AttributedGatherNormalizer(FrontReplacementOp): - op = "AttributedGather" - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['op'] - name = node.soft_get('name', node.id) - assert node.has_valid('axis') - - axis = Const(graph, {'name': name + '/axis', 'value': int64_array(node.axis)}).create_node() - gather = Gather(graph, {'name': name}).create_node() - node.in_port(0).get_connection().set_destination(gather.in_port(0)) - node.in_port(1).get_connection().set_destination(gather.in_port(1)) - axis.out_port(0).connect(gather.in_port(2)) - node.out_port(0).get_connection().set_source(gather.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/AttributedPadToPad.py b/tools/mo/openvino/tools/mo/front/AttributedPadToPad.py deleted file mode 100644 index eb802ad3d7da0d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/AttributedPadToPad.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ConvertLike import ConvertLike -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.pad import Pad - - -class AttributedPadToPad(FrontReplacementPattern): - """ - This transformation converts AttributedPad operation (begin/end paddings are specified as attribute) to Pad - operation (OpenVINO semantic). 
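# Hedged illustration (not part of the deleted MO sources): the data-level view
# of the AttributedPad -> Pad rewrite described just above. A single [rank, 2]
# 'pads' attribute is split into the separate pads_begin / pads_end constant
# inputs that the Pad operation expects; names below are illustrative only.
import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
pads = np.array([[1, 0],      # axis 0: pad 1 before, 0 after
                 [0, 2]])     # axis 1: pad 0 before, 2 after
fill_value = np.float32(0)

pads_begin, pads_end = pads[:, 0], pads[:, 1]
padded = np.pad(x, list(zip(pads_begin, pads_end)),
                mode='constant', constant_values=fill_value)
assert padded.shape == (2 + 1 + 0, 3 + 0 + 2)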
- """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for attr_pad in graph.get_op_nodes(op='AttributedPad'): - # save the original node name to use it in the new Pad op instance - original_name = attr_pad.soft_get('name', attr_pad.id) - - new_pad = Pad(graph, {'mode': attr_pad.soft_get('mode', None), }).create_node() - rename_nodes([(attr_pad, original_name + '/to_be_removed'), (new_pad, original_name)]) - - attr_pad.in_port(0).get_connection().set_destination(new_pad.in_port(0)) - new_pad.in_port(1).connect(Const(graph, {'value': attr_pad.pads[:, 0]}).create_node().out_port(0)) - new_pad.in_port(2).connect(Const(graph, {'value': attr_pad.pads[:, 1]}).create_node().out_port(0)) - if attr_pad.soft_get('mode') == 'constant': - # create Constant node of proper data type (equal to the data type of the Pad first input) - convert_pad_value = create_op_with_const_inputs(graph, ConvertLike, {0: attr_pad.fill_value}, - {'name': original_name + '/pad_value_convert'}) - convert_pad_value.in_port(1).connect(new_pad.in_port(0).get_source()) - new_pad.in_port(3).connect(convert_pad_value.out_port(0)) - - attr_pad.out_port(0).get_connection().set_source(new_pad.out_port(0)) - graph.remove_node(attr_pad.id) diff --git a/tools/mo/openvino/tools/mo/front/AttributedRandomUniformToRandomUniform.py b/tools/mo/openvino/tools/mo/front/AttributedRandomUniformToRandomUniform.py deleted file mode 100644 index 71095bd6078ba6..00000000000000 --- a/tools/mo/openvino/tools/mo/front/AttributedRandomUniformToRandomUniform.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.random_uniform import RandomUniform -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.utils.error import Error - - -class AttributedRandomUniformToRandomUniform(FrontReplacementPattern): - """ - This transformation converts AttributedRandomUniform operation (output shape, min value and max value - can be specified as attribute) to RandomUniform operation (OpenVINO semantic). 
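# Hedged illustration (not part of the deleted MO sources): the default
# handling in the AttributedRandomUniform -> RandomUniform rewrite described
# just above. A missing 'min_val'/'max_val' attribute falls back to 0 and 1 of
# the output type, and the output shape may come either from a 'shape'
# attribute or from input port 0. The helper below is a NumPy stand-in, not MO API.
import numpy as np

def attributed_random_uniform(shape, output_type=np.float32,
                              min_val=None, max_val=None, seed=0):
    min_val = output_type(0) if min_val is None else output_type(min_val)
    max_val = output_type(1) if max_val is None else output_type(max_val)
    rng = np.random.default_rng(seed)
    return rng.uniform(min_val, max_val, size=shape).astype(output_type)

sample = attributed_random_uniform((2, 3))
assert sample.dtype == np.float32 and sample.min() >= 0.0 and sample.max() < 1.0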
- """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for attr_random_uniform in graph.get_op_nodes(op='AttributedRandomUniform'): - original_name = attr_random_uniform.soft_get('name', attr_random_uniform.id) - - if not attr_random_uniform.has_valid('output_type'): - raise Error("RandomUniform should have valid ''output_type'' attribute.") - output_type = attr_random_uniform.soft_get('output_type') - - if attr_random_uniform.has_valid('min_val'): - min_val = attr_random_uniform['min_val'] - else: - min_val = output_type(0) - if attr_random_uniform.has_valid('max_val'): - max_val = attr_random_uniform['max_val'] - else: - max_val = output_type(1) - - port_value_dict = {1: min_val, 2: max_val} - - if not attr_random_uniform.has_port('in', 0) or attr_random_uniform.in_port(0).disconnected(): - if not attr_random_uniform.has_valid('shape'): - raise Error("RandomUniform should have valid ''shape'' attribute or input node on 0 port.") - else: - port_value_dict.update({0: attr_random_uniform.shape}) - - attrs = {'global_seed': attr_random_uniform.soft_get('global_seed', 0), 'op_seed': attr_random_uniform.soft_get('op_seed', 0), - 'output_type': output_type} - - new_random_uniform = create_op_with_const_inputs(graph, op=RandomUniform, port_value_dict=port_value_dict, - op_attrs=attrs) - rename_nodes([(attr_random_uniform, original_name + '/to_be_removed'), (new_random_uniform, original_name)]) - attr_random_uniform.out_port(0).get_connection().set_source(new_random_uniform.out_port(0)) - if new_random_uniform.in_port(0).disconnected(): - if attr_random_uniform.in_port(0).disconnected(): - raise Error('RandomUniform should have input node on 0 port.') - else: - new_random_uniform.in_port(0).connect(attr_random_uniform.in_port(0).get_connection().get_source()) - - graph.remove_node(attr_random_uniform.id) diff --git a/tools/mo/openvino/tools/mo/front/AttributedRollToRoll.py b/tools/mo/openvino/tools/mo/front/AttributedRollToRoll.py deleted file mode 100644 index 647069d2581200..00000000000000 --- a/tools/mo/openvino/tools/mo/front/AttributedRollToRoll.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.roll import Roll -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes - - -class AttributedRollToRoll(FrontReplacementPattern): - """ - This transformation converts AttributedRoll operation (axes and shift are specified as attributes) to Roll - operation (OpenVINO semantic). 
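# Hedged illustration (not part of the deleted MO sources): the Roll semantics
# behind the rewrite described just above. The 'shift' and 'axes' values that
# used to be node attributes become constant inputs of Roll, but the computed
# result is unchanged.
import numpy as np

x = np.arange(12).reshape(3, 4)
shift, axes = [1, -2], [0, 1]            # values previously stored as attributes
rolled = np.roll(x, shift, axis=axes)    # what the resulting Roll node computes
# the element at (0, 0) moves to ((0 + 1) % 3, (0 - 2) % 4) == (1, 2)
assert rolled[1, 2] == x[0, 0]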
- """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for attr_roll in graph.get_op_nodes(op='AttributedRoll'): - original_name = attr_roll.soft_get('name', attr_roll.id) - port_value_dict = {1: attr_roll.shift} - if attr_roll.has_valid('axes'): - port_value_dict.update({2: attr_roll.axes}) - - new_roll = create_op_with_const_inputs(graph, op=Roll, port_value_dict=port_value_dict) - rename_nodes([(attr_roll, original_name + '/to_be_removed'), (new_roll, original_name)]) - - attr_roll.in_port(0).get_connection().set_destination(new_roll.in_port(0)) - attr_roll.out_port(0).get_connection().set_source(new_roll.out_port(0)) - graph.remove_node(attr_roll.id) diff --git a/tools/mo/openvino/tools/mo/front/ExpandDimsToUnsqueeze.py b/tools/mo/openvino/tools/mo/front/ExpandDimsToUnsqueeze.py deleted file mode 100644 index 829448370ea3f5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/ExpandDimsToUnsqueeze.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class ExpandDimsToUnsqueeze(FrontReplacementPattern): - """ - Converts the 'ExpandDims' layer to Unsqueeze layer with two inputs: the input with data and input with the - dimensions to unsqueeze. - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.front.Pack import Pack - return [Pack] - - def find_and_replace_pattern(self, graph: Graph): - for expand_dims_node in graph.get_op_nodes(op='ExpandDims'): - if len(expand_dims_node.in_nodes()) == 1: - expand_axis = expand_dims_node.expand_axis - if not isinstance(expand_axis, np.ndarray): - expand_axis = int64_array([expand_axis]).flatten() - unsqueeze_node = Unsqueeze(graph, {'name': expand_dims_node.id + '/Unsqueeze'}).create_node() - unsqueeze_dims_node = Const(graph, {'name': expand_dims_node.id + '/Dims', - 'value': expand_axis}).create_node() - expand_dims_node.in_port(0).get_connection().set_destination(unsqueeze_node.in_port(0)) - expand_dims_node.out_port(0).get_connection().set_source(unsqueeze_node.out_port(0)) - unsqueeze_node.in_port(1).connect(unsqueeze_dims_node.out_port(0)) - elif len(expand_dims_node.in_nodes()) == 2: - # For Unsqueeze-13 from ONNX - expand_dims_name = expand_dims_node.soft_get('name', expand_dims_node.id) - unsqueeze_node = Unsqueeze(graph, {'name': expand_dims_name + '/Unsqueeze'}).create_node() - rename_nodes([(expand_dims_node, expand_dims_name + "/TBR"), (unsqueeze_node, expand_dims_name)]) - - expand_dims_node.in_port(0).get_connection().set_destination(unsqueeze_node.in_port(0)) - expand_dims_node.in_port(1).get_connection().set_destination(unsqueeze_node.in_port(1)) - expand_dims_node.out_port(0).get_connection().set_source(unsqueeze_node.out_port(0)) - else: - log.error('The ExpandDims node {} has wrong number of inputs'.format(expand_dims_node.soft_get('name'))) diff --git a/tools/mo/openvino/tools/mo/front/FakeQuantWithMinMaxVars.py b/tools/mo/openvino/tools/mo/front/FakeQuantWithMinMaxVars.py deleted file mode 100644 index d4fb36d0f775fe..00000000000000 --- a/tools/mo/openvino/tools/mo/front/FakeQuantWithMinMaxVars.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (C) 
2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from typing import Dict - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.elementwise import Sub, Div, Less, Round, Mul, Add, Greater -from openvino.tools.mo.ops.fakequantize import FakeQuantize -from openvino.tools.mo.ops.select import Select - - -class FakeQuantWithMinMaxVarsToQuantize(FrontReplacementOp): - """ - Performs FakeQuantize limits adjustment for min <= max following rules: - If 0 < min < max: min_adj = 0 and max_adj = max - min. - If min < max < 0: min_adj = min - max and max_adj = 0. - If min <= 0 <= max: - scale = (max - min) / (2^num_bits - 1), - min_adj = scale * round(min / scale) and max_adj = max + min_adj - min. - """ - op = "FakeQuantWithMinMaxVars" - enabled = True - - def replace_sub_graph(self, graph: Graph, match: Dict[str, Node]): - node = match['op'] - name = node.name - - min_port_tuple = (node.in_port(1).get_source().node, node.in_port(1).get_source().idx) - max_port_tuple = (node.in_port(2).get_source().node, node.in_port(2).get_source().idx) - - if min_port_tuple[0].has_and_set('value') and max_port_tuple[0].has_and_set('value'): - assert min_port_tuple[0]['value'].dtype == max_port_tuple[0]['value'].dtype, \ - 'Type mismatch in port 1 and 2 of {}'.format(self.op) - dtype = max_port_tuple[0]['value'].dtype - else: - dtype = np.float32 - - node.in_port(1).disconnect() - node.in_port(2).disconnect() - - # make sure min < max - min_less_max = Less(graph, {'name': name + '/if_min_less_max'}).create_node([min_port_tuple, max_port_tuple]) - minimum = Select(graph, {'name': name + '/minimum'}).create_node([min_less_max, min_port_tuple, max_port_tuple]) - maximum = Select(graph, {'name': name + '/maximum'}).create_node([min_less_max, max_port_tuple, min_port_tuple]) - - # to create zero of limits data type, we multiply it by integer zero - zero = create_op_node_with_second_input(graph, Mul, mo_array(0, dtype=dtype), {'name': name + '/zero'}, - input_node=minimum) - - # if 0 < min < max: min_adj = 0 and max_adj = max - min - min_greater_zero = Greater(graph, {'name': name + '/if_minimum_greater_zero'}).create_node([minimum, zero]) - max_minus_min = Sub(graph, {'name': name + '/max_minus_min'}).create_node([maximum, minimum]) - minimum = Select(graph, {'name': name + '/first_adj_min'}).create_node([min_greater_zero, zero, minimum]) - maximum = Select(graph, {'name': name + '/first_adj_max'}).create_node([min_greater_zero, max_minus_min, maximum]) - - # if min < max < 0: min_adj = min - max and max_adj = 0 - max_less_zero = Less(graph, {'name': name + '/if_max_less_zero'}).create_node([maximum, zero]) - min_minus_max = Sub(graph, {'name': name + '/min_minus_max'}).create_node([minimum, maximum]) - minimum = Select(graph, {'name': name + '/second_adj_min'}).create_node([max_less_zero, min_minus_max, minimum]) - maximum = Select(graph, {'name': name + '/second_adj_max'}).create_node([max_less_zero, zero, maximum]) - - # scale = (max - min) / (2 ^ num_bits - 1), - float_range = Sub(graph, {'name': name + '/float_range'}).create_node([maximum, minimum]) - quant_min_value, quant_max_value = int(node.narrow_range), 2 ** node.num_bits - 1 - int_range_value = 
mo_array(quant_max_value - quant_min_value, dtype=dtype) - int_range = Const(graph, dict(name=name + '/int_range', value=int_range_value)).create_node() - scale = Div(graph, {'name': name + '/scale'}).create_node([float_range, int_range]) - # min_adj = scale * round(min / scale) - descaled_min = Div(graph, {'name': name + '/descaled_min'}).create_node([minimum, scale]) - rounded_descaled_min = Round(graph, {'name': name + '/rounded_descaled_min'}).create_node([descaled_min]) - min_adj = Mul(graph, {'name': name + '/min_adj'}).create_node([scale, rounded_descaled_min]) - # max_adj = max + min_adj - min. - adjustment = Sub(graph, {'name': name + '/limits_adjustment'}).create_node([min_adj, minimum]) - max_adj = Add(graph, {'name': name + '/max_adj'}).create_node([maximum, adjustment]) - - # FakeQuantize operation has 5 inputs instead of 3 inputs in TensorFlow - node.add_input_port(3, skip_if_exist=True) - node.add_input_port(4, skip_if_exist=True) - - node.in_port(1).connect(min_adj.out_port(0)) - node.in_port(2).connect(max_adj.out_port(0)) - node.in_port(3).connect(min_adj.out_port(0)) - node.in_port(4).connect(max_adj.out_port(0)) - - FakeQuantize.update_node_stat(node, {'levels': node['levels']}) diff --git a/tools/mo/openvino/tools/mo/front/FillToBroadcast.py b/tools/mo/openvino/tools/mo/front/FillToBroadcast.py deleted file mode 100644 index dbcf09a26ebefa..00000000000000 --- a/tools/mo/openvino/tools/mo/front/FillToBroadcast.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.broadcast import Broadcast -from openvino.tools.mo.ops.const import Const - - -class FillToBroadcast(FrontReplacementPattern): - """ - Converts the 'Fill' layer to 'Broadcast'. 
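# Hedged illustration (not part of the deleted MO sources): the equivalence used
# by the rewrite described just above. A Fill/ConstantFill node that produces a
# tensor of a given shape filled with one scalar is simply a Broadcast of that
# scalar constant to the target shape.
import numpy as np

target_shape = (2, 3, 4)
fill_value = np.float32(0.5)
assert np.array_equal(np.full(target_shape, fill_value),          # Fill semantics
                      np.broadcast_to(fill_value, target_shape))  # Const + Broadcast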
- """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for fill_node in graph.get_op_nodes(op='Fill'): - name = fill_node.soft_get('name', fill_node.id) - - broadcast_node = Broadcast(graph, {'name': name + '/Broadcast'}).create_node() - fill_node.in_port(0).get_connection().set_destination(broadcast_node.in_port(1)) - fill_node.in_port(1).get_connection().set_destination(broadcast_node.in_port(0)) - fill_node.out_port(0).get_connection().set_source(broadcast_node.out_port(0)) - - for fill_node in graph.get_op_nodes(op='ConstantFill'): - name = fill_node.soft_get('name', fill_node.id) - - assert fill_node.has_valid('fill_value') - assert fill_node.has_and_set('input_as_shape') - - const = Const(graph, {'value': mo_array(fill_node.fill_value), 'name': name + '/value'}).create_node() - broadcast_node = Broadcast(graph, {'name': name + '/Broadcast'}).create_node() - fill_node.in_port(0).get_connection().set_destination(broadcast_node.in_port(1)) - const.out_port(0).connect(broadcast_node.in_port(0)) - fill_node.out_port(0).get_connection().set_source(broadcast_node.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/GeLUMerger_Erf.py b/tools/mo/openvino/tools/mo/front/GeLUMerger_Erf.py deleted file mode 100644 index b6b87f715f9add..00000000000000 --- a/tools/mo/openvino/tools/mo/front/GeLUMerger_Erf.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from math import sqrt, fabs - -from openvino.tools.mo.ops.gelu import GeLUOP -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.pattern_match import apply_pattern - - -class GeLUMergerErf(FrontReplacementPattern): - enabled = True - - def pattern1(self): - # (0.5 * x) * (1 + erf(x / sqrt(2)) - return dict( - nodes=[ - ('mul', dict(op='Mul')), - ('mul0', dict(op='Mul')), - ('div', dict(op='Div')), - ('erf', dict(op='Erf')), - ('add', dict(op='Add')), - ('mul_param', dict(op='Const')), - ('div_param', dict(op='Const')), - ('add_param', dict(op='Const')), - ], - edges=[ - ('mul', 'mul0'), - ('div', 'erf'), - ('erf', 'add'), - ('add', 'mul0'), - ('mul_param', 'mul'), - ('div_param', 'div'), - ('add_param', 'add'), - ]) - - def pattern2(self): - # 0.5 * (x * (1 + erf(x / sqrt(2))) - return dict( - nodes=[ - ('mul', dict(op='Mul')), - ('mul0', dict(op='Mul')), - ('div', dict(op='Div')), - ('erf', dict(op='Erf')), - ('add', dict(op='Add')), - ('mul_param', dict(op='Const')), - ('div_param', dict(op='Const')), - ('add_param', dict(op='Const')), - ], - edges=[ - ('div', 'erf'), - ('erf', 'add'), - ('add', 'mul'), - ('mul', 'mul0'), - ('mul_param', 'mul0'), - ('div_param', 'div'), - ('add_param', 'add'), - ]) - - def pattern3(self): - # x * (0.5 * (1 + erf(x / sqrt(2))) - return dict( - nodes=[ - ('mul', dict(op='Mul')), - ('mul0', dict(op='Mul')), - ('div', dict(op='Div')), - ('erf', dict(op='Erf')), - ('add', dict(op='Add')), - ('mul_param', dict(op='Const')), - ('div_param', dict(op='Const')), - ('add_param', dict(op='Const')), - ], - edges=[ - ('div', 'erf'), - ('erf', 'add'), - ('add', 'mul'), - ('mul', 'mul0'), - ('mul_param', 'mul'), - ('div_param', 'div'), - ('add_param', 'add'), - ]) - - def find_and_replace_pattern(self, graph: Graph): - log.info('Enabled GeLU Merger replacement for approximation with Erf') - apply_pattern(graph, **self.pattern1(), action=self.replace_gelu) - apply_pattern(graph, 
**self.pattern2(), action=self.replace_gelu) - apply_pattern(graph, **self.pattern3(), action=self.replace_gelu) - - def replace_gelu(self, graph: Graph, match: dict): - # Gaussian Error Linear Unit - # f(x) = 0.5 * x * (1 + erf(x / sqrt(2)) - out_node = match['mul0'] - node_name = out_node.soft_get('name', out_node.id) - div = match['div'] - inp_node = div.in_port(0).get_source().node - inp_name = inp_node.soft_get('name', out_node.id) - log.debug('Found potential Erf-based GeLU pattern after {} with name {}'.format(inp_node.op, inp_name)) - - # take the values of the mul, add and div - div_param = match['div_param'] - add_param = match['add_param'] - mul_param = match['mul_param'] - - if add_param.value.size == 1 and mul_param.value.size == 1 and div_param.value.size == 1: - mul_param = match['mul_param'].value.item() - add_param = match['add_param'].value.item() - div_param = match['div_param'].value.item() - - sqrt2 = sqrt(2.0) - # check that the values match the approximation - if fabs(div_param - sqrt2) < 1e-06 and mul_param == 0.5 and add_param == 1.0: - log.debug('Confirmed Erf-based GELU pattern after {} with name {}'.format(inp_node.op, inp_name)) - gelu = GeLUOP(graph, dict(name=inp_name + '/GELU_', approximation_mode='erf')).create_node() - div.in_port(0).get_connection().set_destination(gelu.in_port(0)) - out_node.out_port(0).get_connection().set_source(gelu.out_port(0)) - rename_nodes([(out_node, node_name + '/TBD'), (gelu, node_name)]) diff --git a/tools/mo/openvino/tools/mo/front/GeLUMerger_Tanh.py b/tools/mo/openvino/tools/mo/front/GeLUMerger_Tanh.py deleted file mode 100644 index ffb908ec4f52c7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/GeLUMerger_Tanh.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from math import sqrt, fabs, pi - -from openvino.tools.mo.ops.gelu import GeLUOP -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph - - -class GeLUMergerTanh(FrontReplacementSubgraph): - enabled = True - - def pattern(self): - log.info('Enabled GeLU Merger for approximation with TanH') - return dict( - nodes=[ - ('pow', dict(op='Pow')), - ('mul', dict(op='Mul')), - ('mul0', dict(op='Mul')), - ('mul1', dict(op='Mul')), - ('mul2', dict(op='Mul')), - ('tanh', dict(op='Tanh')), - ('add', dict(op='Add')), - ('add0', dict(op='Add')), - ('mul_param', dict(op='Const')), - ('mul0_param', dict(op='Const')), - ('mul1_param', dict(op='Const')), - ], - edges=[ - ('pow', 'mul'), - ('mul', 'add'), - ('add', 'mul0'), - ('mul0', 'tanh'), - ('tanh', 'add0'), - ('add0', 'mul1'), - ('mul1', 'mul2'), - ('mul_param', 'mul'), - ('mul0_param', 'mul0'), - ('mul1_param', 'mul1'), - ]) - - def replace_sub_graph(self, graph: Graph, match: dict): - # Gaussian Error Linear Unit, TanH based approximation: - # 0.5*x*(1 + tanh([sqrt(2/pi)]*[x + 0.044715x3]) - inp_port = match['pow'].in_port(0).get_source() - inp = inp_port.node - log.debug('Found potential TanH-based GeLU pattern after {} with name {}'.format(inp.op, inp.name)) - - # take the values of the mul ops - mul_param = match['mul_param'] - mul0_param = match['mul0_param'] - mul1_param = match['mul1_param'] - if mul0_param.value.size == 1 and mul_param.value.size == 1 and mul1_param.value.size == 1: - mul_param = match['mul_param'].value.item() - mul0_param = match['mul0_param'].value.item() - mul1_param = match['mul1_param'].value.item() - sqrt2pi = sqrt(2.0/pi) - 
# check that the values match the approximation - if fabs(mul0_param - sqrt2pi) < 1e-06 and fabs(mul_param - 0.044715) < 1e-06 and mul1_param == 0.5: - log.debug('Confirmed TanH-based GELU pattern after {} with name {}'.format(inp.op, inp.name)) - gelu = GeLUOP(graph, dict(name=inp.name + '/GELU_', approximation_mode='tanh')).create_node() - inp_port.connect(gelu.in_port(0)) - match['mul2'].out_port(0).get_connection().set_source(gelu.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/HSigmoid_fusion.py b/tools/mo/openvino/tools/mo/front/HSigmoid_fusion.py deleted file mode 100644 index 840a4d35472c89..00000000000000 --- a/tools/mo/openvino/tools/mo/front/HSigmoid_fusion.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.AttributedClampNormalizer import AttributedClampNormalizer -from openvino.tools.mo.ops.activation_ops import HSigmoid -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.pattern_match import check_value -from openvino.tools.mo.utils.graph import Node - - -def replace_with_hsigmoid(graph: Graph, first_node: Node, last_node: Node): - # determine the input port of first and last nodes which gets the 'input' node output - add_input_port_idx = int(first_node.in_port(0).get_connection().get_source().node.soft_get('op') == 'Const') - last_node_name = last_node.soft_get('name', last_node.id) - - hsigmoid = HSigmoid(graph, {}).create_node() - hsigmoid.in_port(0).connect(first_node.in_port(add_input_port_idx).get_source()) - last_node.out_port(0).get_connection().set_source(hsigmoid.out_port(0)) - - rename_nodes([(last_node, last_node_name + '/TBR'), (hsigmoid, last_node_name)]) - - -class HSigmoidWithClamp(FrontReplacementSubgraph): - """ - The transformation looks for the pattern with ReLU6 (Clamp) defining the HSigmoid function: - HSigmoid(x) = Relu6(x + 3.0) / 6.0. - """ - enabled = True - - def run_after(self): - return [AttributedClampNormalizer] - - def pattern(self): - return dict( - nodes=[ - ('input', dict()), - ('add', dict(op='Add')), - ('const_0', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 0.0, atol=1e-6)))), - ('const_3', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 3.0, atol=1e-6)))), - ('const_6', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))), - ('const_1_6', - dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 1.0 / 6.0, atol=1e-6)))), - ('clamp', dict(op='Clamp')), - ('mul_2', dict(op='Mul')), - ], - edges=[ - ('input', 'add', {}), - ('const_3', 'add', {}), - ('add', 'clamp', {'in': 0}), - ('const_0', 'clamp', {'in': 1}), - ('const_6', 'clamp', {'in': 2}), - ('clamp', 'mul_2', {}), - ('const_1_6', 'mul_2', {}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - replace_with_hsigmoid(graph, match['add'], match['mul_2']) - - -class HSigmoidWithMinMax(FrontReplacementSubgraph): - """ - The transformation looks for the pattern with Min/Max defining the HSigmoid function: - HSigmoid(x) = Min(Max(x + 3.0, 0), 6.0) / 6.0. 
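# Hedged numeric check (not part of the deleted MO sources): the two sub-graph
# shapes matched by the fusions above -- ReLU6(x + 3) / 6 and
# Min(Max(x + 3, 0), 6) * 1/6 -- compute identical HSigmoid values, which is
# what makes it safe to collapse either pattern into a single HSigmoid node.
import numpy as np

x = np.linspace(-6.0, 6.0, 25, dtype=np.float32)
hsigmoid_clamp = np.clip(x + 3.0, 0.0, 6.0) / 6.0
hsigmoid_minmax = np.minimum(np.maximum(x + 3.0, 0.0), 6.0) * (1.0 / 6.0)
assert np.allclose(hsigmoid_clamp, hsigmoid_minmax)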
- """ - enabled = True - - def run_after(self): - return [AttributedClampNormalizer] - - def pattern(self): - return dict( - nodes=[ - ('input', dict()), - ('add', dict(op='Add')), - ('const_0', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 0.0, atol=1e-6)))), - ('const_3', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 3.0, atol=1e-6)))), - ('const_6', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))), - ('const_1_6', - dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 1.0 / 6.0, atol=1e-6)))), - ('max', dict(op='Maximum')), - ('min', dict(op='Minimum')), - ('mul_2', dict(op='Mul')), - ], - edges=[ - ('input', 'add', {'out': 0}), - ('const_3', 'add', {}), - ('add', 'max', {}), - ('const_0', 'max', {}), - ('max', 'min', {}), - ('const_6', 'min', {}), - ('min', 'mul_2', {}), - ('const_1_6', 'mul_2', {}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - replace_with_hsigmoid(graph, match['add'], match['mul_2']) - - -class HSigmoidWithReluDiv(FrontReplacementSubgraph): - """ - The transformation looks for the pattern with Relu/Div defining the HSigmoid function: - HSigmoid(x) = Min(Relu(x + 3.0), 6.0) / 6.0 - """ - enabled = True - - def run_after(self): - return [AttributedClampNormalizer] - - def pattern(self): - return dict( - nodes=[ - ('input', dict()), - ('add_const', - dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 3.0, atol=1e-6)))), - ('add', dict(op='Add')), - ('relu', dict(op='ReLU')), - ('min_const', - dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))), - ('min', dict(op='Minimum')), - ('div_const', - dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))), - ('div', dict(op='Div')), - ], - edges=[ - ('input', 'add', {'out': 0}), - ('add_const', 'add', {}), - ('add', 'relu', {}), - ('relu', 'min', {}), - ('min_const', 'min', {}), - ('min', 'div', {}), - ('div_const', 'div', {}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - replace_with_hsigmoid(graph, match['add'], match['div']) - - -class HSigmoidWithReluMul(FrontReplacementSubgraph): - """ - The transformation looks for the pattern with Relu/Mul defining the HSigmoid function: - HSigmoid(x) = Min(Relu(x + 3.0), 6.0) * 1.0/6.0 - """ - enabled = True - - def run_after(self): - return [AttributedClampNormalizer] - - def pattern(self): - return dict( - nodes=[ - ('input', dict()), - ('add_const', - dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 3.0, atol=1e-6)))), - ('add', dict(op='Add')), - ('relu', dict(op='ReLU')), - ('min_const', - dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))), - ('min', dict(op='Minimum')), - ('mul_const', - dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 1.0 / 6.0, atol=1e-6)))), - ('mul', dict(op='Mul')), - ], - edges=[ - ('input', 'add', {'out': 0}), - ('add_const', 'add', {}), - ('add', 'relu', {}), - ('relu', 'min', {}), - ('min_const', 'min', {}), - ('min', 'mul', {}), - ('mul_const', 'mul', {}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - replace_with_hsigmoid(graph, match['add'], match['mul']) diff --git a/tools/mo/openvino/tools/mo/front/HSwish_fusion.py b/tools/mo/openvino/tools/mo/front/HSwish_fusion.py deleted file mode 100644 index 80747f3422eb7e..00000000000000 --- 
a/tools/mo/openvino/tools/mo/front/HSwish_fusion.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.AttributedClampNormalizer import AttributedClampNormalizer -from openvino.tools.mo.ops.activation_ops import HSwish -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.pattern_match import check_value - - -def replace_with_hswish(graph: Graph, match: [dict, SubgraphMatch]): - add = match['add'] - mul = match['mul'] - mul_2 = match['mul_2'] - - # determine the input port of Add and Mul which gets the 'input' node output - add_input_port_idx = int(add.in_port(0).get_connection().get_source().node.soft_get('op') == 'Const') - mul_input_port_idx = int(mul.in_port(0).get_connection().get_source().node.soft_get('op') in ['Clamp', 'Minimum']) - - # check that the same tensor provided as input to Add and Mul - if add.in_port(add_input_port_idx).get_source() != mul.in_port(mul_input_port_idx).get_source(): - return - mul_2_name = mul_2.soft_get('name', mul_2.id) - - hswish = HSwish(graph, {}).create_node() - hswish.in_port(0).connect(add.in_port(add_input_port_idx).get_source()) - mul_2.out_port(0).get_connection().set_source(hswish.out_port(0)) - - rename_nodes([(mul_2, mul_2_name + '/TBR'), (hswish, mul_2_name)]) - - -class HSwishWithClamp(FrontReplacementSubgraph): - """ - The transformation looks for the pattern with ReLU6 (Clamp) defining the HSwish function: - HSwish(x) = x * Relu6(x + 3) / 6.0. - """ - enabled = True - - def run_after(self): - return [AttributedClampNormalizer] - - def pattern(self): - return dict( - nodes=[ - ('input', dict()), - ('add', dict(op='Add')), - ('const_0', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 0.0, atol=1e-6)))), - ('const_3', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 3.0, atol=1e-6)))), - ('const_6', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))), - ('const_1_6', - dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 1.0 / 6.0, atol=1e-6)))), - ('clamp', dict(op='Clamp')), - ('mul', dict(op='Mul')), - ('mul_2', dict(op='Mul')), - ], - edges=[ - ('input', 'add', {}), - ('input', 'mul', {}), - ('const_3', 'add', {}), - ('add', 'clamp', {'in': 0}), - ('const_0', 'clamp', {'in': 1}), - ('const_6', 'clamp', {'in': 2}), - ('clamp', 'mul', {}), - ('mul', 'mul_2', {}), - ('const_1_6', 'mul_2', {}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - replace_with_hswish(graph, match) - - -class HSwishWithMinMax(FrontReplacementSubgraph): - """ - The transformation looks for the pattern with Min/Max defining the HSwish function: - HSwish(x) = x * Min(Max(x + 3, 0), 6) / 6.0. 
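# Hedged numeric check (not part of the deleted MO sources): both patterns above
# express the same HSwish identity, x * ReLU6(x + 3) / 6 ==
# x * Min(Max(x + 3, 0), 6) * 1/6, so either sub-graph can be replaced by one
# HSwish node without changing the computed values.
import numpy as np

x = np.linspace(-6.0, 6.0, 25, dtype=np.float32)
hswish_clamp = x * np.clip(x + 3.0, 0.0, 6.0) / 6.0
hswish_minmax = x * np.minimum(np.maximum(x + 3.0, 0.0), 6.0) * (1.0 / 6.0)
assert np.allclose(hswish_clamp, hswish_minmax)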
- """ - enabled = True - - def run_after(self): - return [AttributedClampNormalizer] - - def pattern(self): - return dict( - nodes=[ - ('input', dict()), - ('add', dict(op='Add')), - ('const_0', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 0.0, atol=1e-6)))), - ('const_3', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 3.0, atol=1e-6)))), - ('const_6', dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 6.0, atol=1e-6)))), - ('const_1_6', - dict(op='Const', value=lambda v: check_value(v, lambda x: np.allclose(x, 1.0 / 6.0, atol=1e-6)))), - ('max', dict(op='Maximum')), - ('min', dict(op='Minimum')), - ('mul', dict(op='Mul')), - ('mul_2', dict(op='Mul')), - ], - edges=[ - ('input', 'add', {'out': 0}), - ('input', 'mul', {'out': 0}), - ('const_3', 'add', {}), - ('add', 'max', {}), - ('const_0', 'max', {}), - ('max', 'min', {}), - ('const_6', 'min', {}), - ('min', 'mul', {}), - ('mul', 'mul_2', {}), - ('const_1_6', 'mul_2', {}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - replace_with_hswish(graph, match) diff --git a/tools/mo/openvino/tools/mo/front/InterpolateNormalizer.py b/tools/mo/openvino/tools/mo/front/InterpolateNormalizer.py deleted file mode 100644 index 0bf1be6871cb00..00000000000000 --- a/tools/mo/openvino/tools/mo/front/InterpolateNormalizer.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import inspect -import logging as log - -from openvino.tools.mo.ops.elementwise import Mul, Add -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class InterpolateNormalizer(FrontReplacementOp): - op = 'Interpolate' - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['op'] - - if 1 not in node.in_ports() or node.in_port(1).disconnected(): - - if node.has_valid('factor') and not node.has_valid('width') and not node.has_valid('height'): - factor = Const(graph, {'value': mo_array(node.factor)}).create_node() - - shape = Shape(graph, {'name': node.name + '/shape'}).create_node() - - begin = Const(graph, {'value': mo_array([2])}).create_node() - end = Const(graph, {'value': mo_array([4])}).create_node() - stride = Const(graph, {'value': mo_array([1])}).create_node() - ss = StridedSlice(graph, {'name': node.name + '/ss_0_port', 'begin_mask': mo_array([1]), - 'end_mask': mo_array([0]), 'new_axis_mask': mo_array([0]), - 'shrink_axis_mask': mo_array([0]), - 'ellipsis_mask': mo_array([0])}).create_node() - - mul = Mul(graph, {'name': node.name + '/factor_mul_'}).create_node() - - source = node.in_port(0).get_connection().get_source() - source.connect(shape.in_port(0)) - shape.out_port(0).connect(ss.in_port(0)) - begin.out_port(0).connect(ss.in_port(1)) - end.out_port(0).connect(ss.in_port(2)) - stride.out_port(0).connect(ss.in_port(3)) - ss.out_port(0).connect(mul.in_port(0)) - factor.out_port(0).connect(mul.in_port(1)) - - node.add_input_port(1, skip_if_exist=True) - assert node.in_port(1).disconnected() - mul.out_port(0).connect(node.in_port(1)) - - else: - shape = Shape(graph, {'name': node.name + 
'/shape'}).create_node() - - begin = Const(graph, {'value': mo_array([2])}).create_node() - end = Const(graph, {'value': mo_array([4])}).create_node() - stride = Const(graph, {'value': mo_array([1])}).create_node() - ss = StridedSlice(graph, {'name': node.name + '/ss_0_port', 'begin_mask': mo_array([1]), - 'end_mask': mo_array([0]), 'new_axis_mask': mo_array([0]), - 'shrink_axis_mask': mo_array([0]), - 'ellipsis_mask': mo_array([0])}).create_node() - - source = node.in_port(0).get_connection().get_source() - source.connect(shape.in_port(0)) - shape.out_port(0).connect(ss.in_port(0)) - begin.out_port(0).connect(ss.in_port(1)) - end.out_port(0).connect(ss.in_port(2)) - stride.out_port(0).connect(ss.in_port(3)) - - pads_value = node.pads_begin + node.pads_end - pads_const = Const(graph, {'value': mo_array(pads_value)}).create_node() - add = Add(graph, {'name': node.name + '/pad_add'}).create_node() - ss.out_port(0).connect(add.in_port(0)) - add.in_port(1).connect(pads_const.out_port(0)) - - if node.soft_get('shrink_factor') != 1 and node.soft_get('zoom_factor') == 1: - shrink_factor = node.shrink_factor - if shrink_factor < 1: - log.error('Shrink factor should be positive in node {}'.format(node.id)) - return None - - const = Const(graph, {'name': node.name + '/pre_shrink_sub_const', - 'value': mo_array(-1)}).create_node() - sub = Add(graph, {'name': node.name + '/pre_shrink_sub'}).create_node() - add.out_port(0).connect(sub.in_port(0)) - sub.in_port(1).connect(const.out_port(0)) - - const = Const(graph, {'value': mo_array(1 / shrink_factor), - 'name': node.name + 'shrink_factor_div_const'}).create_node() - div = Mul(graph, {'name': node.name + 'shrink_factor_div'}).create_node() - sub.out_port(0).connect(div.in_port(0)) - div.in_port(1).connect(const.out_port(0)) - - const = Const(graph, {'name': node.name + '/shrink_factor_add_one_const', 'value': mo_array(1) - }).create_node() - add = Add(graph, {'name': node.name + '/shrink_factor_add_one'}).create_node() - div.out_port(0).connect(add.in_port(0)) - const.out_port(0).connect(add.in_port(1)) - - node.add_input_port(1, skip_if_exist=True) - assert node.in_port(1).disconnected() - add.out_port(0).connect(node.in_port(1)) - - elif node.soft_get('shrink_factor') == 1 and node.soft_get('zoom_factor') != 1: - zoom_factor = node.zoom_factor - if zoom_factor < 1: - log.error('Zoom factor should be positive in node {}'.format(node.id)) - return None - - node['debug_message'] = 'Interpolate layer replacer may be wrong, please, try to update it in the' \ - ' file (openvino/tools/mo/front/InterpolateNormalizer.py at the line {}).' 
\ - ''.format(inspect.currentframe().f_lineno) + refer_to_faq_msg(100) - - # Reshape methods can be different in some cases - # Commented out section represents reshape that used in deeplab-caffe - # Uncomment the following lines, if your model was trained with deeplab-caffe - # or have the same reshape method - # const = Const(graph, {'value': mo_array(-1), - # 'name': node.name + 'zoom_factor_deeplab-caffe_sub_const'}).create_node() - # sub = Add(graph, {'name': node.name + 'zoom_factor_deeplab-caffe_sub'}).create_node() - # add.out_port(0).connect(sub.in_port(0)) - # const.out_port(0).connect(sub.in_port(1)) - # - # const = Const(graph, {'value': mo_array(zoom_factor - 1), - # 'name': node.name + 'zoom_factor_deeplab-caffe_mul_const'}).create_node() - # mul = Mul(graph, {'name': node.name + 'zoom_factor_deeplab-caffe_mul'}).create_node() - # sub.out_port(0).connect(mul.in_port(0)) - # const.out_port(0).connect(mul.in_port(1)) - # - # sum = Add(graph, {'name': node.name + 'zoom_factor_deeplab-caffe_sum'}).create_node() - # add.out_port(0).connect(sum.in_port(0)) - # mul.out_port(0).connect(sum.in_port(1)) - # - # node.add_input_port(1, skip_if_exist=True) - # assert node.in_port(1).disconnected() - # sum.out_port(0).connect(node.in_port(1)) - - # Comment out the following lines if you use the reshape method from previous section - const = Const(graph, {'value': mo_array(zoom_factor), - 'name': node.name + '/zoom_factor_mul_const'}).create_node() - mul = Mul(graph, {'name': node.name + '/zoom_factor_mul'}).create_node() - - add.out_port(0).connect(mul.in_port(0)) - const.out_port(0).connect(mul.in_port(1)) - - node.add_input_port(1, skip_if_exist=True) - assert node.in_port(1).disconnected() - mul.out_port(0).connect(node.in_port(1)) - - elif node.soft_get('width') != 0 and node.soft_get('height') != 0: - const = Const(graph, {'value': mo_array([node.height, node.width])}).create_node() - node.add_input_port(1, skip_if_exist=True) - assert node.in_port(1).disconnected() - const.out_port(0).connect(node.in_port(1)) - - elif node.soft_get('shrink_factor') != 1 and node.soft_get('zoom_factor') != 1: - shrink_factor = node.shrink_factor - zoom_factor = node.zoom_factor - if shrink_factor < 1: - log.error('Shrink factor should be positive in node {}'.format(node.id)) - return None - if zoom_factor < 1: - log.error('Zoom factor should be positive in node {}'.format(node.id)) - return None - - const = Const(graph, {'value': mo_array(-1)}).create_node() - sub = Add(graph, {'name': node.name + '/shrink_zoom_factor_sub'}).create_node() - add.out_port(0).connect(sub.in_port(0)) - const.out_port(0).connect(sub.in_port(1)) - - const = Const(graph, {'value': mo_array(1 / (shrink_factor + 1))}).create_node() - div = Mul(graph, {'name': node.name + '/shrink_factor_div'}).create_node() - sub.out_port(0).connect(div.in_port(0)) - const.out_port(0).connect(div.in_port(1)) - - const = Const(graph, {'value': mo_array(-1), - 'name': node.name + 'shrink_zoom_factor_sum_const'}).create_node() - sum = Add(graph, {'name': node.name + '/shrink_zoom_factor_sum'}).create_node() - div.out_port(0).connect(sum.in_port(0)) - const.out_port(0).connect(sum.in_port(1)) - - const = Const(graph, {'value': mo_array(zoom_factor - 1)}).create_node() - mul = Mul(graph, {'name': node.name + '/zoom_factor_mul'}).create_node() - sum.out_port(0).connect(mul.in_port(0)) - const.out_port(0).connect(mul.in_port(1)) - - sum = Add(graph, {'name': node.name + '/final_shrink_zoom_factor_sum'}).create_node() - 
div.out_port(0).connect(sum.in_port(0)) - mul.out_port(0).connect(sum.in_port(1)) - - node.add_input_port(1, skip_if_exist=True) - assert node.in_port(1).disconnected() - sum.out_port(0).connect(node.in_port(1)) - else: - if node.soft_get('fw') == 'caffe': - shape = Shape(graph, {'name': node.name + '/shape'}).create_node() - - begin = Const(graph, {'value': mo_array([2])}).create_node() - end = Const(graph, {'value': mo_array([4])}).create_node() - stride = Const(graph, {'value': mo_array([1])}).create_node() - ss = StridedSlice(graph, {'name': node.name + '/ss_0_port', 'begin_mask': mo_array([1]), - 'end_mask': mo_array([0]), 'new_axis_mask': mo_array([0]), - 'shrink_axis_mask': mo_array([0]), - 'ellipsis_mask': mo_array([0])}).create_node() - - source = node.in_port(1).get_connection().get_source() - node.in_port(1).disconnect() - source.connect(shape.in_port(0)) - shape.out_port(0).connect(ss.in_port(0)) - begin.out_port(0).connect(ss.in_port(1)) - end.out_port(0).connect(ss.in_port(2)) - stride.out_port(0).connect(ss.in_port(3)) - ss.out_port(0).connect(node.in_port(1)) diff --git a/tools/mo/openvino/tools/mo/front/InterpolateV1ToInterpolate.py b/tools/mo/openvino/tools/mo/front/InterpolateV1ToInterpolate.py deleted file mode 100644 index 4fd33004eb91d3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/InterpolateV1ToInterpolate.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes - - -def correct_pad(pad): - return int64_array([pad] if not isinstance(pad, list) else pad) - - -class InterpolateV1ToInterpolate(FrontReplacementPattern): - """ - This transformation replaces the operation Interpolate-1 with the operation Interpolate-4. 
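# Hedged sketch (not part of the deleted MO sources): the attribute mapping that
# the opset1 -> opset4 Interpolate rewrite described just above performs. The
# helper and key names are illustrative; only the mapping itself mirrors the
# transformation.
def interpolate_v1_to_v4_attrs(v1_attrs):
    def as_list(pad):
        # opset1 stores pads as a scalar or a list; opset4 expects arrays
        return pad if isinstance(pad, list) else [pad]
    return {
        'mode': v1_attrs['mode'],
        'antialias': v1_attrs.get('antialias', 0),
        'coordinate_transformation_mode':
            'align_corners' if v1_attrs.get('align_corners', 0) else 'half_pixel',
        'pads_begin': as_list(v1_attrs.get('pads_begin', 0)),
        'pads_end': as_list(v1_attrs.get('pads_end', 0)),
        'nearest_mode': 'round_prefer_floor',
        'cube_coeff': -0.75,
        'shape_calculation_mode': 'sizes',
    }

v4_attrs = interpolate_v1_to_v4_attrs({'mode': 'linear', 'align_corners': 1})
assert v4_attrs['coordinate_transformation_mode'] == 'align_corners'
assert v4_attrs['pads_begin'] == [0]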
- """ - enabled = True - - def run_after(self): - from openvino.tools.mo.front.InterpolateNormalizer import InterpolateNormalizer - return [InterpolateNormalizer] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='Interpolate', version='opset1'): - transformation_mode = 'align_corners' if int(node.soft_get('align_corners', 0)) else 'half_pixel' - interpolate1_name = node.soft_get('name', node.id) - interpolate4 = create_op_with_const_inputs(graph, Interpolate, - { - 2: mo_array([1.0, 1.0]), - 3: int64_array(node.axes) - }, - { - 'mode': node.mode, - 'antialias': node.antialias, - 'coordinate_transformation_mode': transformation_mode, - 'pads_begin': correct_pad(node.soft_get('pads_begin', 0)), - 'pads_end': correct_pad(node.soft_get('pads_end', 0)), - 'nearest_mode': 'round_prefer_floor', - 'cube_coeff': -0.75, - 'shape_calculation_mode': 'sizes', - 'version': 'opset4', - 'in_ports_count': 4, - }) - - interpolate1_input_connection = node.in_port(0).get_connection() - interpolate1_input_connection.set_destination(interpolate4.in_port(0)) - - sizes_connection = node.in_port(1).get_connection() - sizes_connection.set_destination(interpolate4.in_port(1)) - - node.out_port(0).get_connection().set_source(interpolate4.out_port(0)) - rename_nodes([(node, interpolate1_name + '/delete'), (interpolate4, interpolate1_name)]) diff --git a/tools/mo/openvino/tools/mo/front/LayerNorm.py b/tools/mo/openvino/tools/mo/front/LayerNorm.py deleted file mode 100644 index ce777df7e5ee5c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/LayerNorm.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.pattern_match import apply_pattern - - -class LayerNorm(FrontReplacementPattern): - # Compose part of the LayerNorm pattern to the MVN - enabled = True - - def pattern1(self): - return dict( - nodes=[ - ('pool0', dict(op='ReduceMean')), - ('pool1', dict(op='ReduceMean')), - ('pow', dict(op='Pow')), - ('div', dict(op='Div')), - ('sqrt', dict(op='Pow')), - ('add', dict(op='Add')), - ('sub', dict(op='Sub')), - ('pool0_param', dict(op='Const')), - ('pool1_param', dict(op='Const')), - ('add_param', dict(op='Const')), - ('pow_param', dict(op='Const')), - ], - edges=[ - ('pool0', 'sub'), - ('sub', 'pow'), - ('pow', 'pool1'), - ('pool1', 'add'), - ('add', 'sqrt'), - ('sqrt', 'div'), - ('sub', 'div'), - ('pool0_param', 'pool0'), - ('pool1_param', 'pool1'), - ('pow_param', 'sqrt'), - ('add_param', 'add'), - ]) - - def pattern2(self): - # pattern from bert onnx model - return dict( - nodes=[ - ('pool0', dict(op='ReduceMean')), - ('pool1', dict(op='ReduceMean')), - ('cast', dict(op='Cast')), - ('pow', dict(op='Pow')), - ('div', dict(op='Div')), - ('sqrt', dict(op='Pow')), - ('add', dict(op='Add')), - ('sub', dict(op='Sub')), - ('pool0_param', dict(op='Const')), - ('pool1_param', dict(op='Const')), - ('add_param', dict(op='Const')), - ('pow_param', dict(op='Const')), - ], - edges=[ - ('pool0', 'sub'), - ('sub', 'cast'), - ('cast', 'pow'), - ('pow', 'pool1'), - ('pool1', 'add'), - ('add', 'sqrt'), - ('sqrt', 'div'), - ('sub', 'div'), - ('pool0_param', 'pool0'), - ('pool1_param', 'pool1'), - ('pow_param', 
'sqrt'), - ('add_param', 'add'), - ]) - - def find_and_replace_pattern(self, graph: Graph): - log.info('Enabled LayerNorm pattern recognition') - apply_pattern(graph, **self.pattern1(), action=self.replace_layer_norm) - apply_pattern(graph, **self.pattern2(), action=self.replace_layer_norm) - - def replace_layer_norm(self, graph: Graph, match: dict): - inp = match['pool0'] - node_before = inp.in_port(0).get_source().node - node_before_name = node_before.soft_get('name', node_before.id) - - # take/check the values of the add, pow and axes for ReduceMean - pow_param = match['pow_param'] - add_param = match['add_param'] - if add_param.value.size == 1 and pow_param.value.size == 1 and add_param.value.item() <= 1e-05 \ - and pow_param.value.item() == 0.5 and match['pool0_param'].value == match['pool1_param'].value: - log.debug('Found LayerNorm pattern after {} with name {}'.format(node_before.op, node_before_name)) - mvn = create_op_with_const_inputs(graph, MVN, {1: match['pool1_param'].value}, - {'eps': add_param.value.item(), 'normalize_variance': 1, - 'eps_mode': 'inside_sqrt'}) - div_name = match['div'].soft_get('name', match['div'].id) - rename_nodes([(match['div'], div_name + '/to_be_removed'), (mvn, div_name)]) - - inp.in_port(0).get_connection().set_destination(mvn.in_port(0)) - match['div'].out_port(0).get_connection().set_source(mvn.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/Log1p.py b/tools/mo/openvino/tools/mo/front/Log1p.py deleted file mode 100644 index e3a86b1f1f4044..00000000000000 --- a/tools/mo/openvino/tools/mo/front/Log1p.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.activation_ops import Log -from openvino.tools.mo.ops.elementwise import Add -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.ops.const import Const - - -class Log1p(FrontReplacementOp): - """ - Log1p computes natural logarithm of (1 + x) element-wise. - It replaces Log1p operation with Add -> Log. 
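# Hedged numeric check (not part of the deleted MO sources): Log1p is the
# natural logarithm of (1 + x), so the Add(1) -> Log chain built above
# reproduces it. For very small x the dedicated Log1p form is the numerically
# safer one, which is why it exists as a separate operation in the first place.
import numpy as np

x = np.array([0.0, 0.25, 0.5, 2.0], dtype=np.float32)
assert np.allclose(np.log1p(x), np.log(np.float32(1.0) + x))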
- """ - op = "Log1p" - enabled = True - - def replace_op(self, graph: Graph, node: Node): - node_name = node.soft_get('name', node.id) - const_dtype = np.float32 - if node.has_valid('data_type'): - const_dtype = node.data_type - const = Const(graph, {'value': mo_array([1], dtype=const_dtype)}).create_node() - add = Add(graph, {'name': node.name + '/Add_'}).create_node() - log = Log(graph, {'name': node.name + '/Log_'}).create_node() - - # Connect nodes: input -> Add -> Log - const.out_port(0).connect(add.in_port(0)) - node.in_port(0).get_connection().set_destination(add.in_port(1)) - add.out_port(0).connect(log.in_port(0)) - rename_nodes([(node, node_name + '/delete'), (log, node_name)]) - - # The "explicit" version of the return value is: [(out_node.id, 0)]) - return [log.id] diff --git a/tools/mo/openvino/tools/mo/front/MatMul_normalizer.py b/tools/mo/openvino/tools/mo/front/MatMul_normalizer.py deleted file mode 100644 index f21350418a8593..00000000000000 --- a/tools/mo/openvino/tools/mo/front/MatMul_normalizer.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import math - -from openvino.tools.mo.ops.MatMul import MatMul -from openvino.tools.mo.ops.elementwise import Add, Mul -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.reshape import Reshape - - -class FullyConnectedDecomposer(FrontReplacementSubgraph): - """ - Decomposes FC operation: - 1. Biases are added separately with the help of Add node - 2. 
FC node itself is converted to MatMul - """ - enabled = True - - def pattern(self): - return dict( - nodes=[('op', dict(kind='op', type='FullyConnected'))], - edges=[] - ) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - node = match['op'] - name = node.soft_get('name', node.id) - - # biases normalization - if 2 in node.in_ports() and not node.in_port(2).disconnected(): - bias_node = Add(graph, {'name': name + '/Bias_'}).create_node() - node_name = node.name + '/WithoutBiases' - bias_node_name = node.name - rename_nodes([(node, node_name), (bias_node, bias_node_name)]) - node.out_port(0).get_connection().set_source(bias_node.out_port(0)) - node.in_port(2).get_connection().set_destination(bias_node.in_port(1)) - node.out_port(0).connect(bias_node.in_port(0)) - - # weights normalization - assert node.has_valid('out-size') - out_size = node['out-size'] - reshape_dim = int64_array([-1, out_size]) - if node.has_and_set('transpose_weights'): - reshape_dim = int64_array([out_size, -1]) - node.insert_op_on_input_port(in_port_idx=1, new_op_class=Reshape, - new_op_attrs={'name': name + '/weights_reshape'}, value=reshape_dim) - if node.has_and_set('transpose_weights'): - node.insert_op_on_input_port(in_port_idx=1, new_op_class=Transpose, - new_op_attrs={'name': name + '/weights_transpose'}, value=int64_array([1, 0])) - - # input normalization for 4D Caffe FullyConnected - if graph.graph['fw'] == 'caffe': - node.insert_op_on_input_port(in_port_idx=0, new_op_class=Reshape, - new_op_attrs={'name': name + '/flatten_fc_input', 'special_zero': True}, - value=int64_array([0, -1])) - - MatMul.update_node_stat(node, {}) - - -class GemmDecomposer(FrontReplacementSubgraph): - """ - Decomposes Gemm operation: - 1. Biases are added separately with the help of Add node - 2. Multiplication by `alpha` and `beta` values are separated to Mul operations - 3. 
Gemm operation itself is converted to MatMul - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='Gemm'): - name = node.soft_get('name', node.id) - node_output_port = node.out_port(0) - if node.has_valid('alpha') and not math.isclose(node.alpha, 1): - mul_alpha = create_op_with_const_inputs(graph, Mul, {1: mo_array(node.alpha)}, - {'name': name + '/Alpha', 'can_be_scaleshift': False}) - node_output_port.get_connection().insert_node(mul_alpha) - node_output_port = mul_alpha.out_port(0) - del node['alpha'] - - if node.is_in_port_connected(2): - # biases normalization - bias_node = Add(graph, {'name': name + '/Bias_', 'can_be_scaleshift': False}).create_node() - without_biases_node_name = name + '/WithoutBiases' - rename_nodes([(node, without_biases_node_name), (bias_node, name)]) - node_output_port.get_connection().set_source(bias_node.out_port(0)) - node.in_port(2).get_connection().set_destination(bias_node.in_port(1)) - node_output_port.connect(bias_node.in_port(0)) - if node.has_valid('beta') and not math.isclose(node.beta, 1): - bias_node.insert_op_on_input_port(in_port_idx=1, new_op_class=Mul, value=mo_array(node.beta), - new_op_attrs={'name': name + '/Beta', - 'can_be_scaleshift': False}) - del node['beta'] - - MatMul.update_node_stat(node, { - 'transpose_a': node.has_and_set('transpose_a'), - 'transpose_b': node.has_and_set('transpose_b'), - }) diff --git a/tools/mo/openvino/tools/mo/front/MoveEmbeddedInputsToInputs.py b/tools/mo/openvino/tools/mo/front/MoveEmbeddedInputsToInputs.py deleted file mode 100644 index fac34652f43591..00000000000000 --- a/tools/mo/openvino/tools/mo/front/MoveEmbeddedInputsToInputs.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.pass_separator import FrontStart -from openvino.tools.mo.front.restore_ports import RestorePorts -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class MoveEmbeddedInputsToInputs(FrontReplacementSubgraph): - enabled = True - - def run_before(self): - return [FrontStart] - - def run_after(self): - return [RestorePorts] - - @staticmethod - def pattern(): - return dict( - nodes=[('op', dict(kind='op', embedded_inputs=lambda x: x is not None))], - edges=[] - ) - - @staticmethod - def replace_sub_graph(graph: Graph, match: dict): - node = match['op'] - for port_index, value_attr, attrs in node['embedded_inputs']: - const = Const(graph, dict(value=node[value_attr])).create_node() - node.add_input_port(port_index, skip_if_exist=True) - const.out_port(0).connect(node.in_port(port_index)) - node.in_port(port_index).bin = attrs['bin'] - node.in_port(port_index).in_attrs.append('bin') - del node[value_attr] - del node['embedded_inputs'] diff --git a/tools/mo/openvino/tools/mo/front/OneHotDepthNormalizer.py b/tools/mo/openvino/tools/mo/front/OneHotDepthNormalizer.py deleted file mode 100644 index d41300340c97e9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/OneHotDepthNormalizer.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from 
openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.reshape import Reshape - - -class OneHotDepthNormalizer(FrontReplacementPattern): - """ - Transformation performs squeezing one-element tensors on 1st input in OneHot into 0D scalars. This transformation - allows to avoid problems with some models produced by tf2onnx which have 1D depth in OneHot. - """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('onehot', dict(kind='op', type='OneHot'))], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['onehot'] - node_name = node.soft_get('name', node.id) - reshape = create_op_with_const_inputs(graph, Reshape, {1: int64_array([])}, {'name': node_name + '/Reshape'}) - node.in_port(1).get_connection().insert_node(reshape) diff --git a/tools/mo/openvino/tools/mo/front/Pack.py b/tools/mo/openvino/tools/mo/front/Pack.py deleted file mode 100644 index e0a3b7f8d3bd58..00000000000000 --- a/tools/mo/openvino/tools/mo/front/Pack.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Node, Graph, rename_nodes -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class Pack(FrontReplacementOp): - op = "Pack" - enabled = True - - def replace_op(self, graph: Graph, node: Node): - out_node = Concat(graph, {'axis': node.axis, 'in_ports_count': len(node.in_ports())}).create_node() - pack_name = node.soft_get('name', node.id) - - for ind in node.in_ports(): - unsqueeze_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array([node.axis])}, - {'name': node.soft_get('name', node.id) + '/Unsqueeze'}) - node.in_port(ind).get_connection().set_destination(unsqueeze_node.in_port(0)) - unsqueeze_node.out_port(0).connect(out_node.in_port(ind)) - - rename_nodes([(node, pack_name + '/TBR'), (out_node, pack_name)]) - return [out_node.id] - diff --git a/tools/mo/openvino/tools/mo/front/PowerToEltwises.py b/tools/mo/openvino/tools/mo/front/PowerToEltwises.py deleted file mode 100644 index 614afa3969f46e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/PowerToEltwises.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Mul, Add, Pow -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class PowerToEltwises(FrontReplacementOp): - op = "AttributedPower" - enabled = True - force_clean_up = True - - def replace_sub_graph(self, graph: Graph, match: dict): - op = match['op'] - out_port = op.in_port(0).get_source() - - if op.soft_get('scale', 1) != 1: - const = Const(graph, {'value': mo_array(op.scale)}).create_node() - mul = Mul(graph, {'name': op.name + '/mul_'}).create_node() - const.out_port(0).connect(mul.in_port(1)) - mul.in_port(0).get_connection().set_source(out_port) - out_port = mul.out_port(0) - - if op.soft_get('shift', 0) != 0: - const = Const(graph, {'value': mo_array(op.shift)}).create_node() - add = Add(graph, {'name': op.name 
+ '/add_'}).create_node() - const.out_port(0).connect(add.in_port(1)) - add.in_port(0).get_connection().set_source(out_port) - out_port = add.out_port(0) - - if op.soft_get('power', 1) != 1: - const = Const(graph, {'value': mo_array(op.power)}).create_node() - pow = Pow(graph, {'name': op.name + '/pow_'}).create_node() - const.out_port(0).connect(pow.in_port(1)) - pow.in_port(0).get_connection().set_source(out_port) - out_port = pow.out_port(0) - - op.out_port(0).get_connection().set_source(out_port) diff --git a/tools/mo/openvino/tools/mo/front/RollWithEmptyAxesReplacer.py b/tools/mo/openvino/tools/mo/front/RollWithEmptyAxesReplacer.py deleted file mode 100644 index 3034f18f83857d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/RollWithEmptyAxesReplacer.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.AttributedRollToRoll import AttributedRollToRoll -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape - - -class RollWithEmptyAxesReplacer(FrontReplacementPattern): - """ - According to MxNet Roll specification axes is an optional parameter. If it is not specified - input tensor is flattened, then Roll is applied along 0 axis and then the resulting tensor - is reshaped to original shape. - This transformation replaces Roll with empty axes input with the following sequence of operations: - reshape to 1D tensor -> Roll -> reshape to original shape. 
- """ - enabled = True - - def run_after(self): - return [AttributedRollToRoll] - - def find_and_replace_pattern(self, graph: Graph): - for roll_node in graph.get_op_nodes(op='Roll'): - if not roll_node.in_port(2).disconnected(): - return - node_name = roll_node.soft_get('name', roll_node.id) - - # reshape to 1d tensor - reshape_to_1d = create_op_node_with_second_input(graph, Reshape, int64_array([-1]), - {'name': node_name + '/reshape'}) - roll_node.in_port(0).get_connection().insert_node(reshape_to_1d) - - # add zero const as axes input to roll - const_zero = Const(graph, {'value': int64_array([0]), 'name': node_name + '/axes'}).create_node() - const_zero.out_port(0).connect(roll_node.in_port(2)) - - # reshape to original shape - shape_of = Shape(graph, {'name': node_name + '/shape_of'}).create_node() - reshape_to_1d.in_port(0).get_connection().add_destination(shape_of.in_port(0)) - reshape_to_orig_shape = Reshape(graph, {}).create_node() - rename_nodes([(roll_node, node_name + '/roll'), (reshape_to_orig_shape, node_name)]) - shape_of.out_port(0).connect(reshape_to_orig_shape.in_port(1)) - roll_node.out_port(0).get_connection().insert_node(reshape_to_orig_shape) diff --git a/tools/mo/openvino/tools/mo/front/SizeReplacer.py b/tools/mo/openvino/tools/mo/front/SizeReplacer.py deleted file mode 100644 index 99dbe9e9c4c5a9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/SizeReplacer.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ReduceOps import ReduceProd -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.shape import Shape - - -class SizeFrontReplacer(FrontReplacementOp): - """ - Replace Size op by Shape -> ReduceProd operations - """ - op = "Size" - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['op'] - name = node.soft_get('name', node.id) - assert node.has_valid('output_type'), \ - 'Size node should have `output_type` attribute, but it`s not for node {}'.format(name) - - shape = Shape(graph, {'name': name + '/Shape/', 'output_type': node.output_type}).create_node() - node.in_port(0).get_connection().set_destination(shape.in_port(0)) - reduce_prod = create_op_node_with_second_input( - graph, ReduceProd, int64_array([0]), {'name': shape.name + 'ReduceProd/', 'keep_dims': False}, shape) - node.out_port(0).get_connection().set_source(reduce_prod.out_port(0)) - - rename_nodes([(node, name + '/ToBeDeleted'), (reduce_prod, name)]) diff --git a/tools/mo/openvino/tools/mo/front/SqueezeNormalize.py b/tools/mo/openvino/tools/mo/front/SqueezeNormalize.py deleted file mode 100644 index 12d2ce9f193ce3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/SqueezeNormalize.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.utils.error import Error - - -class SqueezeNormalize(FrontReplacementPattern): - """ - Normalizes 
inputs of the Squeeze layers. The layers should have two inputs: the input with data and input with the - dimensions to squeeze. If the second input is omitted then all dimensions of size 1 should be removed. - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for squeeze_node in graph.get_op_nodes(op='Squeeze'): - if len(squeeze_node.in_nodes()) == 1 and squeeze_node.has_valid('squeeze_dims'): - dims_node = Const(graph, {'name': squeeze_node.id + '/Dims', - 'value': int64_array(squeeze_node.squeeze_dims)}).create_node() - squeeze_node.in_port(1).connect(dims_node.out_port(0)) - del squeeze_node['squeeze_dims'] - elif len(squeeze_node.in_nodes()) == 2: - log.debug('The Squeeze node "{}" is already normalized'.format(squeeze_node.name)) - else: - raise Error('The Squeeze layer "{}" should either have 2 inputs or one input and a "squeeze_dims" ' - 'attribute'.format(squeeze_node.soft_get('name'))) diff --git a/tools/mo/openvino/tools/mo/front/ThresholdedReluDecomposition.py b/tools/mo/openvino/tools/mo/front/ThresholdedReluDecomposition.py deleted file mode 100644 index 3ecbacac6355f6..00000000000000 --- a/tools/mo/openvino/tools/mo/front/ThresholdedReluDecomposition.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.elementwise import Greater, Mul -from openvino.tools.mo.front.common.partial_infer.utils import float_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np - - -class ThresholdedReluDecomposition(FrontReplacementPattern): - """ - ThresholdedRelu(x, alpha) = x if x > alpha else 0 - - is replaced with - - ThresholdedRelu(x, alpha) = Mul(x, Cast(Greater(x, alpha), type=float)) - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='ThresholdedRelu'): - name = node.soft_get('name', node.id) - - greater = create_op_with_const_inputs(graph, Greater, {1: float_array([node.alpha])}) - greater.in_port(0).connect(node.in_port(0).get_source()) - float_greater = Cast(graph, - {'dst_type': data_type_str_to_np(graph.graph['cmd_params'].data_type)}).create_node() - greater.out_port(0).connect(float_greater.in_port(0)) - - mul = Mul(graph, {}).create_node() - node.out_port(0).get_connection().set_source(mul.out_port(0)) - mul.in_port(0).connect(node.in_port(0).get_source()) - mul.in_port(1).connect(float_greater.out_port(0)) - - rename_nodes([(node, name + '/TBR'), (mul, name)]) - graph.remove_node(node.id)
diff --git a/tools/mo/openvino/tools/mo/front/TopKNormalize.py b/tools/mo/openvino/tools/mo/front/TopKNormalize.py deleted file mode 100644 index 5ba49e57ee2fe0..00000000000000 --- a/tools/mo/openvino/tools/mo/front/TopKNormalize.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class TopKNormalize(FrontReplacementPattern): - """ - This pass does TopK layer normalization: - 1. Adds the second input to the TopK layer if it has just one. In this case the attribute 'k' should be defined. - 2. If one of TopK ports isn't connected - adds output on this port to keep this port in IR. - - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for topk_node in graph.get_op_nodes(op='TopK'): - if topk_node.in_port(1).disconnected(): - assert topk_node.has_valid('k'), 'The TopK node "{}" misses "k" attribute'.format(topk_node.name) - k_node = Const(graph, {'name': topk_node.id + '/Dims', 'value': int64_array(topk_node.k)}).create_node() - topk_node.in_port(1).connect(k_node.out_port(0)) - del topk_node['k'] - else: - log.debug('The TopK node input "{}" is already normalized'.format(topk_node.name)) diff --git a/tools/mo/openvino/tools/mo/front/TransposeOrderNormalizer.py b/tools/mo/openvino/tools/mo/front/TransposeOrderNormalizer.py deleted file mode 100644 index 61de32f341f9d6..00000000000000 --- a/tools/mo/openvino/tools/mo/front/TransposeOrderNormalizer.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.tf.pad_tf_to_pad import PadTFToPad -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.utils.error import Error - - -class TransposeOrderNormalizer(FrontReplacementSubgraph): - """ - Transpose operation requires information about order, which is represented in original frameworks differently: - - by layer parameter - - by 1-port input value - - TransposeOrderNormalizer reforms Transpose operations to store axis info in 1-port input. 
- """ - enabled = True - - def run_before(self): - # refer to the comments of the ObjectDetectionAPIPreprocessorReplacement transformation in the - # /openvino/tools/mo/front/tf/ObjectDetectionAPI.py file for more details why this dependency is needed. - return [PadTFToPad] - - def pattern(self): - return dict( - nodes=[ - ('transpose', dict(type='Transpose')) - ], - edges=[] - ) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - node = match['transpose'] - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - if len(connected_in_ports) == 1: - if node.has_valid('order'): - const = Const(graph, {'value': node.order}).create_node() - node.add_input_port(1, skip_if_exist=True) - const.out_port(0).connect(node.in_port(1)) - del graph.node[node.id]['order'] - elif node.has('order') and node.order is None: - assert node.has_and_set('reverse_order') - else: - raise Error('Can not deduce transpose `order` for {}: only one in_port and no `order` parameter.' - ''.format(node.soft_get('name', node.id))) diff --git a/tools/mo/openvino/tools/mo/front/YOLO.py b/tools/mo/openvino/tools/mo/front/YOLO.py deleted file mode 100644 index 23dc63c2cf95bc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/YOLO.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.no_op_eraser import NoOpEraser -from openvino.tools.mo.ops.regionyolo import RegionYoloOp -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.result import Result -from openvino.tools.mo.utils.error import Error - - -class YoloRegionAddon(FrontReplacementFromConfigFileGeneral): - """ - Replaces all Result nodes in graph with YoloRegion->Result nodes chain. - YoloRegion node attributes are taken from configuration file - """ - replacement_id = 'TFYOLO' - - def run_after(self): - return [NoOpEraser] - - def transform_graph(self, graph: Graph, replacement_descriptions): - op_outputs = [n for n, d in graph.nodes(data=True) if 'op' in d and d['op'] == 'Result'] - for op_output in op_outputs: - last_node = Node(graph, op_output).in_node(0) - op_params = dict(name=last_node.id + '/YoloRegion', axis=1, end_axis=-1, nchw_layout=True) - op_params.update(replacement_descriptions) - region_layer = RegionYoloOp(graph, op_params) - region_layer_node = region_layer.create_node([last_node]) - # here we remove 'axis' from 'dim_attrs' to avoid permutation from axis = 1 to axis = 2 - region_layer_node.dim_attrs.remove('axis') - Result(graph).create_node([region_layer_node]) - graph.remove_node(op_output) - - -class YoloV3RegionAddon(FrontReplacementFromConfigFileGeneral): - """ - Replaces all Result nodes in graph with YoloRegion->Result nodes chain. - YoloRegion node attributes are taken from configuration file - """ - replacement_id = 'TFYOLOV3' - - def transform_graph(self, graph: Graph, replacement_descriptions): - graph.remove_nodes_from(graph.get_nodes_with_attributes(op='Result')) - for i, input_node_name in enumerate(replacement_descriptions['entry_points']): - if input_node_name not in graph.nodes(): - raise Error('TensorFlow YOLO V3 conversion mechanism was enabled. ' - 'Entry points "{}" were provided in the configuration file. ' - 'Entry points are nodes that feed YOLO Region layers. ' - 'Node with name {} doesn\'t exist in the graph. 
' - 'Refer to documentation about converting YOLO models for more information.'.format( - ', '.join(replacement_descriptions['entry_points']), input_node_name)) - last_node = Node(graph, input_node_name).in_node(0) - op_params = dict(name=last_node.id + '/YoloRegion', axis=1, end_axis=-1, do_softmax=0, nchw_layout=True) - op_params.update(replacement_descriptions) - if 'masks' in op_params: - op_params['mask'] = op_params['masks'][i] - del op_params['masks'] - region_layer_node = RegionYoloOp(graph, op_params).create_node([last_node]) - # TODO: do we need to change axis for further permutation - region_layer_node.dim_attrs.remove('axis') - Result(graph, {'name': region_layer_node.id + '/Result'}).create_node([region_layer_node]) diff --git a/tools/mo/openvino/tools/mo/front/__init__.py b/tools/mo/openvino/tools/mo/front/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/binary_quantize_normalization.py b/tools/mo/openvino/tools/mo/front/binary_quantize_normalization.py deleted file mode 100644 index 8d413aabf11e14..00000000000000 --- a/tools/mo/openvino/tools/mo/front/binary_quantize_normalization.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Add, Mul -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class BinaryFakeQuantizeNormalization(FrontReplacementPattern): - """ - FakeQuantize in binary form gives a special meaning to its 1st and 2nd input nodes. - These nodes' values should be equal and express the threshold for quantizing tensors to two levels. 
- """ - enabled = True - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('min_in', dict()), - ('max_in', dict()), - ('quantize', dict(op='FakeQuantize', levels=2))], - edges=[ - ('min_in', 'quantize', {'in': 1}), - ('max_in', 'quantize', {'in': 2}) - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - quantize = match['quantize'] - - sum_node = Add(graph, dict()).create_node() - const = Const(graph, {'value': mo_array(0.5)}).create_node() - mul_node = Mul(graph, dict()).create_node() - - mul_node.in_port(0).connect(sum_node.out_port(0)) - mul_node.in_port(1).connect(const.out_port(0)) - - quantize.in_port(1).get_connection().get_source().connect(sum_node.in_port(0)) - quantize.in_port(2).get_connection().get_source().connect(sum_node.in_port(1)) - - quantize.in_port(1).disconnect() - quantize.in_port(2).disconnect() - - mul_node.out_port(0).connect(quantize.in_port(1)) - mul_node.out_port(0).connect(quantize.in_port(2)) diff --git a/tools/mo/openvino/tools/mo/front/broadcast_with_range.py b/tools/mo/openvino/tools/mo/front/broadcast_with_range.py deleted file mode 100644 index c0679221cbaacc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/broadcast_with_range.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Equal -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.ops.select import Select -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs, create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_nodes, Node -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class ExpandRangeConstant(FrontReplacementSubgraph): - r""" - Searches for Constant operations filled with range values starting from 0 and replaces it with Range operation - Faced in ONNX BERT -- replacing it makes model reshape-able by sequence length - - WARNING: true BIDIRECTIONAL mode of Broadcast could cause issues - (the probability is small, so we decided to keep the optimization) - - value_input[1, X] (value=range(0,X)) shape_input[Y, 1] - \ / - Broadcast(mode='bidirectional') [Y, X] - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(type='Broadcast'): - value = node.in_port(0).get_source().node - if value.soft_get('type') == 'Const': - self.replace(node, value) - - @staticmethod - def replace(node: Node, const: Node): - graph = node.graph - shape = const.shape - const_name = const.soft_get('name', const.id) - - non_one_dims = np.argwhere(shape != 1).flatten() - one_dims = np.argwhere(shape == 1).flatten() - - if not (non_one_dims.size == 1 and 5 < np.prod(shape) < 500): - # (5;500) range is deduced to affect less models - return - - value = const.value - if not np.array_equal(np.arange(0, np.prod(shape), 1).reshape(shape), value): - return - - positive_idx = non_one_dims.item(0) - negative_idx = positive_idx - len(shape) - - node_name = node.soft_get('name', node.id) - gather = create_op_with_const_inputs(graph, Gather, {1: int64_array(negative_idx), 2: int64_array(0)}, - {'name': node_name 
+ '/BroadcastingDim'}) - gather_for_const = create_op_with_const_inputs(graph, Gather, {1: int64_array(negative_idx), 2: int64_array(0)}, - {'name': const_name + '/BroadcastingDim'}) - shapeof_node = Shape(graph, {'name': const_name + '/ShapeOf'}).create_node() - shapeof_node.out_port(0).connect(gather_for_const.in_port(0)) - - equal_node = create_op_with_const_inputs(graph, Equal, {1: int64_array(1)}, {'name': node_name + '/ConstOne'}) - gather.out_port(0).connect(equal_node.in_port(0)) - - select_node = Select(graph, {'name': node_name + '/Select', - 'auto_broadcast': 'numpy'}).create_node([equal_node, gather_for_const, gather]) - - const.out_port(0).connect(shapeof_node.in_port(0)) - - range_node = create_op_with_const_inputs(graph, Range, - {0: mo_array(0, dtype=value.dtype), - 2: mo_array(1, dtype=value.dtype)}, - {'name': const_name + '/Range', 'dtype': value.dtype}) - select_node.out_port(0).connect(range_node.in_port(1)) - - node.in_port(1).get_connection().add_destination(gather.in_port(0)) - - node.in_port(0).get_connection().set_source(range_node.out_port(0)) - - if one_dims.size: - unsqueeze = create_op_node_with_second_input(graph, Unsqueeze, one_dims, - {'name': const_name + '/KeepShape'}) - range_node.out_port(0).get_connection().insert_node(unsqueeze) - rename_nodes([(const, const_name + '/ToBeDeleted'), (unsqueeze, const_name)]) - else: - rename_nodes([(const, const_name + '/ToBeDeleted'), (range_node, const_name)]) diff --git a/tools/mo/openvino/tools/mo/front/caffe/ArgMaxFlatten.py b/tools/mo/openvino/tools/mo/front/caffe/ArgMaxFlatten.py deleted file mode 100644 index 77919b4e511710..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/ArgMaxFlatten.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.reshape import Reshape - - -class ArgMaxFlatten(FrontReplacementOp): - """ - The ArgMax layer in Caffe may have non-specified 'axis' attribute. In this case it should flatten input data before - calculating ArgMax. 
- """ - op = "ArgMax" - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - argmax_node = match['op'] - if not argmax_node.has_valid('axis'): - flatten_node = create_op_node_with_second_input(graph, Reshape, int64_array([0, 1, -1]), - dict(name=argmax_node.name + '/Flatten')) - argmax_node.in_port(0).get_connection().insert_node(flatten_node) - argmax_node.axis = 2 diff --git a/tools/mo/openvino/tools/mo/front/caffe/CustomLayersMapping.xml.example b/tools/mo/openvino/tools/mo/front/caffe/CustomLayersMapping.xml.example deleted file mode 100644 index 75f1c761b8dd1b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/CustomLayersMapping.xml.example +++ /dev/null @@ -1,3 +0,0 @@ - - - diff --git a/tools/mo/openvino/tools/mo/front/caffe/MVNCaffeToMVN.py b/tools/mo/openvino/tools/mo/front/caffe/MVNCaffeToMVN.py deleted file mode 100644 index 8baa322f7f6395..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/MVNCaffeToMVN.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.ops.rank import Rank -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes - -import numpy as np - - -class MVNCaffeToMVN(FrontReplacementPattern): - """ - Replace MVNCaffe operation with MVN - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='MVNCaffe'): - node_name = node.soft_get('name', node.id) - - start_axis = 2 - if node['across_channels'] == 1: - start_axis = 1 - - rank = Rank(graph, {'name': node_name + '/Rank'}).create_node() - - # create range of axes based on `start_axis` and rank of input - rng = create_op_with_const_inputs(graph, Range, {0: int64_array(start_axis), 2: int64_array(1)}, - {'name': node_name + '/Range', 'output_type': np.int64}) - rng.in_port(1).connect(rank.out_port(0)) - - new_mvn = MVN(graph, {'eps': node.soft_get('eps', 1e-9), 'eps_mode': 'inside_sqrt', - 'normalize_variance': node.soft_get('normalize_variance', 1)}).create_node( - [node.in_port(0).get_source().node, rng]) - new_mvn.in_port(0).get_connection().add_destination(rank.in_port(0)) - node.out_port(0).get_connection().set_source(new_mvn.out_port(0)) - rename_nodes([(node, node_name + '/tbd'), (new_mvn, node_name)]) - - graph.remove_node(node.id) diff --git a/tools/mo/openvino/tools/mo/front/caffe/__init__.py b/tools/mo/openvino/tools/mo/front/caffe/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/caffe/argmax_ext.py b/tools/mo/openvino/tools/mo/front/caffe/argmax_ext.py deleted file mode 100644 index 8f7483153a4a2c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/argmax_ext.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.argmax import ArgMaxOp -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.extractor import 
FrontExtractorOp - - -class ArgMaxFrontExtractor(FrontExtractorOp): - op = 'ArgMax' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.argmax_param - - update_attrs = { - 'out_max_val': int(param.out_max_val), - 'top_k': param.top_k, - 'axis': param.axis, - } - - mapping_rule = merge_attrs(param, update_attrs) - - ArgMaxOp.update_node_stat(node, mapping_rule) - # ArgMax must be converted to TopK but without the output with values - ArgMaxOp.update_node_stat(node, {'remove_values_output': True}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/axpy.py b/tools/mo/openvino/tools/mo/front/caffe/axpy.py deleted file mode 100644 index e8d2b8c2c37166..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/axpy.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Add -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp - - -class AxpyToSSandAdd(FrontReplacementOp): - """ - Replaces Axpy layer with ScaleShift and Add. - """ - op = "Axpy" - enabled = True - - def replace_op(self, graph: Graph, node: Node): - in_node_0 = node.in_node(0) - in_node_1 = node.in_node(1) - in_node_2 = node.in_node(2) - - ss = ScaleShiftOp(graph, {'name': node.id + "/ScaleShift_", 'axis': 0}) - scale_shift = ss.create_node(inputs=[in_node_1, in_node_0]) - - el = Add(graph, {'name': node.id + "/Add_"}) - el_node = el.create_node(inputs=[scale_shift, in_node_2]) - - return [el_node.id] diff --git a/tools/mo/openvino/tools/mo/front/caffe/batchnorm_ext.py b/tools/mo/openvino/tools/mo/front/caffe/batchnorm_ext.py deleted file mode 100644 index 3f534c355912f7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/batchnorm_ext.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.ops.BatchNormInference import BatchNormInference -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class BatchNormalizationExtractor(FrontExtractorOp): - op = 'batchnorm' - enabled = True - - @classmethod - def extract(cls, node): - eps = node.pb.batch_norm_param.eps - attrs = { - 'eps': eps - } - pb_model = None if not node.soft_get('model_pb', None) else node.model_pb - if pb_model: - blobs = pb_model.blobs - assert len(blobs) >= 2, 'BatchNorm accepts not less then two input blobs' - mean = mo_array(blobs[0].data) - variance = mo_array(blobs[1].data) - - if len(blobs) == 3: - scale = blobs[2].data[0] - if scale != 0: - scale = 1.0 / scale - mean *= scale - variance *= scale - - embed_input(attrs, 1, 'gamma', np.ones(mean.shape), 'gamma') - embed_input(attrs, 2, 'beta', np.zeros(variance.shape), 'beta') - embed_input(attrs, 3, 'mean', mean, 'biases') - embed_input(attrs, 4, 'variance', variance, 'weights') - - BatchNormInference.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/binarization.py b/tools/mo/openvino/tools/mo/front/caffe/binarization.py deleted file mode 100644 index 3ac3cf7d0dc2b6..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/binarization.py +++ /dev/null @@ -1,30 
+0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array -from openvino.tools.mo.ops.fakequantize import FakeQuantize -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.const import Const - - -class BinarizationToQuantize(FrontReplacementOp): - """ - Replaces Binarization layer with Quantize. - """ - op = "Binarization" - enabled = True - - def replace_op(self, graph: Graph, node: Node): - in_node_0 = node.in_node(0) - - broadcast = lambda x: float32_array([x]) - threshold = Const(graph, {'name': node.id + "/Input_1", "value": broadcast(0)}).create_node() - in_1 = threshold - in_2 = threshold - in_3 = Const(graph, {'name': node.id + "/Input_3", "value": broadcast(-1)}).create_node() - in_4 = Const(graph, {'name': node.id + "/Input_4", "value": broadcast(+1)}).create_node() - quant = FakeQuantize(graph, {'name': node.id + "/FakeQuantize_", "levels": 2}).create_node( - inputs=[in_node_0, in_1, in_2, in_3, in_4]) - - return [quant.id] diff --git a/tools/mo/openvino/tools/mo/front/caffe/binary_conv_ext.py b/tools/mo/openvino/tools/mo/front/caffe/binary_conv_ext.py deleted file mode 100644 index ce32d13245766b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/binary_conv_ext.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.caffe.conv_ext import conv_create_attrs, conv_set_params -from openvino.tools.mo.front.caffe.extractors.utils import weights_biases -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.utils.error import Error - - -class ConvFrontExtractor(FrontExtractorOp): - op = 'ConvolutionBinary' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer, model_layer = node.pb, node.model_pb - - if not proto_layer: - raise Error('Protobuf layer can not be empty') - - conv_param = proto_layer.convolution_param - conv_type = 'ConvND' if len(proto_layer.bottom) > 1 else 'Conv2D' - - params = conv_set_params(conv_param, conv_type) - attrs = conv_create_attrs(params) - attrs.update({'op': __class__.op, - 'get_group': lambda node: node.group, - 'get_output_feature_dim': lambda node: node.output, - 'weights_index': 1 if conv_type == 'Conv2D' else 2 - }) - - # Embed weights and biases as attributes - # It will be moved to a separate nodes in special pass - attrs.update( - weights_biases(conv_param.bias_term, model_layer, start_index=len(proto_layer.bottom), proto=conv_param)) - attrs.update(layout_attrs()) - - # update the attributes of the node - Convolution.update_node_stat(node, attrs) - return cls.enabled - diff --git a/tools/mo/openvino/tools/mo/front/caffe/bn.py b/tools/mo/openvino/tools/mo/front/caffe/bn.py deleted file mode 100644 index 0166cbeebf96e4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/bn.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import input_as_const -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from 
openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp -from openvino.tools.mo.utils.error import Error - - -class BNToScaleShift(FrontReplacementOp): - """ - Replaces BN layer with ScaleShift. - """ - op = "BN" - enabled = True - - def replace_op(self, graph: Graph, node: Node): - attrs = {'name': node.id + "/ScaleShift_"} - - param = graph.node[node.id]['pb'].bn_param - pb_model = graph.node[node.id]['model_pb'] - - blobs = pb_model.blobs - - if len(blobs) != 4: - raise Error("Incorrect number of blobs in BN layer {}".format(node.id)) - - mean = mo_array(blobs[0].data) - var = mo_array(blobs[1].data) - betta = mo_array(blobs[2].data) - gamma = mo_array(blobs[3].data) - - gamma = gamma + np.repeat(param.eps, gamma.shape) - - scale = 1.0 / np.sqrt(gamma) * mean - shift = var - betta * scale - - ss = ScaleShiftOp(graph, attrs) - scale_shift = ss.create_node([node.in_node(0)]) - input_as_const(scale_shift, attrs, 1, 'weights', scale) - input_as_const(scale_shift, attrs, 2, 'biases', shift) - - return [scale_shift.id] diff --git a/tools/mo/openvino/tools/mo/front/caffe/bn_ext.py b/tools/mo/openvino/tools/mo/front/caffe/bn_ext.py deleted file mode 100644 index 606ca28ed6917a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/bn_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.BN import BN -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class BNExtractor(FrontExtractorOp): - op = 'BN' - enabled = True - - @classmethod - def extract(cls, node): - BN.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/collect_attributes.py b/tools/mo/openvino/tools/mo/front/caffe/collect_attributes.py deleted file mode 100644 index 528b619d9fa0b7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/collect_attributes.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - - -def cast_to_string(descriptor, value): - if descriptor.type != descriptor.TYPE_BOOL: - return str(value) - return str(int(value)) - - -def append_unique(attrs, new_attr, value): - if new_attr in attrs: - log.error('The parameter {} overwrites already existing value. '.format(new_attr) + - 'This happens due to flattening nested parameters. 
' + - 'Use enable_flattening_nested_params to flatten nesting') - return {new_attr: value} - - -def append_unique_enum(attrs: dict, descriptor, value): - enum_name = '{}.{}'.format( - descriptor.enum_type.full_name.rsplit('.', 1)[0], # remove enum name Z from X.Y.Z name - descriptor.enum_type.values[value].name) - return append_unique(attrs, descriptor.name, str(enum_name)) - - -def unrolled_name(descriptor_name: str, enable_flattening_nested_params: bool = False, prefix: str = '') -> str: - if not enable_flattening_nested_params: - return descriptor_name - elif prefix: - return '{}__{}'.format(prefix, descriptor_name) - return descriptor_name - - -def collect_optional_attributes(obj, prefix: str = '', disable_omitting_optional: bool = False, - enable_flattening_nested_params: bool = False): - """ - Collect all optional attributes from protobuf message - Args: - attrs: dictionary with attributes - obj: protobuf message - prefix: prefix for this protobuf.message - disable_omitting_optional: disable omitting optional flag - enable_flattening_nested_params: disable flattening optional params flag - """ - attrs = {} - fields = [field[0].name for field in obj.ListFields()] - for descriptor in obj.DESCRIPTOR.fields: - value = getattr(obj, descriptor.name) - name = unrolled_name(descriptor.name, enable_flattening_nested_params, prefix) - if descriptor.label != descriptor.LABEL_OPTIONAL: - continue - if (descriptor.has_default_value or disable_omitting_optional) or descriptor.name in fields: - if descriptor.type == descriptor.TYPE_MESSAGE: - attrs.update(collect_optional_attributes(value, - prefix=name, - disable_omitting_optional=disable_omitting_optional, - enable_flattening_nested_params=enable_flattening_nested_params)) - elif descriptor.type == descriptor.TYPE_ENUM: - attrs.update(append_unique_enum(attrs, descriptor, value)) - else: - attrs.update(append_unique(attrs, name, cast_to_string(descriptor, value))) - return attrs - - -def collect_attributes(obj, prefix: str = '', disable_omitting_optional: bool = False, - enable_flattening_nested_params: bool = False): - """ - Collect all attributes from protobuf message - Args: - attrs: dictionary with attributes - obj: protobuf message - prefix: prefix for this protobuf.message - disable_omitting_optional: disable omitting optional flag - enable_flattening_nested_params: disable flattening optional params flag - """ - attrs = collect_optional_attributes(obj, prefix, disable_omitting_optional, enable_flattening_nested_params) - fields = [field[0].name for field in obj.ListFields()] - for descriptor in obj.DESCRIPTOR.fields: - value = getattr(obj, descriptor.name) - name = unrolled_name(descriptor.name, enable_flattening_nested_params, prefix) - if descriptor.label == descriptor.LABEL_REPEATED: - if descriptor.name not in fields: - log.warning('Field {} was ignored'.format(descriptor.name)) - continue - if descriptor.type == descriptor.TYPE_MESSAGE: - for x in value: - attrs.update(collect_attributes(x, prefix=name)) - else: - attrs.update(append_unique(attrs, name, ",".join([str(v) for v in value]))) - elif descriptor.label == descriptor.LABEL_REQUIRED: - if descriptor.type == descriptor.TYPE_MESSAGE: - for x in value: - attrs.update(collect_attributes(x, prefix=name)) - else: - attrs.update(append_unique(attrs, name, cast_to_string(descriptor, value))) - return attrs - - -def merge_attrs(param, update_attrs: dict): - all_attrs = collect_attributes(param) - mandatory_attrs = set(all_attrs.keys()).intersection(set(update_attrs.keys())) - return 
{value: update_attrs[value] for value in mandatory_attrs} diff --git a/tools/mo/openvino/tools/mo/front/caffe/concat_ext.py b/tools/mo/openvino/tools/mo/front/caffe/concat_ext.py deleted file mode 100644 index d7cc2216cd91e3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/concat_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.concat import Concat - - -class ConcatFrontExtractor(FrontExtractorOp): - op = 'concat' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.pb - mapping_rule = { - 'axis': pb.concat_param.axis, - } - Concat.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/conv_ext.py b/tools/mo/openvino/tools/mo/front/caffe/conv_ext.py deleted file mode 100644 index c698b99f08da36..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/conv_ext.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.caffe.extractors.utils import get_spatial_attr, get_list_from_container, weights_biases -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.utils.error import Error - - -class ConvFrontExtractor(FrontExtractorOp): - op = 'convolution' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer, model_layer = node.pb, node.model_pb - - if not proto_layer: - raise Error('Protobuf layer can not be empty') - - conv_param = proto_layer.convolution_param - conv_type = 'ConvND' if len(proto_layer.bottom) > 1 else 'Conv2D' - - params = conv_set_params(conv_param, conv_type) - attrs = conv_create_attrs(params) - attrs.update({'op': conv_type, - 'get_group': lambda node: node.group, - 'get_output_feature_dim': lambda node: node.output, - 'weights_index': 1 if conv_type == 'Conv2D' else 2 - }) - - # Embed weights and biases as attributes - # It will be moved to a separate nodes in special pass - attrs.update( - weights_biases(conv_param.bias_term, model_layer, start_index=len(proto_layer.bottom), proto=conv_param)) - attrs.update(layout_attrs()) - - # update the attributes of the node - Convolution.update_node_stat(node, attrs) - return cls.enabled - - -class DeconvFrontExtractor(FrontExtractorOp): - op = 'deconvolution' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer, model_layer = node.pb, node.model_pb - - if not proto_layer: - raise Error('Protobuf layer can not be empty') - - deconv_param = proto_layer.convolution_param - - params = conv_set_params(deconv_param, 'Deconv2D') - attrs = conv_create_attrs(params) - attrs.update({'type': 'Deconvolution', - 'op': 'Deconv2D', - 'get_group': lambda node: node.group, - 'get_output_feature_dim': lambda node: node.output, - 'input_feature_channel': 0, - 'output_feature_channel': 1, - }) - - # Embed weights and biases as attributes - # It will be moved to a separate nodes in special pass - attrs.update(weights_biases(deconv_param.bias_term, model_layer)) - attrs.update(layout_attrs()) - - # update the attributes of the node - Convolution.update_node_stat(node, attrs) - return cls.enabled - - -def conv_create_attrs(params): 
- """ - Creates object of attrs for convolution - Args: - params: { - type_str: type_str - padding: padding - dilate: dilate - stride: stride - kernel: kernel - group: group - output: output - bias_term: bias_term - } - Returns: - object with all necessary convolution attributes - - """ - return { - 'bias_addable': True, - 'bias_term': params['bias_term'], - 'pad': int64_array([[0, 0], [0, 0], [params['padding'][1], params['padding'][1]], - [params['padding'][0], params['padding'][0]]]), - 'pad_spatial_shape': int64_array([[params['padding'][1], params['padding'][1]], - [params['padding'][0], params['padding'][0]]]), - 'dilation': int64_array([1, 1, params['dilate'][1], params['dilate'][0]]), - 'output_spatial_shape': None, - 'output_shape': None, - 'stride': int64_array([1, 1, params['stride'][1], params['stride'][0]]), - 'group': params['group'], - 'output': params['output'], - 'kernel_spatial': int64_array([params['kernel'][1], params['kernel'][0]]), - 'kernel_spatial_idx': int64_array([2, 3]), - 'reshape_kernel': True, - - 'input_feature_channel': 1, - 'output_feature_channel': 0, - } - - -def conv_set_params(conv_param, conv_type): - # Defaults - padding = [0, 0] - stride = [1, 1] - kernel = [0, 0] - dilate = [1, 1] - group = 1 - - kernel = get_spatial_attr(kernel, 'kernel_size', 'kernel', conv_param) - padding = get_spatial_attr(padding, 'pad', 'pad', conv_param) - stride = get_spatial_attr(stride, 'stride', 'stride', conv_param) - dilates = get_list_from_container(conv_param, 'dilation', int) - if len(dilates) > 0: - dilate[0] = dilate[1] = dilates[0] - - groups = get_list_from_container(conv_param, 'group', int) - group = groups[0] if len(groups) > 0 and groups[0] != 1 else group - - return { - 'type_str': conv_type, - 'padding': padding, - 'dilate': dilate, - 'stride': stride, - 'kernel': kernel, - 'group': group, - 'output': conv_param.num_output, - 'bias_term': conv_param.bias_term - } diff --git a/tools/mo/openvino/tools/mo/front/caffe/crop_ext.py b/tools/mo/openvino/tools/mo/front/caffe/crop_ext.py deleted file mode 100644 index a41b47bc5f58d8..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/crop_ext.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.crop import crop_infer -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.crop import Crop - - -class CropFrontExtractor(FrontExtractorOp): - op = 'Crop' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.crop_param - mapping_rule = { - 'axis': param.axis, - 'offset': param.offset, - 'dim': None, # set in infer - 'infer': crop_infer - } - # update the attributes of the node - Crop.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/ctcgreedydecoder_ext.py b/tools/mo/openvino/tools/mo/front/caffe/ctcgreedydecoder_ext.py deleted file mode 100644 index 896c24a34f4a83..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/ctcgreedydecoder_ext.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ctc_greedy_decoder import CTCGreedyDecoderOp -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - 
-class CTCGreedyDecoderFrontExtractor(FrontExtractorOp): - op = 'CTCGreedyDecoder' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.ctc_decoder_param - - update_attrs = { - 'ctc_merge_repeated': (int)(param.ctc_merge_repeated) - } - - mapping_rule = merge_attrs(param, update_attrs) - - mapping_rule.update(layout_attrs()) - - # update the attributes of the node - CTCGreedyDecoderOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/custom_layers_mapping.py b/tools/mo/openvino/tools/mo/front/caffe/custom_layers_mapping.py deleted file mode 100644 index b25145fcc034d9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/custom_layers_mapping.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from builtins import AttributeError - -from defusedxml import ElementTree - -from openvino.tools.mo.front.caffe.collect_attributes import collect_attributes -from openvino.tools.mo.front.caffe.extractor import node_pb_arg -from openvino.tools.mo.front.common.register_custom_ops import check_for_duplicates, add_or_override_extractor - - -def expected_attribs(layer_attrs: list, attrs: list, fileName: str): - missing = [attr for attr in attrs if attr not in layer_attrs] - if len(missing): - layer = "layer {}".format(layer_attrs['NativeType']) if 'NativeType' in layer_attrs else "one of the layers" - log.error('Missing required attribute(s) {} for {} in {}. Skipped.'.format(', '.join(missing), layer, fileName)) - return False - return True - - -def load_layers_xml(fileName: str): - try: - xml = ElementTree.parse(fileName).getroot() - except: - return {} - - layers_map = {} - for child in xml: - if child.tag == 'CustomLayer': - if expected_attribs(child.attrib, ['NativeType', 'hasParam'], fileName): - layer = child.attrib['NativeType'] - if layer in layers_map: - log.error('Duplicated layer definition in {} for NativeType = {}. Skipped.'.format(fileName, layer)) - else: - has_param = child.attrib['hasParam'].lower() - if has_param == 'true' and expected_attribs(child.attrib, ['protoParamName'], - fileName) or has_param == 'false': - layers_map[layer] = child.attrib - else: - log.error( - 'Cannot recognize {} value for hasParam for layer {}. Should be true or false. Skipped.'.format( - child.attrib['hasParam'], layer)) - - else: - log.error('Unexpected "{}" tag in {}. Should be CustomLayer. Skipped.'.format(child.tag, fileName)) - return layers_map - - -special_keys = ['id', 'name', 'precision', 'type', 'layer', 'value', 'shape', 'op', 'kind', 'infer'] - -obfuscation_counter = 0 - - -def new_obfuscated_key(attrs: dict, key: str): - global obfuscation_counter - while True: - new_key = key + str(obfuscation_counter) - obfuscation_counter += 1 - if new_key not in attrs and new_key not in special_keys: - return new_key - - -def obfuscate_attr_key(attrs: dict, key: str, keys: list): - """ - Replace attribute with key by another key that is not in - special_keys list and do not match other attributes. 
- """ - if key not in attrs or key not in special_keys: - return - - new_key = new_obfuscated_key(attrs, key) - assert new_key not in attrs - assert new_key not in keys - attrs[new_key] = attrs[key] - del attrs[key] - key_index = keys.index(key) - keys[key_index] = (key, new_key) - log.debug('Obfuscated attribute name {} to {}'.format(key, new_key)) - - -def obfuscate_special_attrs(attrs: dict, keys: list): - for key in special_keys: - obfuscate_attr_key(attrs, key, keys) - - -def proto_extractor(pb, model_pb, mapping, disable_omitting_optional, enable_flattening_nested_params): - log.info("Custom extractor for layer {} with mapping {}".format(pb.type, mapping)) - log.debug('Found custom layer {}. Params are processed'.format(pb.name)) - if mapping['hasParam'].lower() != 'true': - return {} - try: - native_attr = collect_attributes(getattr(pb, mapping['protoParamName']), - disable_omitting_optional=disable_omitting_optional, - enable_flattening_nested_params=enable_flattening_nested_params) - except AttributeError as e: - error_message = 'Layer {} has no attribute {}'.format(pb.type, str(e).split(' ')[-1]) - log.error(error_message) - raise ValueError(error_message) - keys = list(native_attr.keys()) - obfuscate_special_attrs(native_attr, keys) - # avoid 'mo_caffe' appearing in param - for attr in native_attr: - if 'mo_caffe' in native_attr[attr]: - native_attr[attr] = native_attr[attr].replace('mo_caffe', 'caffe') - log.debug(str(keys)) - log.debug(str(native_attr)) - - attrs = { - 'IE': [( - 'layer', - [('id', lambda node: node.id), 'name', 'type'], - [ - ('data', keys, []), - '@ports', - '@consts'])]} - attrs.update(native_attr) - return attrs - - -def update_extractors(extractors, layers_map, disable_omitting_optional, enable_flattening_nested_params): - keys = check_for_duplicates(extractors) - for layer, attrs in layers_map.items(): - add_or_override_extractor( - extractors, - keys, - layer, - ( - lambda l: node_pb_arg( - lambda pb, model_pb: proto_extractor( - pb, model_pb, l, disable_omitting_optional, enable_flattening_nested_params - ) - ) - )(layers_map[layer]), - 'custom layer {} from custom layers mapping xml file'.format(layer) - ) - check_for_duplicates(extractors) diff --git a/tools/mo/openvino/tools/mo/front/caffe/detection_output_ext.py b/tools/mo/openvino/tools/mo/front/caffe/detection_output_ext.py deleted file mode 100644 index 60c68b235bcf58..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/detection_output_ext.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.ops.DetectionOutput import DetectionOutput -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class DetectionOutputFrontExtractor(FrontExtractorOp): - op = 'DetectionOutput' - enabled = True - - @classmethod - def extract(cls, node): - pl = node.pb - assert pl, 'Protobuf layer can not be empty' - - param = pl.detection_output_param - - # TODO rewrite params as complex structures - if hasattr(param, 'nms_param'): - nms_threshold = param.nms_param.nms_threshold - eta = param.nms_param.eta - if param.nms_param.top_k == 0: - top_k = -1 - else: - top_k = param.nms_param.top_k - - code_type_values = [ - "", - "caffe.PriorBoxParameter.CORNER", - "caffe.PriorBoxParameter.CENTER_SIZE", - "caffe.PriorBoxParameter.CORNER_SIZE" - ] - - code_type = code_type_values[1] - if hasattr(param, 'code_type'): - if 
param.code_type < 1 or param.code_type > 3: - log.error("Incorrect value of code_type parameter") - return - code_type = code_type_values[param.code_type] - - visualize_threshold = param.visualize_threshold if param.visualize_threshold else 0.6 - - resize_mode_values = [ - "", - "caffe.ResizeParameter.WARP", - "caffe.ResizeParameter.FIT_SMALL_SIZE", - "caffe.ResizeParameter.FIT_LARGE_SIZE_AND_PAD" - ] - - if param.save_output_param.resize_param.resize_mode < 1 or param.save_output_param.resize_param.resize_mode > 3: - log.error("Incorrect value of resize_mode parameter") - return - resize_mode = resize_mode_values[param.save_output_param.resize_param.resize_mode] - - pad_mode_values = [ - "", - "caffe.ResizeParameter.CONSTANT", - "caffe.ResizeParameter.MIRRORED", - "caffe.ResizeParameter.REPEAT_NEAREST" - ] - - if param.save_output_param.resize_param.pad_mode < 1 or param.save_output_param.resize_param.pad_mode > 3: - log.error("Incorrect value of pad_mode parameter") - else: - pad_mode = pad_mode_values[param.save_output_param.resize_param.pad_mode] - - interp_mode_values = [ - "", - "caffe.ResizeParameter.LINEAR", - "caffe.ResizeParameter.AREA", - "caffe.ResizeParameter.NEAREST", - "caffe.ResizeParameter.CUBIC", - "caffe.ResizeParameter.LANCZOS4" - ] - interp_mode = "" - for x in param.save_output_param.resize_param.interp_mode: - if x < 1 or x > 5: - log.error("Incorrect value of interp_mode parameter") - return - interp_mode += interp_mode_values[x] - - attrs = { - 'share_location': int(param.share_location), - 'background_label_id': param.background_label_id, - 'code_type': code_type, - 'variance_encoded_in_target': int(param.variance_encoded_in_target), - 'keep_top_k': param.keep_top_k, - 'confidence_threshold': param.confidence_threshold, - 'visualize': param.visualize, - 'visualize_threshold': visualize_threshold, - 'save_file': param.save_file, - # nms_param - 'nms_threshold': nms_threshold, # pylint: disable=possibly-used-before-assignment - 'top_k': top_k, # pylint: disable=possibly-used-before-assignment - 'eta': eta, # pylint: disable=possibly-used-before-assignment - # save_output_param - 'output_directory': param.save_output_param.output_directory, - 'output_name_prefix': param.save_output_param.output_name_prefix, - 'output_format': param.save_output_param.output_format, - 'label_map_file': param.save_output_param.label_map_file, - 'name_size_file': param.save_output_param.name_size_file, - 'num_test_image': param.save_output_param.num_test_image, - # save_output_param.resize_param - 'prob': param.save_output_param.resize_param.prob, - 'resize_mode': resize_mode, - 'height': param.save_output_param.resize_param.height, - 'width': param.save_output_param.resize_param.width, - 'height_scale': param.save_output_param.resize_param.height_scale, - 'width_scale': param.save_output_param.resize_param.width_scale, - 'pad_mode': pad_mode, # pylint: disable=possibly-used-before-assignment - 'pad_value': ','.join(str(x) for x in param.save_output_param.resize_param.pad_value), - 'interp_mode': interp_mode, - } - - # these params can be omitted in caffe.proto and in param as consequence, - # so check if it is set or set to default - fields = [field[0].name for field in param.ListFields()] - if 'input_width' in fields: - attrs['input_width'] = param.input_width - if 'input_height' in fields: - attrs['input_height'] = param.input_height - if 'normalized' in fields: - attrs['normalized'] = int(param.normalized) - if 'objectness_score' in fields: - attrs['objectness_score'] = 
param.objectness_score - - mapping_rule = merge_attrs(param, attrs) - - # update the attributes of the node - DetectionOutput.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/dropout_ext.py b/tools/mo/openvino/tools/mo/front/caffe/dropout_ext.py deleted file mode 100644 index 3ebbb84630f02c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/dropout_ext.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.identity import Identity -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node - - -class DropoutFrontExtractor(FrontExtractorOp): - op = 'dropout' - enabled = True - - @classmethod - def extract(cls, node: Node): - Identity.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/elementwise_ext.py b/tools/mo/openvino/tools/mo/front/caffe/elementwise_ext.py deleted file mode 100644 index 55af2cf01bed69..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/elementwise_ext.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.ops.elementwise import Add, Mul, Maximum -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.eltwise_n import EltwiseNMul, EltwiseNAdd, EltwiseNMax -from openvino.tools.mo.ops.power import AttributedPower - - -class BiasToAdd(FrontExtractorOp): - """ - Replaces Bias layer with Add. 
- """ - op = "Bias" - enabled = True - - @classmethod - def extract(cls, node: Node): - attrs = {'axis': node.pb.bias_param.axis} - embed_input(attrs, 1, 'bias', node.model_pb.blobs[0].data, 'biases') - - Add.update_node_stat(node, attrs) - - return cls.enabled - - -class EltwiseExtractor(FrontExtractorOp): - op = 'Eltwise' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.eltwise_param - - input_len = len(node.in_edges()) - - eltwise_caffe_map = { - 0: EltwiseNMul if input_len > 2 else Mul, - 1: EltwiseNAdd if input_len > 2 else Add, - 2: EltwiseNMax if input_len > 2 else Maximum, - } - - operation = int(param.operation) - if operation not in eltwise_caffe_map: - raise Exception('Unsupported type of operation in Eltwise layer: ' + node.name) - - lin_op_class = eltwise_caffe_map[operation] - - mapping_rule = merge_attrs(param, {'coeff': mo_array(param.coeff)}) - mapping_rule.update(layout_attrs()) - - assert len(param.coeff) <= input_len - - lin_op_class.update_node_stat(node, mapping_rule) - return cls.enabled - - -class PowerExtractor(FrontExtractorOp): - op = 'power' - enabled = True - - @classmethod - def extract(cls, node: Node): - pb = node.pb - assert pb, 'Protobuf layer can not be empty' - param = pb.power_param - attrs = { - 'output_spatial_shape': None, - 'power': param.power, - 'scale': param.scale, - 'shift': param.shift, - } - AttributedPower.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/eltwise_add_normalize.py b/tools/mo/openvino/tools/mo/front/caffe/eltwise_add_normalize.py deleted file mode 100644 index 96fc98f4e3a449..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/eltwise_add_normalize.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.eltwise_n import EltwiseNReplacement -from openvino.tools.mo.ops.elementwise import Mul -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.const import Const - - -class EltwiseAddNormalize(FrontReplacementPattern): - """ - The Caffe layer "Eltwise" with operation SUM has optional attribute "coeff" which specifies the constant to multiply - the inputs before applying. This transformation inserts Mul operation to the inputs and removes the "coeff" - attribute from the node. 
- """ - enabled = True - - def run_before(self): - return [EltwiseNReplacement] - - @staticmethod - def __insert_mul_node_with_coeff(node: Node, port: int, coeff: float): - if coeff != 1: - mul_node = Mul(node.graph, {'name': node.id + '/coeff_mul'}).create_node() - const_node = Const(node.graph, {'name': node.id + '/coeff', 'value': mo_array([coeff])}).create_node() - node.in_port(port).get_connection().insert_node(mul_node) - const_node.out_port(0).connect(mul_node.in_port(1)) - - def find_and_replace_pattern(self, graph: Graph): - for eltwise_node in graph.get_op_nodes(op='EltwiseN', operation='sum') + graph.get_op_nodes(op='Add'): - if eltwise_node.has_valid('coeff') and len(eltwise_node.coeff): - coeff = eltwise_node.coeff - - for i in range(len(coeff)): - __class__.__insert_mul_node_with_coeff(eltwise_node, i, coeff[i]) - - eltwise_node.coeff = None - if len(coeff) > 2: - eltwise_node.op = "EltwiseN" - eltwise_node.type = "EltwiseN" - eltwise_node['operation'] = "sum" - diff --git a/tools/mo/openvino/tools/mo/front/caffe/elu.py b/tools/mo/openvino/tools/mo/front/caffe/elu.py deleted file mode 100644 index 5639c1e7dac811..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/elu.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import Elu -from openvino.tools.mo.front.caffe.collect_attributes import collect_attributes -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ELUFrontExtractor(FrontExtractorOp): - op = 'ELU' - enabled = True - - @classmethod - def extract(cls, node): - param = node.pb.elu_param - attrs = collect_attributes(param) - - Elu.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/extractor.py b/tools/mo/openvino/tools/mo/front/caffe/extractor.py deleted file mode 100644 index d44604e1e8daa1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/extractor.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.caffe.extractors.native_caffe import native_caffe_node_extractor -from openvino.tools.mo.front.common.register_custom_ops import extension_op_extractor -from openvino.tools.mo.front.extractor import CaffePythonFrontExtractorOp -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def node_pb_arg(pb_extractor): - return lambda node: pb_extractor(node.pb, node.model_pb) - - -""" -Keys are names that appear as layer names in .prototxt. 
-Full list is available here: http://caffe.berkeleyvision.org/tutorial/layers.html -""" - -caffe_type_extractors = {} - - -def common_caffe_fields(node: Node) -> dict: - if node.has_valid('op') and node.op == 'Identity': - return {} - pb = node.pb if node.pb else node - layer_type = pb.type - if isinstance(layer_type, int): - layer_type = pb.LayerType.DESCRIPTOR.values_by_number[layer_type].name - layer_type = str(layer_type) - - return { - 'kind': 'op', - 'name': pb.name, - 'type': layer_type, - 'op': layer_type, - # generic code relies on op; it should be overridden by specific op extractor - 'infer': None, - } - - -def caffe_extractor(node: Node, lowered_keys_map: dict) -> (bool, dict): - if node.has_valid('op') and node.op == 'Identity': - return True, {} - result = common_caffe_fields(node) - supported = False - name = None - - layer_type = result['type'].lower() - if layer_type in lowered_keys_map: - layer_type = lowered_keys_map[layer_type] - assert layer_type in caffe_type_extractors - name = layer_type - - if name: # it is either standard or registered via CustomLayersMapping.xml - attrs = caffe_type_extractors[name](node) - # intentionally as Python registry if not found returns None - if attrs is not None: - result.update(attrs) - supported = True - - if not supported: - raise Error('Found custom layer "{}". Model Optimizer does not support this layer. '.format(node.id) + - 'Please, implement extension. ' + - refer_to_faq_msg(45)) - - if 'infer' not in result or not result['infer']: - result.update(native_caffe_node_extractor(node)) - - phase_attr = check_phase(node) - result.update(phase_attr) - return supported, result - - -def check_phase(node: Node): - if node.has_valid('pb') and hasattr(node.pb, 'include'): - for i in node.pb.include: - if hasattr(i, 'phase'): - return {'phase': i.phase} - return {} - - -def register_caffe_python_extractor(op: Op, name: str = None): - if not name and hasattr(op, 'op'): - name = op.op - if not name: - raise Error("Can not register Op {}. Please, call function 'register_caffe_python_extractor' " - "with parameter 'name' .".format(op), - refer_to_faq_msg(87)) - CaffePythonFrontExtractorOp.registered_ops[name] = lambda node: extension_op_extractor(node, op) diff --git a/tools/mo/openvino/tools/mo/front/caffe/extractors/__init__.py b/tools/mo/openvino/tools/mo/front/caffe/extractors/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/extractors/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/caffe/extractors/native_caffe.py b/tools/mo/openvino/tools/mo/front/caffe/extractors/native_caffe.py deleted file mode 100644 index bdfca14eab68d5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/extractors/native_caffe.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.common.partial_infer.caffe_fallback import caffe_native_node_infer - - -def blob_name(i): - """ - Implements legacy schema for blobs naming: - 0-th blob is called 'weights' - 1-th blob is called 'biases' - then, next blobs are called according to the new default schema - with 'custom_' prefix: custom_2, custom_3 and so on. 
- """ - predefined_names = ['weights', 'biases'] - if i < len(predefined_names): - return predefined_names[i] - else: - return 'custom_{}'.format(i) - - -def extract_custom_blobs(node): - """ - Enumerate all blobs in node.model_pb, for each blob - creates a new embedded input of name 'custom_X', where X is an index >= 0 according - to the order blobs appear in node.model_pb. The order is also enforced by input port index. - So the order of blobs is preserved in the final IR generation. - Order is important because they can be accessed by indices (in addition to names). - Update node attributes in-place. - """ - base_port = len(node.in_nodes()) - if not hasattr(node.model_pb, 'blobs'): - return - for i, blob in enumerate(node.model_pb.blobs): - port = base_port + i - internal_name = '_custom_blob_' + str(i) - log.debug("Found new custom blob of length {} for node {}. ".format( - len(blob.data), - node.name if node.has_valid('name') else '' - ) + - "It will appear as input {} and internal attribute {}.".format( - port, - internal_name)) - embed_input(node.graph.node[node.id], port, internal_name, blob.data, blob_name(i)) - - -def native_caffe_node_extractor(node): - extract_custom_blobs(node) - return dict(infer=caffe_native_node_infer, top=list(node.pb.top)[0]) diff --git a/tools/mo/openvino/tools/mo/front/caffe/extractors/tile.py b/tools/mo/openvino/tools/mo/front/caffe/extractors/tile.py deleted file mode 100644 index 4b1f2acd01bee9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/extractors/tile.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.tile import AttributedTile - - -class TileFrontExtractor(FrontExtractorOp): - op = 'Tile' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.tile_param - mapping_rule = { - 'axis': int(param.axis), - 'tiles': int(param.tiles), - } - mapping_rule = merge_attrs(param, mapping_rule) - - AttributedTile.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/extractors/utils.py b/tools/mo/openvino/tools/mo/front/caffe/extractors/utils.py deleted file mode 100644 index 610a82e587db78..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/extractors/utils.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.const import Const - - -def dim_to_shape(dim): - """ - Extracts proto message with shape dimensions to shape expressed as np.array. - Args: - dim: proto message with shape dimensions - - Returns: - shape of the layer as np.array - """ - return int64_array(dim) - - -def embed_input(attrs: dict, port: int, name: str, value: np.array, bin_name: str = None): - """ - Appends port information to the given set of attributes of the current layer. - Mutates passed attributes. 
- Args: - attrs: dictionary of existing attributes - port: relative number of the port for the layer - name: name of the input - value: np.array of values - bin_name: optional, representing the specific behavior of the blob, - either 'weights' or 'biases' - - Returns: - mutated attributes dictionary with new properties under 'embedded_inputs' key - - """ - assert name not in attrs - - # memory safe value conversion to numpy; - # previously we used `np.array(value)` and it was greedy for memory on caffe models especially - # previously we always created float64 np.ndarray, now we force float32, we can't get data type from "value" for - # Caffe, because it comes as float64 from protobuf - val = np.ndarray(shape=(len(value),), dtype=np.float32) - for i, item in enumerate(value): - val[i] = item - attrs[name] = val - - if 'embedded_inputs' not in attrs: - attrs['embedded_inputs'] = [] - if not bin_name: - bin_name = name - input_val = (port, name, {'bin': bin_name}) - # (input index, input name, future edge attributes) - attrs['embedded_inputs'].append(input_val) # pylint: disable=not-callable - - -def input_as_const(node: Node, attrs: dict, port: int, bin: str, value: np.ndarray): - """ - Inserts constant node on input `port` of `node` with `values` and `attrs`. Marks input edge with bin `attribute` - """ - graph = node.graph - const = Const(graph, {'value': value, **attrs}).create_node() - node.add_input_port(port, skip_if_exist=True) - const.out_port(0).connect(node.in_port(port)) - node.in_port(port).bin = bin - node.in_port(port).in_attrs.append('bin') - - -def weights_biases(bias_term: bool, model_layer, start_index: int = 1, proto={}): - """ - Creates object with configured inputs in the following order: 0: weights, 1: biases - Args: - bias_term: flag to whether include biases in the final input or not - model_layer: caffemodel layer containing values in blobs - - Returns: - dictionary with set up inputs or empty dictionary - """ - attrs = {} - if not model_layer: - if proto != {}: - if proto.weight_filler: - if proto.weight_filler.type == "diagonal": - data_len = proto.kernel_size[0] * proto.kernel_size[0] * proto.num_output - data = np.zeros(data_len * data_len, dtype=np.float32) - for i in range(0, data_len): - data[i * (data_len + 1)] = proto.weight_filler.diag_val[i] - - bias = np.zeros(proto.num_output, np.float32) - embed_input(attrs, start_index, 'weights', data) - if bias_term: - embed_input(attrs, start_index + 1, 'biases', bias) - - return attrs - - blobs = model_layer.blobs - embed_input(attrs, start_index, 'weights', blobs[0].data) - if bias_term: - embed_input(attrs, start_index + 1, 'biases', blobs[1].data) - return attrs - - -def get_list_from_container(param, prop: str, t): - """ - Takes proto parameter and extracts a value it stores. - Args: - param: proto parameter - prop: name of the property to take - t: type of the value (int, float etc.) - only primitive ones - - Returns: - If it is a container, returns the list with values. - If it is a single value of the given type - a list of single value. - If neither or property does not exist for param - empty list. 
- """ - if not param or (param and not hasattr(param, prop)): - return [] - - prop_val = getattr(param, prop) - - if not prop_val: - return [] - elif isinstance(prop_val, t): - return [prop_val] - elif len(prop_val) > 0: - return prop_val - return [] - - -def get_spatial_attr(default: list, single_name: str, name: str, param): - attr_h = default[1] - attr_w = default[0] - if hasattr(param, '{}_h'.format(name)): - if getattr(param, '{}_h'.format(name)) != default[1] and getattr(param, '{}_h'.format(name)) != 0: - attr_h = getattr(param, '{}_h'.format(name)) - if hasattr(param, '{}_w'.format(name)): - if getattr(param, '{}_w'.format(name)) != default[0] and getattr(param, '{}_w'.format(name)) != 0: - attr_w = getattr(param, '{}_w'.format(name)) - if (not attr_h or not attr_w) or (attr_h == attr_w == default[0]): - attrs = get_list_from_container(param, single_name, int) - if len(attrs) > 0 and attrs != default: - attr_w = attr_h = attrs[0] - return attr_w, attr_h - - -def get_canonical_axis_index(shape, axis): - return len(shape) + axis if axis < 0 else axis diff --git a/tools/mo/openvino/tools/mo/front/caffe/flatten_ext.py b/tools/mo/openvino/tools/mo/front/caffe/flatten_ext.py deleted file mode 100644 index 235638bb436fec..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/flatten_ext.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.flatten import Flatten - - -class FlattenFrontExtractor(FrontExtractorOp): - op = 'Flatten' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.flatten_param - - attrs = { - 'axis': param.axis, - 'end_axis': param.end_axis, - } - - Flatten.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/grn_ext.py b/tools/mo/openvino/tools/mo/front/caffe/grn_ext.py deleted file mode 100644 index cd56f3332eaa87..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/grn_ext.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.grn import GRNOp -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class GRNFrontExtractor(FrontExtractorOp): - op = 'GRN' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.grn_param - - update_attrs = { - 'bias': param.bias, - } - - mapping_rule = merge_attrs(param, update_attrs) - - # update the attributes of the node - GRNOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/inner_product_ext.py b/tools/mo/openvino/tools/mo/front/caffe/inner_product_ext.py deleted file mode 100644 index 3102bd24f2c3f4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/inner_product_ext.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.MatMul import FullyConnected -from openvino.tools.mo.front.caffe.extractors.utils import weights_biases -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class InnerProductFrontExtractor(FrontExtractorOp): - op = 'innerproduct' - enabled = True - - @classmethod - def extract(cls, node): - param = node.pb.inner_product_param - pb_model = 
node.model_pb - attrs = { - 'out-size': param.num_output, - 'transpose_weights': not param.transpose, - } - attrs.update(weights_biases(param.bias_term, pb_model)) - FullyConnected.update_node_stat(node, attrs) - return cls.enabled - - -class AnotherInnerProductFrontExtractor(FrontExtractorOp): - op = 'inner_product' - enabled = True - - @classmethod - def extract(cls, node): - param = node.pb.inner_product_param - pb_model = node.model_pb - attrs = { - 'out-size': param.num_output, - 'transpose_weights': not param.transpose, - } - attrs.update(weights_biases(param.bias_term, pb_model)) - FullyConnected.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/input_ext.py b/tools/mo/openvino/tools/mo/front/caffe/input_ext.py deleted file mode 100644 index 5fd2961021af60..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/input_ext.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.front.caffe.extractors.utils import dim_to_shape -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class InputFrontExtractor(FrontExtractorOp): - op = 'input' - enabled = True - - @classmethod - def extract(cls, node): - Parameter.update_node_stat(node, {'shape': dim_to_shape(node.pb.input_param.shape[0].dim)}) - return cls.enabled - - -class GlobalInputFrontExtractor(FrontExtractorOp): - op = 'globalinput' - enabled = True - - @classmethod - def extract(cls, node): - Parameter.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/interp_ext.py b/tools/mo/openvino/tools/mo/front/caffe/interp_ext.py deleted file mode 100644 index fb774f9eb0d0f4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/interp_ext.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class InterpFrontExtractor(FrontExtractorOp): - op = 'Interp' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.interp_param - - update_attrs = { - 'height': param.height, - 'width': param.width, - 'zoom_factor': param.zoom_factor, - 'shrink_factor': param.shrink_factor, - } - - mapping_rule = merge_attrs(param, update_attrs) - mapping_rule.update({'fw': 'caffe', 'mode': 'linear', 'axes': int64_array([2, 3]), - 'pads_begin': param.pad_beg, 'pads_end': param.pad_end, 'align_corners': 1}) - Interpolate.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/loader.py b/tools/mo/openvino/tools/mo/front/caffe/loader.py deleted file mode 100644 index 120725d2f43d5c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/loader.py +++ /dev/null @@ -1,280 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import importlib -import logging as log -import mmap -import os -import sys - -import numpy as np -from google.protobuf import text_format -from google.protobuf.internal import api_implementation - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array -from 
openvino.tools.mo.front.extractor import add_outputs_identity -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.error import Error, FrameworkError -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def import_caffe_pb2(caffe_parser_path: str): - # import caffe_pb2 - sys.path.insert(0, caffe_parser_path) - caffe_pb2 = importlib.import_module("caffe_pb2") - sys.path.pop(0) - - return caffe_pb2 - -def load_caffe_proto_model(caffe_pb2, proto_path: str, model_path: [str, None] = None): - # 1. python protobuf is used - if api_implementation._implementation_type == 'python': - message = 'Please expect that Model Optimizer conversion might be slow. ' \ - 'You are currently using Python protobuf library implementation. \n' - try: - from google.protobuf.pyext import cpp_message - # Check os windows and env variable PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION - if os.name == 'nt' and os.environ.get('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', default='') != 'cpp': - # 2. cpp implementation is available but not used - message += 'However, cpp implementation is available, you can boost ' \ - 'model conversion by setting PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION env variable to cpp. \n' \ - 'Run: set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=cpp \n' - except ImportError: - # 3. cpp implementation is not available - message += 'Check that your protobuf package version is aligned with requirements_caffe.txt.' - print(message + '\n\n' + refer_to_faq_msg(80)) - - # Read proto layers - try: - proto = caffe_pb2.NetParameter() - with open(proto_path, "r") as file: - text_format.Merge(str(file.read()), proto) - except Exception as e: - log.error('Exception message: {}\n\n'.format(e) + - ' Possible reasons:\n' + - ' 1. {} does not exist\n'.format(proto_path) + - ' 2. {} does not have a valid structure, for example, it was downloaded as html\n'.format( - proto_path) + - ' 3. {} contains custom layers or attributes that are not supported\n'.format(proto_path) + - ' in Model Optimizer by default.\n\n' + - ' After you made sure that {} has a valid structure and still see this issue, then\n'.format( - proto_path) + - ' you need to generate a python parser for caffe.proto that was used when the model\n' + - ' was created.\n' + - ' Run "python3 generate_caffe_pb2.py --input_proto ${PATH_TO_CAFFE}/src/caffe/proto/caffe.proto"' + - refer_to_faq_msg(1) + '\n\n', extra={'framework_error': True}) - raise FrameworkError('Model Optimizer is not able to parse {}'.format(proto_path)) from e - - # Read model layer if exists - model = None - try: - if model_path: - model = caffe_pb2.NetParameter() - with open(model_path, "rb") as infile: - map = mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ) - model.MergeFromString(map) - except Exception as e: - third_point = '' - if api_implementation._implementation_type == 'python': - third_point = ' 3. Python protobuf implementation was used. Some models can\'t be converted ' + \ - ' in this configuration. Please, use Python version with existing cpp implementation of ' + \ - 'protobuf library or build it by yourself\n' + refer_to_faq_msg(103) - log.error('Exception message: {}\n\n'.format(e) + - ' Possible reasons:\n' + - ' 1. {} does not exist\n'.format(model_path) + - ' 2. 
{} does not have a valid structure\n'.format(model_path) + third_point, - extra={'framework_error': True}) - raise FrameworkError('Model Optimizer is not able to parse {}'.format(model_path)) from e - - return proto, model - - -def get_layers(proto): - if len(proto.layer): - return proto.layer - elif len(proto.layers): - return proto.layers - else: - raise Error('Invalid proto file: there is neither "layer" nor "layers" top-level messages. ' + - refer_to_faq_msg(7)) - - -def caffe_pb_to_nx(graph, proto, model): - """ - Converts proto/model layers to a graph. Edges are restored by bottom/top attributes. - Graph nodes has two attributes: pb for prototxt definition and model_pb for caffemodel definition. - - Parameters - ---------- - proto : NetParameter - Protobuf message for NetParameter, representing .prototxt. - model : NetParameter - Protobuf message for NetParameter, representing .caffemodel. - - Returns - ---------- - Graph - built NX Directed graph. - """ - # Blobs in prototxt model can be reused by inplace layer. - # This requires loading of pb layers in order and tracking the latest - # layer that writes a particular blob. - blob_producers = {} # maps layer blob name to node id in graph, port and layer name - proto_layers = get_layers(proto) - model_layers = None - if model: - model_layers = get_layers(model) - - input_dims = [] - input_names = [] - if len(proto.input_dim) > 0 and len(list(proto.input)) > 1: - # example of proto input - # input: "data" - # input_dim: 1 - # input_dim: 3 - # input_dim: 500 - # input_dim: 500 - # input: "info" - # input_dim: 1 - # input_dim: 3 - raise Error('Old-style inputs (via "input_dims") are not supported. ' + - 'Please specify inputs via "input_shape". ' + - refer_to_faq_msg(8)) - elif len(list(proto.input)) == 1 and len(list(proto.input_dim)): - # example of proto input - # input: "data" - # input_dim: 1 - # input_dim: 3 - # input_dim: 500 - # input_dim: 500 - input_dims = [int64_array(list(proto.input_dim))] - input_names = [proto.input[0]] - - elif len(list(proto.input)) == 1 and len(list(proto.input_shape)): - # example of proto input - # input: "data" - # input_shape - # { - # dim: 1 - # dim: 3 - # dim: 227 - # dim: 227 - # } - input_dims = [int64_array(proto.input_shape[0].dim)] - input_names = [proto.input[0]] - - elif len(proto.input_shape) > 0: - # example of proto input - # input: "data" - # input_shape - # { - # dim: 1 - # dim: 3 - # dim: 600 - # dim: 1000 - # } - # input: "im_info" - # input_shape - # { - # dim: 1 - # dim: 3 - # } - for i in range(len(proto.input_shape)): - input_dims.append(int64_array(proto.input_shape[i].dim)) - input_names.append(proto.input[i]) - - for i in range(len(input_names)): - input_name = input_names[i] - input_dim = input_dims[i] - # Input is defined at the top level of proto instead of distinct Input layer - graph.add_node(input_name, pb=None, model_pb=None, type='GlobalInput', name=input_name, shape=input_dim, - kind='op') - blob_producers[input_name] = (input_name, 0, input_name) - - used_blobs = set() - for i, layer in enumerate(proto_layers): - - model_layer = None - - if model_layers: - for ml in model_layers: - if ml.name == layer.name: - model_layer = ml - break - if layer.type == 'Input': - if hasattr(layer, 'input_param'): - input_param = layer.input_param - else: - raise Error('Input layer has no input dims. 
' + - refer_to_faq_msg(8)) - if hasattr(input_param, 'shape'): - """ - example of proto input - layer - { - name: "data" - type: "Input" - top: "data" - input_param {shape: {dim: 1 dim: 3 dim: 600 dim: 1000}} - } - - layer - { - name: "im_info" - type: "Input" - top: "im_info" - input_param {shape: {dim: 1 dim: 3}} - } - """ - dims = map(int, list(filter(None, str(list(input_param.shape)[0]).split('dim:')))) - input_dims.append(int64_array(list(dims))) - input_names.append(layer.name) - - node_id = graph.unique_id(layer.name) - graph.add_node(node_id, pb=layer, model_pb=model_layer, kind='op', type='Parameter') - if hasattr(graph, 'op_names_statistic') and hasattr(layer, 'type'): - graph.op_names_statistic[layer.type] += 1 - - # connect inputs based on blob_producers dictionary - for dst_port, bottom in enumerate(layer.bottom): - add_edge_caffe(graph, bottom, node_id, blob_producers, dst_port) - used_blobs.add(bottom) - - # update blob producers dictionary by output ports - for src_port, top in enumerate(layer.top): - if top in blob_producers: - log.debug("Detected reuse of blob {} by layer {}".format(top, node_id)) - blob_producers[top] = (node_id, src_port, layer.name) - - # Tensor names information corresponding to a node is stored on outgoing edges. - # As output nodes do not have outgoing edges, fake outputs are required. In the following code - # for each output Identity node is added, and tensor name for the output is kept - # on (output, fake output) edge. After Result nodes adding transformation fake outputs - # are deleted from graph. - all_blobs = set(blob_producers.keys()) - add_outputs_identity(graph, all_blobs - used_blobs, add_edge_caffe, - {'blob_producers': blob_producers, 'dst_port': 0}) - - if len(input_names) <= 0: - raise Error('The topology contains no "input" layers. ' + - refer_to_faq_msg(79)) - return {fake_node_name: shape for (fake_node_name, shape) in zip(input_names, input_dims)} - - -def add_edge_caffe(graph: Graph, bottom: str, dst_layer: str, blob_producers: dict, dst_port: int): - """ - Creates an edge and adds it to the graph. 
- """ - src_layer = blob_producers[bottom][0] - src_port = blob_producers[bottom][1] - edge_attrs = { - 'out': src_port, - 'in': dst_port, - 'name': bottom, - # debug anchor for a framework name and tensor name - 'fw_tensor_debug_info': [(blob_producers[bottom][2], bottom)], - 'in_attrs': ['in', 'name'], - 'out_attrs': ['out', 'name'], - 'data_attrs': ['fw_tensor_debug_info'] - } - graph.add_edge(src_layer, dst_layer, **edge_attrs) diff --git a/tools/mo/openvino/tools/mo/front/caffe/lrn_ext.py b/tools/mo/openvino/tools/mo/front/caffe/lrn_ext.py deleted file mode 100644 index a8eb1caa1b7df0..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/lrn_ext.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.lrn import AttributedLRN - - -class LRNExtractor(FrontExtractorOp): - op = 'LRN' - enabled = True - - @classmethod - def extract(cls, node): - param = node.pb.lrn_param - region = 'same' if param.norm_region == 1 else 'across' - - AttributedLRN.update_node_stat(node, { - 'alpha': param.alpha, - 'beta': param.beta, - 'bias': param.k, - 'local_size': param.local_size, - 'region': region, - }) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/mvn_ext.py b/tools/mo/openvino/tools/mo/front/caffe/mvn_ext.py deleted file mode 100644 index f9b608d788f543..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/mvn_ext.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.mvn import MVNCaffe -from openvino.tools.mo.front.caffe.collect_attributes import collect_attributes -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class MVNFrontExtractor(FrontExtractorOp): - op = 'MVN' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.mvn_param - - attrs = collect_attributes(param) - - if 'normalize_variance' not in attrs: - attrs['normalize_variance'] = 1 - if 'across_channels' not in attrs: - attrs['across_channels'] = 0 - - # update the attributes of the node - MVNCaffe.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/normalize_ext.py b/tools/mo/openvino/tools/mo/front/caffe/normalize_ext.py deleted file mode 100644 index 26e6a3ea45d982..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/normalize_ext.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.normalize import NormalizeOp -from openvino.tools.mo.front.caffe.collect_attributes import collect_attributes -from openvino.tools.mo.front.caffe.extractors.utils import weights_biases -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class NormalizeFrontExtractor(FrontExtractorOp): - op = 'Normalize' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.norm_param - - attrs = collect_attributes(param, enable_flattening_nested_params=True) - attrs.update(weights_biases(False, node.model_pb)) - # update the attributes of the node - NormalizeOp.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/permute_ext.py b/tools/mo/openvino/tools/mo/front/caffe/permute_ext.py deleted file mode 100644 index 213d1c745bc382..00000000000000 --- 
a/tools/mo/openvino/tools/mo/front/caffe/permute_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class PermuteFrontExtractor(FrontExtractorOp): - op = 'permute' - enabled = True - - @classmethod - def extract(cls, node): - order = node.pb.permute_param.order - Transpose.update_node_stat(node, {'order': mo_array(order, dtype=np.int32)}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/pooling_ext.py b/tools/mo/openvino/tools/mo/front/caffe/pooling_ext.py deleted file mode 100644 index b306e112fe1fbc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/pooling_ext.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.caffe.extractors.utils import get_spatial_attr -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.pooling import Pooling - - -class PoolingFrontExtractor(FrontExtractorOp): - op = 'pooling' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.pooling_param - - method = 'max' - exclude_pad = True - kernel = [0, 0] - stride = [1, 1] - padding = [0, 0] - global_pooling = False - - if hasattr(param, 'global_pooling') and param.global_pooling: - global_pooling = param.global_pooling - else: - kernel = get_spatial_attr(kernel, 'kernel_size', 'kernel', param) - padding = get_spatial_attr(padding, 'pad', 'pad', param) - stride = get_spatial_attr(stride, 'stride', 'stride', param) - - if param.pool == 0: - method = 'max' - exclude_pad = True - elif param.pool == 1: - method = 'avg' - exclude_pad = False - else: - raise ValueError('Unknown Pooling Method!') - - pooling_convention = 'full' # for Caffe rounding type should be ceil - rt = 'ceil' - - if hasattr(param, 'ceil_mode') and not param.ceil_mode: - # If pooling has ceil_mode and ceil_mode is False using floor for rounding shapes in partial_infer - pooling_convention = 'valid' - rt = 'floor' - - attrs = { - 'window': int64_array([1, 1, kernel[1], kernel[0]]), - 'stride': int64_array([1, 1, stride[1], stride[0]]), - 'pad': int64_array([[0, 0], [0, 0], [padding[1], padding[1]], [padding[0], padding[0]]]), - 'pad_spatial_shape': int64_array([[padding[1], padding[1]], [padding[0], padding[0]]]), - 'pool_method': method, - 'exclude_pad': exclude_pad, - 'global_pool': global_pooling, - 'output_spatial_shape': None, - 'rounding_type': rt - } - - attrs.update(layout_attrs()) - attrs['pooling_convention'] = pooling_convention - - # update the attributes of the node - Pooling.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/prelu_ext.py b/tools/mo/openvino/tools/mo/front/caffe/prelu_ext.py deleted file mode 100644 index b36657f2bef428..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/prelu_ext.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.prelu import PReLU -from openvino.tools.mo.front.caffe.collect_attributes import 
merge_attrs -from openvino.tools.mo.front.caffe.extractors.utils import weights_biases -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class PreluFrontExtractor(FrontExtractorOp): - op = 'PReLU' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - pb_model = node.model_pb - param = proto_layer.prelu_param - - update_attrs = { - 'channel_shared': int(param.channel_shared) - } - - variance_norm_caffe_map = { - 0: 'caffe.FillerParameter.FAN_IN', - 1: 'caffe.FillerParameter.FAN_OUT', - 2: 'caffe.FillerParameter.AVERAGE' - } - - if hasattr(param, 'filler'): - update_attrs.update({ - 'filler_type': param.filler.type, - 'filler_value': int(param.filler.value), - 'min': int(param.filler.min), - 'max': int(param.filler.max), - 'mean': int(param.filler.mean), - 'std': int(param.filler.std), - 'sparse': param.filler.sparse, - 'variance_norm': variance_norm_caffe_map[param.filler.variance_norm] - }) - - mapping_rule = merge_attrs(param, update_attrs) - mapping_rule.update(weights_biases(False, pb_model)) - mapping_rule.update(layout_attrs()) - - # update the attributes of the node - PReLU.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/priorbox_clustered_ext.py b/tools/mo/openvino/tools/mo/front/caffe/priorbox_clustered_ext.py deleted file mode 100644 index 9e08398d30c6b1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/priorbox_clustered_ext.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.priorbox_clustered import PriorBoxClusteredOp -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class PriorBoxClusteredFrontExtractor(FrontExtractorOp): - op = 'PriorBoxClustered' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.prior_box_param - - variance = param.variance - if len(variance) == 0: - variance = [0.1] - - update_attrs = { - 'width': list(param.width), - 'height': list(param.height), - 'flip': int(param.flip), - 'clip': int(param.clip), - 'variance': list(variance), - 'img_size': param.img_size, - 'img_h': param.img_h, - 'img_w': param.img_w, - 'step': param.step, - 'step_h': param.step_h, - 'step_w': param.step_w, - 'offset': param.offset, - } - - mapping_rule = merge_attrs(param, update_attrs) - - mapping_rule.update(layout_attrs()) - - # update the attributes of the node - PriorBoxClusteredOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/priorbox_ext.py b/tools/mo/openvino/tools/mo/front/caffe/priorbox_ext.py deleted file mode 100644 index 984f2bc70ecd7d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/priorbox_ext.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.ops.priorbox import PriorBoxOp -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class 
PriorBoxFrontExtractor(FrontExtractorOp): - op = 'PriorBox' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.prior_box_param - - variance = param.variance - if len(variance) == 0: - variance = [0.1] - - update_attrs = { - 'aspect_ratio': mo_array(param.aspect_ratio), - 'min_size': mo_array(param.min_size), - 'max_size': mo_array(param.max_size), - 'flip': int(param.flip), - 'clip': int(param.clip), - 'variance': list(variance), - 'img_size': param.img_size, - 'img_h': param.img_h, - 'img_w': param.img_w, - 'step': param.step, - 'step_h': param.step_h, - 'step_w': param.step_w, - 'offset': param.offset, - } - - # these params can be omitted in caffe.proto and in param as consequence, - # so check if it is set or set to default - fields = [field[0].name for field in param.ListFields()] - if 'density' in fields: - update_attrs['density'] = mo_array(param.density) - if 'fixed_size' in fields: - update_attrs['fixed_size'] = mo_array(param.fixed_size) - if 'fixed_ratio' in fields: - update_attrs['fixed_ratio'] = mo_array(param.fixed_ratio) - - mapping_rule = merge_attrs(param, update_attrs) - - mapping_rule.update(layout_attrs()) - - # update the attributes of the node - PriorBoxOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/proposal_ext.py b/tools/mo/openvino/tools/mo/front/caffe/proposal_ext.py deleted file mode 100644 index 08f7302ad3c8cc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/proposal_ext.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.ops.proposal import ProposalOp -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ProposalFrontExtractor(FrontExtractorOp): - op = 'Proposal' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.proposal_param - update_attrs = { - 'feat_stride': param.feat_stride, - 'base_size': param.base_size, - 'min_size': param.min_size, - 'ratio': mo_array(param.ratio), - 'scale': mo_array(param.scale), - 'pre_nms_topn': param.pre_nms_topn, - 'post_nms_topn': param.post_nms_topn, - 'nms_thresh': param.nms_thresh - } - - mapping_rule = merge_attrs(param, update_attrs) - # update the attributes of the node - ProposalOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/proposal_python_ext.py b/tools/mo/openvino/tools/mo/front/caffe/proposal_python_ext.py deleted file mode 100644 index e831da45ede474..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/proposal_python_ext.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.ops.proposal import ProposalOp -from openvino.tools.mo.front.extractor import CaffePythonFrontExtractorOp - - -class ProposalPythonFrontExtractor(CaffePythonFrontExtractorOp): - op = 'rpn.proposal_layer.ProposalLayer' - enabled = True - - @staticmethod - def extract_proposal_params(node, defaults): - param = node.pb.python_param - attrs = CaffePythonFrontExtractorOp.parse_param_str(param.param_str) - update_attrs = defaults - if 'ratios' in attrs and 'ratio' in attrs: - log.error('Both ratios and ratio found, value of 
ratios will be used', extra={'is_warning': True}) - if 'scales' in attrs and 'scale' in attrs: - log.error('Both scales and scale found, value of scales will be used', extra={'is_warning': True}) - - if 'ratios' in attrs: - attrs['ratio'] = attrs['ratios'] - del attrs['ratios'] - if 'scales' in attrs: - attrs['scale'] = attrs['scales'] - del attrs['scales'] - - update_attrs.update(attrs) - CaffePythonFrontExtractorOp.check_param(ProposalOp, update_attrs) - ProposalOp.update_node_stat(node, update_attrs) - - @classmethod - def extract(cls, node): - defaults = { - 'feat_stride': 16, - 'base_size': 16, - 'min_size': 16, - 'ratio': [0.5, 1, 2], - 'scale': [8, 16, 32], - 'pre_nms_topn': 6000, - 'post_nms_topn': 300, - 'nms_thresh': 0.7 - } - cls.extract_proposal_params(node, defaults) - return cls.enabled - - -class SSHProposalPythonFrontExtractor(CaffePythonFrontExtractorOp): - op = 'SSH.layers.proposal_layer.ProposalLayer' - enabled = True - - @classmethod - def extract(cls, node): - defaults = { - 'feat_stride': 16, - 'base_size': 16, - 'min_size': 16, - 'ratio': [0.5, 1, 2], - 'scale': [8, 16, 32], - 'pre_nms_topn': 1000, - 'post_nms_topn': 1000, - 'nms_thresh': 1.0 - } - ProposalPythonFrontExtractor.extract_proposal_params(node, defaults) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/proto/__init__.py b/tools/mo/openvino/tools/mo/front/caffe/proto/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/proto/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/caffe/proto/caffe_pb2.py b/tools/mo/openvino/tools/mo/front/caffe/proto/caffe_pb2.py deleted file mode 100644 index 2518d1857277a3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/proto/caffe_pb2.py +++ /dev/null @@ -1,9544 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
-# source: mo_caffe.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf.internal import enum_type_wrapper -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='mo_caffe.proto', - package='mo_caffe', - serialized_pb=_b('\n\x0emo_caffe.proto\x12\x08mo_caffe\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcf\x01\n\tBlobProto\x12\"\n\x05shape\x18\x07 \x01(\x0b\x32\x13.mo_caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"5\n\x0f\x42lobProtoVector\x12\"\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x13.mo_caffe.BlobProto\"M\n\x1e\x43osineSimilarityBatchParameter\x12\x14\n\tpos_label\x18\x01 \x01(\x01:\x01\x31\x12\x15\n\tneg_label\x18\x02 \x01(\x01:\x02-1\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"A\n\x0cLabelMapItem\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05label\x18\x02 \x01(\x05\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\"0\n\x08LabelMap\x12$\n\x04item\x18\x01 \x03(\x0b\x32\x16.mo_caffe.LabelMapItem\"\x87\x01\n\x0eNormalizedBBox\x12\x0c\n\x04xmin\x18\x01 \x01(\x02\x12\x0c\n\x04ymin\x18\x02 \x01(\x02\x12\x0c\n\x04xmax\x18\x03 \x01(\x02\x12\x0c\n\x04ymax\x18\x04 \x01(\x02\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x11\n\tdifficult\x18\x06 \x01(\x08\x12\r\n\x05score\x18\x07 \x01(\x02\x12\x0c\n\x04size\x18\x08 \x01(\x02\"\xad\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x45\n\rvariance_norm\x18\x08 \x01(\x0e\x32&.mo_caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\x12\x0c\n\x04\x66ile\x18\t \x01(\t\x12\x10\n\x08\x64iag_val\x18\n \x03(\x02\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\xed\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12(\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x13.mo_caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12!\n\x05state\x18\x06 \x01(\x0b\x32\x12.mo_caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0cprofile_info\x18\t \x01(\x08:\x05\x66\x61lse\x12\x18\n\x0cprofile_iter\x18\n \x01(\x05:\x02\x35\x30\x12\x1a\n\x0eprofile_warmup\x18\x0b 
\x01(\x05:\x02\x31\x30\x12\'\n\x05layer\x18\x64 \x03(\x0b\x32\x18.mo_caffe.LayerParameter\x12*\n\x06layers\x18\x02 \x03(\x0b\x32\x1a.mo_caffe.V1LayerParameter\"\xf4\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12)\n\tnet_param\x18\x19 \x01(\x0b\x32\x16.mo_caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12/\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x16.mo_caffe.NetParameter\x12.\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x16.mo_caffe.NetParameter\x12\'\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x12.mo_caffe.NetState\x12&\n\ntest_state\x18\x1b \x03(\x0b\x32\x12.mo_caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! \x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x17\n\x0fplateau_winsize\x18* \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12N\n\x0fsnapshot_format\x18% \x01(\x0e\x32(.mo_caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12>\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32$.mo_caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x17\n\trms_decay\x18& \x01(\x02:\x04\x30.99\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12>\n\x0bsolver_type\x18\x1e \x01(\x0e\x32$.mo_caffe.SolverParameter.SolverType:\x03SGD\x12\x1f\n\x11layer_wise_reduce\x18) \x01(\x08:\x04true\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"\xa8\x01\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12$\n\x07history\x18\x03 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 \x01(\x05:\x01\x30\x12\x1b\n\x0cminimum_loss\x18\x05 \x01(\x02:\x05\x31\x65+38\x12\x1a\n\x0fiter_last_event\x18\x06 \x01(\x05:\x01\x30\"Q\n\x08NetState\x12$\n\x05phase\x18\x01 \x01(\x0e\x32\x0f.mo_caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"v\n\x0cNetStateRule\x12\x1e\n\x05phase\x18\x01 \x01(\x0e\x32\x0f.mo_caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 
\x03(\t\"\xad\x02\n\x1bSpatialTransformerParameter\x12\x1e\n\x0etransform_type\x18\x01 \x01(\t:\x06\x61\x66\x66ine\x12\x1e\n\x0csampler_type\x18\x02 \x01(\t:\x08\x62ilinear\x12\x10\n\x08output_H\x18\x03 \x01(\x05\x12\x10\n\x08output_W\x18\x04 \x01(\x05\x12\x1b\n\rto_compute_dU\x18\x05 \x01(\x08:\x04true\x12\x11\n\ttheta_1_1\x18\x06 \x01(\x01\x12\x11\n\ttheta_1_2\x18\x07 \x01(\x01\x12\x11\n\ttheta_1_3\x18\x08 \x01(\x01\x12\x11\n\ttheta_2_1\x18\t \x01(\x01\x12\x11\n\ttheta_2_2\x18\n \x01(\x01\x12\x11\n\ttheta_2_3\x18\x0b \x01(\x01\x12\x1b\n\x0c\x64\x65_transform\x18\x0c \x01(\x08:\x05\x66\x61lse\"(\n\x12PowerFileParameter\x12\x12\n\nshift_file\x18\x01 \x01(\t\"5\n\x0fSTLossParameter\x12\x10\n\x08output_H\x18\x01 \x02(\x05\x12\x10\n\x08output_W\x18\x02 \x02(\x05\"%\n\x10LocLossParameter\x12\x11\n\tthreshold\x18\x01 \x02(\x01\"\xa6\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\nshare_mode\x18\x02 \x01(\x0e\x32 .mo_caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xf4#\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1e\n\x05phase\x18\n \x01(\x0e\x32\x0f.mo_caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\"\n\x05param\x18\x06 \x03(\x0b\x32\x13.mo_caffe.ParamSpec\x12\"\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12\'\n\x07include\x18\x08 \x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12\'\n\x07\x65xclude\x18\t \x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12:\n\x0ftransform_param\x18\x64 \x01(\x0b\x32!.mo_caffe.TransformationParameter\x12+\n\nloss_param\x18\x65 \x01(\x0b\x32\x17.mo_caffe.LossParameter\x12\x33\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x1b.mo_caffe.AccuracyParameter\x12/\n\x0c\x61rgmax_param\x18g \x01(\x0b\x32\x19.mo_caffe.ArgMaxParameter\x12\x37\n\x10\x62\x61tch_norm_param\x18\x8b\x01 \x01(\x0b\x32\x1c.mo_caffe.BatchNormParameter\x12,\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x17.mo_caffe.BiasParameter\x12I\n\x19\x63hannel_permutation_param\x18\x92? 
\x01(\x0b\x32%.mo_caffe.ChannelPermutationParameter\x12/\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x19.mo_caffe.ConcatParameter\x12\x42\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\".mo_caffe.ContrastiveLossParameter\x12\x39\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1e.mo_caffe.ConvolutionParameter\x12,\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x17.mo_caffe.CropParameter\x12\x39\n\x11\x63tc_decoder_param\x18\x95\x01 \x01(\x0b\x32\x1d.mo_caffe.CTCDecoderParameter\x12\x33\n\x0e\x63tc_loss_param\x18\x94\x01 \x01(\x0b\x32\x1a.mo_caffe.CTCLossParameter\x12+\n\ndata_param\x18k \x01(\x0b\x32\x17.mo_caffe.DataParameter\x12\x31\n\rdropout_param\x18l \x01(\x0b\x32\x1a.mo_caffe.DropoutParameter\x12\x36\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x1c.mo_caffe.DummyDataParameter\x12\x31\n\reltwise_param\x18n \x01(\x0b\x32\x1a.mo_caffe.EltwiseParameter\x12*\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x16.mo_caffe.ELUParameter\x12.\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x18.mo_caffe.EmbedParameter\x12)\n\texp_param\x18o \x01(\x0b\x32\x16.mo_caffe.ExpParameter\x12\x32\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x1a.mo_caffe.FlattenParameter\x12*\n\tgrn_param\x18\xd5\x01 \x01(\x0b\x32\x16.mo_caffe.GRNParameter\x12\x34\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x1b.mo_caffe.HDF5DataParameter\x12\x38\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1d.mo_caffe.HDF5OutputParameter\x12\x36\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x1c.mo_caffe.HingeLossParameter\x12\x36\n\x10image_data_param\x18s \x01(\x0b\x32\x1c.mo_caffe.ImageDataParameter\x12<\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1f.mo_caffe.InfogainLossParameter\x12<\n\x13inner_product_param\x18u \x01(\x0b\x32\x1f.mo_caffe.InnerProductParameter\x12.\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x18.mo_caffe.InputParameter\x12*\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x16.mo_caffe.LogParameter\x12)\n\tlrn_param\x18v \x01(\x0b\x32\x16.mo_caffe.LRNParameter\x12\x38\n\x11memory_data_param\x18w \x01(\x0b\x32\x1d.mo_caffe.MemoryDataParameter\x12)\n\tmvn_param\x18x \x01(\x0b\x32\x16.mo_caffe.MVNParameter\x12\x36\n\x0fparameter_param\x18\x91\x01 \x01(\x0b\x32\x1c.mo_caffe.ParameterParameter\x12\x31\n\rpooling_param\x18y \x01(\x0b\x32\x1a.mo_caffe.PoolingParameter\x12\x32\n\rpermute_param\x18\x9a\x01 \x01(\x0b\x32\x1a.mo_caffe.PermuteParameter\x12-\n\x0bpower_param\x18z \x01(\x0b\x32\x18.mo_caffe.PowerParameter\x12.\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x18.mo_caffe.PReLUParameter\x12\x30\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x19.mo_caffe.PythonParameter\x12\x36\n\x0frecurrent_param\x18\x92\x01 \x01(\x0b\x32\x1c.mo_caffe.RecurrentParameter\x12\x36\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x1c.mo_caffe.ReductionParameter\x12+\n\nrelu_param\x18{ \x01(\x0b\x32\x17.mo_caffe.ReLUParameter\x12\x32\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x1a.mo_caffe.ReshapeParameter\x12\x32\n\rreverse_param\x18\x93\x01 \x01(\x0b\x32\x1a.mo_caffe.ReverseParameter\x12.\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x18.mo_caffe.ScaleParameter\x12\x31\n\rsigmoid_param\x18| \x01(\x0b\x32\x1a.mo_caffe.SigmoidParameter\x12\x31\n\rsoftmax_param\x18} \x01(\x0b\x32\x1a.mo_caffe.SoftmaxParameter\x12*\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x16.mo_caffe.SPPParameter\x12-\n\x0bslice_param\x18~ \x01(\x0b\x32\x18.mo_caffe.SliceParameter\x12+\n\ntanh_param\x18\x7f \x01(\x0b\x32\x17.mo_caffe.TanHParameter\x12\x36\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x1c.mo_caffe.ThresholdParameter\x12,\n\ntile_param\x18\x8a\x01 
\x01(\x0b\x32\x17.mo_caffe.TileParameter\x12\x39\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1d.mo_caffe.WindowDataParameter\x12\x38\n\x08st_param\x18\x96\x01 \x01(\x0b\x32%.mo_caffe.SpatialTransformerParameter\x12\x31\n\rst_loss_param\x18\x97\x01 \x01(\x0b\x32\x19.mo_caffe.STLossParameter\x12\x37\n\x10power_file_param\x18\x98\x01 \x01(\x0b\x32\x1c.mo_caffe.PowerFileParameter\x12\x33\n\x0eloc_loss_param\x18\x99\x01 \x01(\x0b\x32\x1a.mo_caffe.LocLossParameter\x12\x34\n\x0eproposal_param\x18\xc9\x01 \x01(\x0b\x32\x1b.mo_caffe.ProposalParameter\x12P\n\x1d\x63osine_similarity_batch_param\x18\xca\x01 \x01(\x0b\x32(.mo_caffe.CosineSimilarityBatchParameter\x12\x45\n\x0erss_loss_param\x18\xcb\x01 \x01(\x0b\x32,.mo_caffe.RandomSamplingSoftmaxLossParameter\x12\x31\n\nnorm_param\x18\xcc\x01 \x01(\x0b\x32\x1c.mo_caffe.NormalizeParameter\x12\x39\n\x11roi_warping_param\x18\xcd\x01 \x01(\x0b\x32\x1d.mo_caffe.ROIWarpingParameter\x12=\n\x13psroi_pooling_param\x18\xcf\x01 \x01(\x0b\x32\x1f.mo_caffe.PSROIPoolingParameter\x12\x39\n\x11roi_pooling_param\x18\xd0\x01 \x01(\x0b\x32\x1d.mo_caffe.ROIPoolingParameter\x12>\n\x14smooth_l1_loss_param\x18\xd1\x01 \x01(\x0b\x32\x1f.mo_caffe.SmoothL1LossParameter\x12\x46\n\x18\x62ox_annotator_ohem_param\x18\xd2\x01 \x01(\x0b\x32#.mo_caffe.BoxAnnotatorOHEMParameter\x12\x43\n\x16\x64\x65tection_output_param\x18\xd3\x01 \x01(\x0b\x32\".mo_caffe.DetectionOutputParameter\x12\x35\n\x0fprior_box_param\x18\xd4\x01 \x01(\x0b\x32\x1b.mo_caffe.PriorBoxParameter\x12\x39\n\x11region_yolo_param\x18\xd6\x01 \x01(\x0b\x32\x1d.mo_caffe.RegionYoloParameter\x12\x37\n\x10reorg_yolo_param\x18\xd7\x01 \x01(\x0b\x32\x1c.mo_caffe.ReorgYoloParameter\x12.\n\x0brelu6_param\x18\xd8\x01 \x01(\x0b\x32\x18.mo_caffe.ReLU6Parameter\x12\x30\n\x0cinterp_param\x18\xd9\x01 \x01(\x0b\x32\x19.mo_caffe.InterpParameter\x12<\n\x12\x61ugmentation_param\x18\xda\x01 \x01(\x0b\x32\x1f.mo_caffe.AugmentationParameter\x12:\n\x11\x63orrelation_param\x18\xdb\x01 \x01(\x0b\x32\x1e.mo_caffe.CorrelationParameter\x12\x34\n\x0eresample_param\x18\xdc\x01 \x01(\x0b\x32\x1b.mo_caffe.ResampleParameter\x12\x35\n\x0f\x66low_warp_param\x18\xdd\x01 \x01(\x0b\x32\x1b.mo_caffe.FlowWarpParameter\x12.\n\x0b\x61\x63\x63um_param\x18\xde\x01 \x01(\x0b\x32\x18.mo_caffe.AccumParameter\x12?\n\x14\x63oeff_schedule_param\x18\xdf\x01 \x01(\x0b\x32 .mo_caffe.CoeffScheduleParameter\x12\x41\n\x15shuffle_channel_param\x18\xe0\x01 \x01(\x0b\x32!.mo_caffe.ShuffleChannelParameter\"\x90\x01\n\x0fInterpParameter\x12\x11\n\x06height\x18\x01 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x02 \x01(\x05:\x01\x30\x12\x16\n\x0bzoom_factor\x18\x03 \x01(\x05:\x01\x31\x12\x18\n\rshrink_factor\x18\x04 \x01(\x05:\x01\x31\x12\x12\n\x07pad_beg\x18\x05 \x01(\x05:\x01\x30\x12\x12\n\x07pad_end\x18\x06 \x01(\x05:\x01\x30\"n\n\"RandomSamplingSoftmaxLossParameter\x12 \n\x13random_sampling_num\x18\x01 \x01(\x05:\x03\x31\x30\x30\x12&\n\x16random_sampling_policy\x18\x02 \x01(\t:\x06random\"\xc8\x01\n\x11ProposalParameter\x12\x17\n\x0b\x66\x65\x61t_stride\x18\x01 \x01(\r:\x02\x31\x36\x12\x15\n\tbase_size\x18\x02 \x01(\r:\x02\x31\x36\x12\x14\n\x08min_size\x18\x03 \x01(\r:\x02\x31\x36\x12\r\n\x05ratio\x18\x04 \x03(\x02\x12\r\n\x05scale\x18\x05 \x03(\x02\x12\x1a\n\x0cpre_nms_topn\x18\x06 \x01(\r:\x04\x36\x30\x30\x30\x12\x1a\n\rpost_nms_topn\x18\x07 \x01(\r:\x03\x33\x30\x30\x12\x17\n\nnms_thresh\x18\x08 \x01(\x02:\x03\x30.7\"\x95\x01\n\x12NormalizeParameter\x12\x1c\n\x0e\x61\x63ross_spatial\x18\x01 \x01(\x08:\x04true\x12/\n\x0cscale_filler\x18\x02 
\x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x1c\n\x0e\x63hannel_shared\x18\x03 \x01(\x08:\x04true\x12\x12\n\x03\x65ps\x18\x04 \x01(\x02:\x05\x31\x65-10\"!\n\x10PermuteParameter\x12\r\n\x05order\x18\x01 \x03(\r\"\xb6\x01\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\"\xb4\x02\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12G\n\rnormalization\x18\x03 \x01(\x0e\x32).mo_caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 \x01(\x08\x12\x1f\n\x14pre_fixed_normalizer\x18\x04 \x01(\x02:\x01\x31\x12$\n\x15weight_by_label_freqs\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0f\x63lass_weighting\x18\x06 \x03(\x02\"Q\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\r\n\tPRE_FIXED\x10\x03\x12\x08\n\x04NONE\x10\x04\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05\"D\n\x18\x43hannelPermutationAction\x12\x0c\n\x04\x63han\x18\x01 \x02(\r\x12\x0c\n\x04\x63opy\x18\x02 \x01(\r\x12\x0c\n\x04\x66ill\x18\x03 \x01(\x02\"\x9a\x01\n\x1b\x43hannelPermutationParameter\x12\x32\n\x06\x61\x63tion\x18\x01 \x03(\x0b\x32\".mo_caffe.ChannelPermutationAction\x12\x12\n\nnum_output\x18\x10 \x02(\r\x12\x1f\n\x10inplace_possible\x18\x11 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x07version\x18\x12 \x01(\x05:\x01\x30\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"j\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12&\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x05\x30.999\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-05\"J\n\x19\x42oxAnnotatorOHEMParameter\x12\x13\n\x0broi_per_img\x18\x01 \x02(\r\x12\x18\n\x0cignore_label\x18\x02 \x01(\x05:\x02-1\"`\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12)\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\x85\x04\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12\x30\n\rweight_filler\x18\x07 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12>\n\x06\x65ngine\x18\x0f \x01(\x0e\x32%.mo_caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 
\x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"A\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r\x12\x0f\n\x07\x64imsize\x18\x03 \x03(\r\"P\n\x13\x43TCDecoderParameter\x12\x17\n\x0b\x62lank_index\x18\x01 \x01(\x05:\x02-1\x12 \n\x12\x63tc_merge_repeated\x18\x02 \x01(\x08:\x04true\"\xb2\x01\n\x10\x43TCLossParameter\x12\x17\n\x0coutput_delay\x18\x01 \x01(\x05:\x01\x30\x12\x17\n\x0b\x62lank_index\x18\x02 \x01(\x05:\x02-1\x12+\n\x1cpreprocess_collapse_repeated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12 \n\x12\x63tc_merge_repeated\x18\x04 \x01(\x08:\x04true\x12\x1d\n\x12loss_calculation_t\x18\x05 \x01(\x05:\x01\x30\"\xa7\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x34\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x1a.mo_caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x34\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\"[\n\x1eNonMaximumSuppressionParameter\x12\x1a\n\rnms_threshold\x18\x01 \x01(\x02:\x03\x30.3\x12\r\n\x05top_k\x18\x02 \x01(\x05\x12\x0e\n\x03\x65ta\x18\x03 \x01(\x02:\x01\x31\"\x99\x04\n\x0fResizeParameter\x12\x0f\n\x04prob\x18\x01 \x01(\x02:\x01\x31\x12@\n\x0bresize_mode\x18\x02 \x01(\x0e\x32%.mo_caffe.ResizeParameter.Resize_mode:\x04WARP\x12\x11\n\x06height\x18\x03 \x01(\r:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\r:\x01\x30\x12\x17\n\x0cheight_scale\x18\x08 \x01(\r:\x01\x30\x12\x16\n\x0bwidth_scale\x18\t \x01(\r:\x01\x30\x12>\n\x08pad_mode\x18\x05 \x01(\x0e\x32\".mo_caffe.ResizeParameter.Pad_mode:\x08\x43ONSTANT\x12\x11\n\tpad_value\x18\x06 \x03(\x02\x12:\n\x0binterp_mode\x18\x07 \x03(\x0e\x32%.mo_caffe.ResizeParameter.Interp_mode\"G\n\x0bResize_mode\x12\x08\n\x04WARP\x10\x01\x12\x12\n\x0e\x46IT_SMALL_SIZE\x10\x02\x12\x1a\n\x16\x46IT_LARGE_SIZE_AND_PAD\x10\x03\":\n\x08Pad_mode\x12\x0c\n\x08\x43ONSTANT\x10\x01\x12\x0c\n\x08MIRRORED\x10\x02\x12\x12\n\x0eREPEAT_NEAREST\x10\x03\"I\n\x0bInterp_mode\x12\n\n\x06LINEAR\x10\x01\x12\x08\n\x04\x41REA\x10\x02\x12\x0b\n\x07NEAREST\x10\x03\x12\t\n\x05\x43UBIC\x10\x04\x12\x0c\n\x08LANCZOS4\x10\x05\"\xdb\x01\n\x13SaveOutputParameter\x12\x18\n\x10output_directory\x18\x01 \x01(\t\x12\x1a\n\x12output_name_prefix\x18\x02 \x01(\t\x12\x15\n\routput_format\x18\x03 \x01(\t\x12\x16\n\x0elabel_map_file\x18\x04 \x01(\t\x12\x16\n\x0ename_size_file\x18\x05 \x01(\t\x12\x16\n\x0enum_test_image\x18\x06 \x01(\r\x12/\n\x0cresize_param\x18\x07 \x01(\x0b\x32\x19.mo_caffe.ResizeParameter\"\xbd\x04\n\x18\x44\x65tectionOutputParameter\x12\x13\n\x0bnum_classes\x18\x01 \x01(\r\x12\x1c\n\x0eshare_location\x18\x02 \x01(\x08:\x04true\x12\x1e\n\x13\x62\x61\x63kground_label_id\x18\x03 \x01(\x05:\x01\x30\x12;\n\tnms_param\x18\x04 \x01(\x0b\x32(.mo_caffe.NonMaximumSuppressionParameter\x12\x38\n\x11save_output_param\x18\x05 \x01(\x0b\x32\x1d.mo_caffe.SaveOutputParameter\x12?\n\tcode_type\x18\x06 \x01(\x0e\x32$.mo_caffe.PriorBoxParameter.CodeType:\x06\x43ORNER\x12)\n\x1avariance_encoded_in_target\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x16\n\nkeep_top_k\x18\x07 
\x01(\x05:\x02-1\x12\x1c\n\x14\x63onfidence_threshold\x18\t \x01(\x02\x12\x18\n\tvisualize\x18\n \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x13visualize_threshold\x18\x0b \x01(\x02\x12\x11\n\tsave_file\x18\x0c \x01(\t\x12\x17\n\x0binput_width\x18\r \x01(\x05:\x02-1\x12\x18\n\x0cinput_height\x18\x0e \x01(\x05:\x02-1\x12\x18\n\nnormalized\x18\x0f \x01(\x08:\x04true\x12\x1e\n\x10objectness_score\x18\x10 \x01(\x02:\x04\x30.01\".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\"\xa6\x01\n\x12\x44ummyDataParameter\x12.\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x19.mo_caffe.FillerParameter\x12\"\n\x05shape\x18\x06 \x03(\x0b\x32\x13.mo_caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa8\x01\n\x10\x45ltwiseParameter\x12<\n\toperation\x18\x01 \x01(\x0e\x32$.mo_caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31\"\xb2\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12\x30\n\rweight_filler\x18\x04 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"a\n\x12HingeLossParameter\x12\x33\n\x04norm\x18\x01 \x01(\x0e\x32!.mo_caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"\'\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\"\xd1\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x30\n\rweight_filler\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"4\n\x0eInputParameter\x12\"\n\x05shape\x18\x01 \x03(\x0b\x32\x13.mo_caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xbe\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 
\x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12G\n\x0bnorm_region\x18\x04 \x01(\x0e\x32!.mo_caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x36\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1d.mo_caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\x1f\n\x0cGRNParameter\x12\x0f\n\x04\x62ias\x18\x01 \x01(\x02:\x01\x31\"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09\"8\n\x12ParameterParameter\x12\"\n\x05shape\x18\x01 \x01(\x0b\x32\x13.mo_caffe.BlobShape\"\xc1\x03\n\x10PoolingParameter\x12\x38\n\x04pool\x18\x01 \x01(\x0e\x32%.mo_caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12:\n\x06\x65ngine\x18\x0b \x01(\x0e\x32!.mo_caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x17\n\tceil_mode\x18\r \x01(\x08:\x04true\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\x8e\x03\n\x11PriorBoxParameter\x12\x10\n\x08min_size\x18\x01 \x03(\x02\x12\x10\n\x08max_size\x18\x02 \x03(\x02\x12\x14\n\x0c\x61spect_ratio\x18\x03 \x03(\x02\x12\x12\n\x04\x66lip\x18\x04 \x01(\x08:\x04true\x12\x13\n\x04\x63lip\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x08variance\x18\x06 \x03(\x02\x12\x10\n\x08img_size\x18\x07 \x01(\r\x12\r\n\x05img_h\x18\x08 \x01(\r\x12\r\n\x05img_w\x18\t \x01(\r\x12\x0c\n\x04step\x18\n \x01(\x02\x12\x0e\n\x06step_h\x18\x0b \x01(\x02\x12\x0e\n\x06step_w\x18\x0c \x01(\x02\x12\x13\n\x06offset\x18\r \x01(\x02:\x03\x30.5\x12\r\n\x05width\x18\x0e \x03(\x02\x12\x0e\n\x06height\x18\x0f \x03(\x02\x12\x12\n\nfixed_size\x18\x10 \x03(\x02\x12\x13\n\x0b\x66ixed_ratio\x18\x11 \x03(\x02\x12\x0f\n\x07\x64\x65nsity\x18\x12 \x03(\x02\"8\n\x08\x43odeType\x12\n\n\x06\x43ORNER\x10\x01\x12\x0f\n\x0b\x43\x45NTER_SIZE\x10\x02\x12\x0f\n\x0b\x43ORNER_SIZE\x10\x03\"V\n\x15PSROIPoolingParameter\x12\x15\n\rspatial_scale\x18\x01 \x02(\x02\x12\x12\n\noutput_dim\x18\x02 \x02(\x05\x12\x12\n\ngroup_size\x18\x03 \x02(\x05\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xc6\x01\n\x12RecurrentParameter\x12\x15\n\nnum_output\x18\x01 
\x01(\r:\x01\x30\x12\x30\n\rweight_filler\x18\x02 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x19\n\ndebug_info\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rexpose_hidden\x18\x05 \x01(\x08:\x05\x66\x61lse\"\xb0\x01\n\x12ReductionParameter\x12@\n\toperation\x18\x01 \x01(\x0e\x32(.mo_caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x90\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x37\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1e.mo_caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\x1e\n\x0eReLU6Parameter\x12\x0c\n\x01n\x18\x01 \x01(\x02:\x01\x36\"]\n\x10ReshapeParameter\x12\"\n\x05shape\x18\x01 \x01(\x0b\x32\x13.mo_caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"#\n\x10ReverseParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x30\"Y\n\x13ROIPoolingParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"]\n\x17ROIWarpingTestParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"Y\n\x13ROIWarpingParameter\x12\x13\n\x08pooled_h\x18\x01 \x01(\r:\x01\x30\x12\x13\n\x08pooled_w\x18\x02 \x01(\r:\x01\x30\x12\x18\n\rspatial_scale\x18\x03 \x01(\x02:\x01\x31\"\xab\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12)\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12.\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\"{\n\x10SigmoidParameter\x12:\n\x06\x65ngine\x18\x01 \x01(\x0e\x32!.mo_caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\")\n\x15SmoothL1LossParameter\x12\x10\n\x05sigma\x18\x01 \x01(\x02:\x01\x31\"\x8c\x01\n\x10SoftmaxParameter\x12:\n\x06\x65ngine\x18\x01 \x01(\x0e\x32!.mo_caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"u\n\rTanHParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.mo_caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"/\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 
\x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xf1\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x34\n\x04pool\x18\x02 \x01(\x0e\x32!.mo_caffe.SPPParameter.PoolMethod:\x03MAX\x12\x36\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1d.mo_caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xcc\x14\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\'\n\x07include\x18 \x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12\'\n\x07\x65xclude\x18! \x03(\x0b\x32\x16.mo_caffe.NetStateRule\x12\x32\n\x04type\x18\x05 \x01(\x0e\x32$.mo_caffe.V1LayerParameter.LayerType\x12\"\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12\x41\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32\'.mo_caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x33\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x1b.mo_caffe.AccuracyParameter\x12/\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x19.mo_caffe.ArgMaxParameter\x12/\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x19.mo_caffe.ConcatParameter\x12\x42\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\".mo_caffe.ContrastiveLossParameter\x12\x39\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1e.mo_caffe.ConvolutionParameter\x12+\n\ndata_param\x18\x0b \x01(\x0b\x32\x17.mo_caffe.DataParameter\x12\x31\n\rdropout_param\x18\x0c \x01(\x0b\x32\x1a.mo_caffe.DropoutParameter\x12\x36\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x1c.mo_caffe.DummyDataParameter\x12\x31\n\reltwise_param\x18\x18 \x01(\x0b\x32\x1a.mo_caffe.EltwiseParameter\x12)\n\texp_param\x18) \x01(\x0b\x32\x16.mo_caffe.ExpParameter\x12\x34\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x1b.mo_caffe.HDF5DataParameter\x12\x38\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1d.mo_caffe.HDF5OutputParameter\x12\x36\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x1c.mo_caffe.HingeLossParameter\x12\x36\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x1c.mo_caffe.ImageDataParameter\x12<\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1f.mo_caffe.InfogainLossParameter\x12<\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1f.mo_caffe.InnerProductParameter\x12)\n\tlrn_param\x18\x12 \x01(\x0b\x32\x16.mo_caffe.LRNParameter\x12\x38\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1d.mo_caffe.MemoryDataParameter\x12)\n\tmvn_param\x18\" \x01(\x0b\x32\x16.mo_caffe.MVNParameter\x12\x31\n\rpooling_param\x18\x13 \x01(\x0b\x32\x1a.mo_caffe.PoolingParameter\x12-\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x18.mo_caffe.PowerParameter\x12+\n\nrelu_param\x18\x1e \x01(\x0b\x32\x17.mo_caffe.ReLUParameter\x12\x31\n\rsigmoid_param\x18& \x01(\x0b\x32\x1a.mo_caffe.SigmoidParameter\x12\x31\n\rsoftmax_param\x18\' 
\x01(\x0b\x32\x1a.mo_caffe.SoftmaxParameter\x12-\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x18.mo_caffe.SliceParameter\x12+\n\ntanh_param\x18% \x01(\x0b\x32\x17.mo_caffe.TanHParameter\x12\x35\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x1c.mo_caffe.ThresholdParameter\x12\x38\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1d.mo_caffe.WindowDataParameter\x12:\n\x0ftransform_param\x18$ \x01(\x0b\x32!.mo_caffe.TransformationParameter\x12+\n\nloss_param\x18* \x01(\x0b\x32\x17.mo_caffe.LossParameter\x12)\n\x05layer\x18\x01 \x01(\x0b\x32\x1a.mo_caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 \x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\x8c\x08\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12\x30\n\rweight_filler\x18\x05 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12.\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x38\n\x04pool\x18\x0b \x01(\x0e\x32%.mo_caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 \x01(\x08:\x05\x66\x61lse\x12\"\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x13.mo_caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= 
\x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? \x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x39\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1d.mo_caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"Z\n\x0ePReLUParameter\x12)\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x19.mo_caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse\"\x86\x01\n\x13RegionYoloParameter\x12\x11\n\x06\x63oords\x18\x01 \x01(\x05:\x01\x34\x12\x13\n\x07\x63lasses\x18\x02 \x01(\x05:\x02\x32\x30\x12\x0e\n\x03num\x18\x03 \x01(\x05:\x01\x31\x12\x18\n\ndo_softmax\x18\x04 \x01(\x08:\x04true\x12\x0f\n\x07\x61nchors\x18\x05 \x03(\x02\x12\x0c\n\x04mask\x18\x06 \x03(\x05\"\'\n\x12ReorgYoloParameter\x12\x11\n\x06stride\x18\x01 \x01(\x05:\x01\x31\"\xcf\x01\n\x18RandomGeneratorParameter\x12\x1a\n\trand_type\x18\x01 \x01(\t:\x07uniform\x12\x12\n\x03\x65xp\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x0f\n\x04mean\x18\x04 \x01(\x02:\x01\x30\x12\x11\n\x06spread\x18\x05 \x01(\x02:\x01\x30\x12\x0f\n\x04prob\x18\x06 \x01(\x02:\x01\x31\x12\x1c\n\x0e\x61pply_schedule\x18\x07 \x01(\x08:\x04true\x12\x19\n\ndiscretize\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nmultiplier\x18\t \x01(\x02:\x01\x31\"`\n\x16\x43oeffScheduleParameter\x12\x14\n\thalf_life\x18\x01 \x01(\x02:\x01\x31\x12\x18\n\rinitial_coeff\x18\x02 \x01(\x02:\x01\x31\x12\x16\n\x0b\x66inal_coeff\x18\x03 \x01(\x02:\x01\x31\"\xde\x07\n\x11\x41ugmentationCoeff\x12\x11\n\x06mirror\x18\x01 \x01(\x02:\x01\x30\x12\r\n\x02\x64x\x18\x02 \x01(\x02:\x01\x30\x12\r\n\x02\x64y\x18\x03 \x01(\x02:\x01\x30\x12\x10\n\x05\x61ngle\x18\x04 \x01(\x02:\x01\x30\x12\x11\n\x06zoom_x\x18\x05 \x01(\x02:\x01\x31\x12\x11\n\x06zoom_y\x18\x06 \x01(\x02:\x01\x31\x12\x10\n\x05gamma\x18\x64 \x01(\x02:\x01\x31\x12\x15\n\nbrightness\x18\x65 \x01(\x02:\x01\x30\x12\x13\n\x08\x63ontrast\x18\x66 \x01(\x02:\x01\x31\x12\x11\n\x06\x63olor1\x18g \x01(\x02:\x01\x31\x12\x11\n\x06\x63olor2\x18h \x01(\x02:\x01\x31\x12\x11\n\x06\x63olor3\x18i \x01(\x02:\x01\x31\x12\x16\n\x0bpow_nomean0\x18\n \x01(\x02:\x01\x31\x12\x16\n\x0bpow_nomean1\x18\x0b \x01(\x02:\x01\x31\x12\x16\n\x0bpow_nomean2\x18\x0c \x01(\x02:\x01\x31\x12\x16\n\x0b\x61\x64\x64_nomean0\x18\r \x01(\x02:\x01\x30\x12\x16\n\x0b\x61\x64\x64_nomean1\x18\x0e \x01(\x02:\x01\x30\x12\x16\n\x0b\x61\x64\x64_nomean2\x18\x0f \x01(\x02:\x01\x30\x12\x17\n\x0cmult_nomean0\x18\x10 \x01(\x02:\x01\x31\x12\x17\n\x0cmult_nomean1\x18\x11 \x01(\x02:\x01\x31\x12\x17\n\x0cmult_nomean2\x18\x12 \x01(\x02:\x01\x31\x12\x18\n\rpow_withmean0\x18\x13 \x01(\x02:\x01\x31\x12\x18\n\rpow_withmean1\x18\x14 \x01(\x02:\x01\x31\x12\x18\n\rpow_withmean2\x18\x15 \x01(\x02:\x01\x31\x12\x18\n\radd_withmean0\x18\x16 \x01(\x02:\x01\x30\x12\x18\n\radd_withmean1\x18\x17 \x01(\x02:\x01\x30\x12\x18\n\radd_withmean2\x18\x18 \x01(\x02:\x01\x30\x12\x19\n\x0emult_withmean0\x18\x19 \x01(\x02:\x01\x31\x12\x19\n\x0emult_withmean1\x18\x1a \x01(\x02:\x01\x31\x12\x19\n\x0emult_withmean2\x18\x1b \x01(\x02:\x01\x31\x12\x14\n\tlmult_pow\x18\x1c \x01(\x02:\x01\x31\x12\x14\n\tlmult_add\x18\x1d \x01(\x02:\x01\x30\x12\x15\n\nlmult_mult\x18\x1e \x01(\x02:\x01\x31\x12\x14\n\tcol_angle\x18\x1f \x01(\x02:\x01\x30\x12\x15\n\nfog_amount\x18& \x01(\x02:\x01\x30\x12\x13\n\x08\x66og_size\x18\' \x01(\x02:\x01\x30\x12\x1c\n\x11motion_blur_angle\x18( \x01(\x02:\x01\x30\x12\x1b\n\x10motion_blur_size\x18) 
\x01(\x02:\x01\x30\x12\x17\n\x0cshadow_angle\x18* \x01(\x02:\x01\x30\x12\x1a\n\x0fshadow_distance\x18+ \x01(\x02:\x01\x30\x12\x1a\n\x0fshadow_strength\x18, \x01(\x02:\x01\x30\x12\x10\n\x05noise\x18- \x01(\x02:\x01\x30\"\xcc\x10\n\x15\x41ugmentationParameter\x12\x15\n\ncrop_width\x18! \x01(\r:\x01\x30\x12\x16\n\x0b\x63rop_height\x18\" \x01(\r:\x01\x30\x12\x19\n\x0fwrite_augmented\x18\x02 \x01(\t:\x00\x12\x1b\n\x0emax_multiplier\x18\x03 \x01(\x02:\x03\x32\x35\x35\x12\"\n\x13\x61ugment_during_test\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0erecompute_mean\x18\x05 \x01(\r:\x01\x30\x12\x14\n\nwrite_mean\x18\x06 \x01(\t:\x00\x12\x1c\n\x0emean_per_pixel\x18\x07 \x01(\x08:\x04true\x12\x0c\n\x04mean\x18\x12 \x03(\x02\x12\x11\n\x04mode\x18\x08 \x01(\t:\x03\x61\x64\x64\x12\x16\n\x0b\x62ottomwidth\x18P \x01(\r:\x01\x30\x12\x17\n\x0c\x62ottomheight\x18Q \x01(\r:\x01\x30\x12\x0e\n\x03num\x18R \x01(\r:\x01\x30\x12\x18\n\x10\x63hromatic_eigvec\x18S \x03(\x02\x12\x32\n\x06mirror\x18\n \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\ttranslate\x18\x0b \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x32\n\x06rotate\x18\x0c \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x30\n\x04zoom\x18\r \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07squeeze\x18\x0e \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x37\n\x0btranslate_x\x18\x0f \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x37\n\x0btranslate_y\x18\x10 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x31\n\x05gamma\x18# \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\nbrightness\x18$ \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08\x63ontrast\x18% \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x31\n\x05\x63olor\x18& \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\tlmult_pow\x18\x14 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\nlmult_mult\x18\x15 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\tlmult_add\x18\x16 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07sat_pow\x18\x17 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08sat_mult\x18\x18 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07sat_add\x18\x19 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07\x63ol_pow\x18\x1a \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08\x63ol_mult\x18\x1b \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x33\n\x07\x63ol_add\x18\x1c \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08ladd_pow\x18\x1d \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x35\n\tladd_mult\x18\x1e \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08ladd_add\x18\x1f \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\ncol_rotate\x18 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x36\n\nfog_amount\x18\x64 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x34\n\x08\x66og_size\x18\x65 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12=\n\x11motion_blur_angle\x18\x66 \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12<\n\x10motion_blur_size\x18g \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x38\n\x0cshadow_angle\x18h \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12;\n\x0fshadow_distance\x18i \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12;\n\x0fshadow_strength\x18j \x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\x12\x31\n\x05noise\x18k 
\x01(\x0b\x32\".mo_caffe.RandomGeneratorParameter\"\x85\x01\n\x11\x46lowWarpParameter\x12\x43\n\nfill_value\x18\x01 \x01(\x0e\x32).mo_caffe.FlowWarpParameter.FillParameter:\x04ZERO\"+\n\rFillParameter\x12\x08\n\x04ZERO\x10\x01\x12\x10\n\x0cNOT_A_NUMBER\x10\x02\"\xb6\x02\n\x14\x43orrelationParameter\x12\x0e\n\x03pad\x18\x02 \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x03 \x01(\r\x12\x18\n\x10max_displacement\x18\x04 \x01(\r\x12\x13\n\x08stride_1\x18\x05 \x01(\r:\x01\x31\x12\x13\n\x08stride_2\x18\x06 \x01(\r:\x01\x31\x12\x1b\n\x10single_direction\x18\x08 \x01(\x05:\x01\x30\x12\x15\n\x06\x64o_abs\x18\x07 \x01(\x08:\x05\x66\x61lse\x12R\n\x10\x63orrelation_type\x18\x0f \x01(\x0e\x32..mo_caffe.CorrelationParameter.CorrelationType:\x08MULTIPLY\"-\n\x0f\x43orrelationType\x12\x0c\n\x08MULTIPLY\x10\x00\x12\x0c\n\x08SUBTRACT\x10\x01\"\xdc\x01\n\x11ResampleParameter\x12\x17\n\tantialias\x18\x04 \x01(\x08:\x04true\x12\r\n\x05width\x18\x01 \x01(\r\x12\x0e\n\x06height\x18\x02 \x01(\r\x12>\n\x04type\x18\x03 \x01(\x0e\x32(.mo_caffe.ResampleParameter.ResampleType:\x06LINEAR\x12\x11\n\x06\x66\x61\x63tor\x18\x05 \x01(\x02:\x01\x31\"<\n\x0cResampleType\x12\x0b\n\x07NEAREST\x10\x01\x12\n\n\x06LINEAR\x10\x02\x12\t\n\x05\x43UBIC\x10\x03\x12\x08\n\x04\x41REA\x10\x04\"z\n\x0e\x41\x63\x63umParameter\x12\x15\n\ntop_height\x18\x01 \x01(\r:\x01\x30\x12\x14\n\ttop_width\x18\x02 \x01(\r:\x01\x30\x12\x1c\n\x11size_divisible_by\x18\x03 \x01(\r:\x01\x30\x12\x1d\n\x0ehave_reference\x18\x04 \x01(\x08:\x05\x66\x61lse\"(\n\x17ShuffleChannelParameter\x12\r\n\x05group\x18\x01 \x02(\r*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01') -) -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -_PHASE = _descriptor.EnumDescriptor( - name='Phase', - full_name='mo_caffe.Phase', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='TRAIN', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='TEST', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=26831, - serialized_end=26859, -) -_sym_db.RegisterEnumDescriptor(_PHASE) - -Phase = enum_type_wrapper.EnumTypeWrapper(_PHASE) -TRAIN = 0 -TEST = 1 - - -_FILLERPARAMETER_VARIANCENORM = _descriptor.EnumDescriptor( - name='VarianceNorm', - full_name='mo_caffe.FillerParameter.VarianceNorm', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='FAN_IN', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FAN_OUT', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AVERAGE', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=1039, - serialized_end=1091, -) -_sym_db.RegisterEnumDescriptor(_FILLERPARAMETER_VARIANCENORM) - -_SOLVERPARAMETER_SNAPSHOTFORMAT = _descriptor.EnumDescriptor( - name='SnapshotFormat', - full_name='mo_caffe.SolverParameter.SnapshotFormat', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='HDF5', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BINARYPROTO', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=2696, - serialized_end=2739, -) -_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SNAPSHOTFORMAT) - -_SOLVERPARAMETER_SOLVERMODE = _descriptor.EnumDescriptor( - name='SolverMode', - 
full_name='mo_caffe.SolverParameter.SolverMode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='CPU', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='GPU', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=2741, - serialized_end=2771, -) -_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERMODE) - -_SOLVERPARAMETER_SOLVERTYPE = _descriptor.EnumDescriptor( - name='SolverType', - full_name='mo_caffe.SolverParameter.SolverType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='SGD', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NESTEROV', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ADAGRAD', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RMSPROP', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ADADELTA', index=4, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ADAM', index=5, number=5, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=2773, - serialized_end=2858, -) -_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERTYPE) - -_PARAMSPEC_DIMCHECKMODE = _descriptor.EnumDescriptor( - name='DimCheckMode', - full_name='mo_caffe.ParamSpec.DimCheckMode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STRICT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PERMISSIVE', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=3799, - serialized_end=3841, -) -_sym_db.RegisterEnumDescriptor(_PARAMSPEC_DIMCHECKMODE) - -_LOSSPARAMETER_NORMALIZATIONMODE = _descriptor.EnumDescriptor( - name='NormalizationMode', - full_name='mo_caffe.LossParameter.NormalizationMode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='FULL', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='VALID', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BATCH_SIZE', index=2, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PRE_FIXED', index=3, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NONE', index=4, number=4, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=9504, - serialized_end=9585, -) -_sym_db.RegisterEnumDescriptor(_LOSSPARAMETER_NORMALIZATIONMODE) - -_CONVOLUTIONPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='mo_caffe.ConvolutionParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=10865, - serialized_end=10908, -) -_sym_db.RegisterEnumDescriptor(_CONVOLUTIONPARAMETER_ENGINE) - -_DATAPARAMETER_DB = _descriptor.EnumDescriptor( - name='DB', - full_name='mo_caffe.DataParameter.DB', - filename=None, 
- file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='LEVELDB', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LMDB', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=11509, - serialized_end=11536, -) -_sym_db.RegisterEnumDescriptor(_DATAPARAMETER_DB) - -_RESIZEPARAMETER_RESIZE_MODE = _descriptor.EnumDescriptor( - name='Resize_mode', - full_name='mo_caffe.ResizeParameter.Resize_mode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='WARP', index=0, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FIT_SMALL_SIZE', index=1, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FIT_LARGE_SIZE_AND_PAD', index=2, number=3, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=11963, - serialized_end=12034, -) -_sym_db.RegisterEnumDescriptor(_RESIZEPARAMETER_RESIZE_MODE) - -_RESIZEPARAMETER_PAD_MODE = _descriptor.EnumDescriptor( - name='Pad_mode', - full_name='mo_caffe.ResizeParameter.Pad_mode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='CONSTANT', index=0, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MIRRORED', index=1, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='REPEAT_NEAREST', index=2, number=3, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=12036, - serialized_end=12094, -) -_sym_db.RegisterEnumDescriptor(_RESIZEPARAMETER_PAD_MODE) - -_RESIZEPARAMETER_INTERP_MODE = _descriptor.EnumDescriptor( - name='Interp_mode', - full_name='mo_caffe.ResizeParameter.Interp_mode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='LINEAR', index=0, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AREA', index=1, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NEAREST', index=2, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUBIC', index=3, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LANCZOS4', index=4, number=5, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=12096, - serialized_end=12169, -) -_sym_db.RegisterEnumDescriptor(_RESIZEPARAMETER_INTERP_MODE) - -_ELTWISEPARAMETER_ELTWISEOP = _descriptor.EnumDescriptor( - name='EltwiseOp', - full_name='mo_caffe.EltwiseParameter.EltwiseOp', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='PROD', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SUM', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MAX', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=13316, - serialized_end=13355, -) -_sym_db.RegisterEnumDescriptor(_ELTWISEPARAMETER_ELTWISEOP) - -_HINGELOSSPARAMETER_NORM = _descriptor.EnumDescriptor( - name='Norm', - full_name='mo_caffe.HingeLossParameter.Norm', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='L1', index=0, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='L2', index=1, number=2, - options=None, - type=None), - ], - 
containing_type=None, - options=None, - serialized_start=13899, - serialized_end=13921, -) -_sym_db.RegisterEnumDescriptor(_HINGELOSSPARAMETER_NORM) - -_LRNPARAMETER_NORMREGION = _descriptor.EnumDescriptor( - name='NormRegion', - full_name='mo_caffe.LRNParameter.NormRegion', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='ACROSS_CHANNELS', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='WITHIN_CHANNEL', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=14803, - serialized_end=14856, -) -_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_NORMREGION) - -_LRNPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='mo_caffe.LRNParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=10865, - serialized_end=10908, -) -_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_ENGINE) - -_POOLINGPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( - name='PoolMethod', - full_name='mo_caffe.PoolingParameter.PoolMethod', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MAX', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AVE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STOCHASTIC', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=15547, - serialized_end=15593, -) -_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_POOLMETHOD) - -_POOLINGPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='mo_caffe.PoolingParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=10865, - serialized_end=10908, -) -_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_ENGINE) - -_PRIORBOXPARAMETER_CODETYPE = _descriptor.EnumDescriptor( - name='CodeType', - full_name='mo_caffe.PriorBoxParameter.CodeType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='CORNER', index=0, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CENTER_SIZE', index=1, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CORNER_SIZE', index=2, number=3, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=16055, - serialized_end=16111, -) -_sym_db.RegisterEnumDescriptor(_PRIORBOXPARAMETER_CODETYPE) - -_REDUCTIONPARAMETER_REDUCTIONOP = _descriptor.EnumDescriptor( - name='ReductionOp', - full_name='mo_caffe.ReductionParameter.ReductionOp', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='SUM', index=0, number=1, - options=None, - type=None), - 
_descriptor.EnumValueDescriptor( - name='ASUM', index=1, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SUMSQ', index=2, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MEAN', index=3, number=4, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=16631, - serialized_end=16684, -) -_sym_db.RegisterEnumDescriptor(_REDUCTIONPARAMETER_REDUCTIONOP) - -_RELUPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='mo_caffe.ReLUParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=10865, - serialized_end=10908, -) -_sym_db.RegisterEnumDescriptor(_RELUPARAMETER_ENGINE) - -_SIGMOIDPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='mo_caffe.SigmoidParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=10865, - serialized_end=10908, -) -_sym_db.RegisterEnumDescriptor(_SIGMOIDPARAMETER_ENGINE) - -_SOFTMAXPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='mo_caffe.SoftmaxParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=10865, - serialized_end=10908, -) -_sym_db.RegisterEnumDescriptor(_SOFTMAXPARAMETER_ENGINE) - -_TANHPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='mo_caffe.TanHParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=10865, - serialized_end=10908, -) -_sym_db.RegisterEnumDescriptor(_TANHPARAMETER_ENGINE) - -_SPPPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( - name='PoolMethod', - full_name='mo_caffe.SPPParameter.PoolMethod', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MAX', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AVE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STOCHASTIC', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=15547, - serialized_end=15593, -) 
-_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_POOLMETHOD) - -_SPPPARAMETER_ENGINE = _descriptor.EnumDescriptor( - name='Engine', - full_name='mo_caffe.SPPParameter.Engine', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='DEFAULT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CAFFE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUDNN', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=10865, - serialized_end=10908, -) -_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_ENGINE) - -_V1LAYERPARAMETER_LAYERTYPE = _descriptor.EnumDescriptor( - name='LayerType', - full_name='mo_caffe.V1LayerParameter.LayerType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='NONE', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ABSVAL', index=1, number=35, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ACCURACY', index=2, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ARGMAX', index=3, number=30, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='BNLL', index=4, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CONCAT', index=5, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CONTRASTIVE_LOSS', index=6, number=37, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CONVOLUTION', index=7, number=4, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DATA', index=8, number=5, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DECONVOLUTION', index=9, number=39, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DROPOUT', index=10, number=6, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='DUMMY_DATA', index=11, number=32, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='EUCLIDEAN_LOSS', index=12, number=7, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='ELTWISE', index=13, number=25, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='EXP', index=14, number=38, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='FLATTEN', index=15, number=8, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='HDF5_DATA', index=16, number=9, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='HDF5_OUTPUT', index=17, number=10, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='HINGE_LOSS', index=18, number=28, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='IM2COL', index=19, number=11, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='IMAGE_DATA', index=20, number=12, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INFOGAIN_LOSS', index=21, number=13, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='INNER_PRODUCT', index=22, number=14, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LRN', index=23, number=15, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MEMORY_DATA', index=24, number=29, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MULTINOMIAL_LOGISTIC_LOSS', index=25, 
number=16, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='MVN', index=26, number=34, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='POOLING', index=27, number=17, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='POWER', index=28, number=26, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='RELU', index=29, number=18, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SIGMOID', index=30, number=19, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SIGMOID_CROSS_ENTROPY_LOSS', index=31, number=27, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SILENCE', index=32, number=36, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SOFTMAX', index=33, number=20, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SOFTMAX_LOSS', index=34, number=21, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SPLIT', index=35, number=22, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SLICE', index=36, number=33, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='TANH', index=37, number=23, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='WINDOW_DATA', index=38, number=24, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='THRESHOLD', index=39, number=31, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=20610, - serialized_end=21210, -) -_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_LAYERTYPE) - -_V1LAYERPARAMETER_DIMCHECKMODE = _descriptor.EnumDescriptor( - name='DimCheckMode', - full_name='mo_caffe.V1LayerParameter.DimCheckMode', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='STRICT', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='PERMISSIVE', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=3799, - serialized_end=3841, -) -_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_DIMCHECKMODE) - -_V0LAYERPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( - name='PoolMethod', - full_name='mo_caffe.V0LayerParameter.PoolMethod', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MAX', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AVE', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='STOCHASTIC', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=15547, - serialized_end=15593, -) -_sym_db.RegisterEnumDescriptor(_V0LAYERPARAMETER_POOLMETHOD) - -_FLOWWARPPARAMETER_FILLPARAMETER = _descriptor.EnumDescriptor( - name='FillParameter', - full_name='mo_caffe.FlowWarpParameter.FillParameter', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='ZERO', index=0, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='NOT_A_NUMBER', index=1, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=26084, - serialized_end=26127, -) -_sym_db.RegisterEnumDescriptor(_FLOWWARPPARAMETER_FILLPARAMETER) - -_CORRELATIONPARAMETER_CORRELATIONTYPE = _descriptor.EnumDescriptor( - name='CorrelationType', - 
full_name='mo_caffe.CorrelationParameter.CorrelationType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MULTIPLY', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='SUBTRACT', index=1, number=1, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=26395, - serialized_end=26440, -) -_sym_db.RegisterEnumDescriptor(_CORRELATIONPARAMETER_CORRELATIONTYPE) - -_RESAMPLEPARAMETER_RESAMPLETYPE = _descriptor.EnumDescriptor( - name='ResampleType', - full_name='mo_caffe.ResampleParameter.ResampleType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='NEAREST', index=0, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='LINEAR', index=1, number=2, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='CUBIC', index=2, number=3, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='AREA', index=3, number=4, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=26603, - serialized_end=26663, -) -_sym_db.RegisterEnumDescriptor(_RESAMPLEPARAMETER_RESAMPLETYPE) - - -_BLOBSHAPE = _descriptor.Descriptor( - name='BlobShape', - full_name='mo_caffe.BlobShape', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='dim', full_name='mo_caffe.BlobShape.dim', index=0, - number=1, type=3, cpp_type=2, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=28, - serialized_end=56, -) - - -_BLOBPROTO = _descriptor.Descriptor( - name='BlobProto', - full_name='mo_caffe.BlobProto', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='shape', full_name='mo_caffe.BlobProto.shape', index=0, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='data', full_name='mo_caffe.BlobProto.data', index=1, - number=5, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), - _descriptor.FieldDescriptor( - name='diff', full_name='mo_caffe.BlobProto.diff', index=2, - number=6, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), - _descriptor.FieldDescriptor( - name='double_data', full_name='mo_caffe.BlobProto.double_data', index=3, - number=8, type=1, cpp_type=5, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), - 
_descriptor.FieldDescriptor( - name='double_diff', full_name='mo_caffe.BlobProto.double_diff', index=4, - number=9, type=1, cpp_type=5, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), - _descriptor.FieldDescriptor( - name='num', full_name='mo_caffe.BlobProto.num', index=5, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='channels', full_name='mo_caffe.BlobProto.channels', index=6, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='height', full_name='mo_caffe.BlobProto.height', index=7, - number=3, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='width', full_name='mo_caffe.BlobProto.width', index=8, - number=4, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=59, - serialized_end=266, -) - - -_BLOBPROTOVECTOR = _descriptor.Descriptor( - name='BlobProtoVector', - full_name='mo_caffe.BlobProtoVector', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='blobs', full_name='mo_caffe.BlobProtoVector.blobs', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=268, - serialized_end=321, -) - - -_COSINESIMILARITYBATCHPARAMETER = _descriptor.Descriptor( - name='CosineSimilarityBatchParameter', - full_name='mo_caffe.CosineSimilarityBatchParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='pos_label', full_name='mo_caffe.CosineSimilarityBatchParameter.pos_label', index=0, - number=1, type=1, cpp_type=5, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='neg_label', full_name='mo_caffe.CosineSimilarityBatchParameter.neg_label', index=1, - number=2, type=1, cpp_type=5, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=323, - serialized_end=400, -) - - -_DATUM = 
_descriptor.Descriptor( - name='Datum', - full_name='mo_caffe.Datum', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='channels', full_name='mo_caffe.Datum.channels', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='height', full_name='mo_caffe.Datum.height', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='width', full_name='mo_caffe.Datum.width', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='data', full_name='mo_caffe.Datum.data', index=3, - number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='label', full_name='mo_caffe.Datum.label', index=4, - number=5, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='float_data', full_name='mo_caffe.Datum.float_data', index=5, - number=6, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='encoded', full_name='mo_caffe.Datum.encoded', index=6, - number=7, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=403, - serialized_end=532, -) - - -_LABELMAPITEM = _descriptor.Descriptor( - name='LabelMapItem', - full_name='mo_caffe.LabelMapItem', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='mo_caffe.LabelMapItem.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='label', full_name='mo_caffe.LabelMapItem.label', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='display_name', full_name='mo_caffe.LabelMapItem.display_name', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - 
options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=534, - serialized_end=599, -) - - -_LABELMAP = _descriptor.Descriptor( - name='LabelMap', - full_name='mo_caffe.LabelMap', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='item', full_name='mo_caffe.LabelMap.item', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=601, - serialized_end=649, -) - - -_NORMALIZEDBBOX = _descriptor.Descriptor( - name='NormalizedBBox', - full_name='mo_caffe.NormalizedBBox', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='xmin', full_name='mo_caffe.NormalizedBBox.xmin', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ymin', full_name='mo_caffe.NormalizedBBox.ymin', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='xmax', full_name='mo_caffe.NormalizedBBox.xmax', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ymax', full_name='mo_caffe.NormalizedBBox.ymax', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='label', full_name='mo_caffe.NormalizedBBox.label', index=4, - number=5, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='difficult', full_name='mo_caffe.NormalizedBBox.difficult', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='score', full_name='mo_caffe.NormalizedBBox.score', index=6, - number=7, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='size', full_name='mo_caffe.NormalizedBBox.size', index=7, - number=8, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - 
options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=652, - serialized_end=787, -) - - -_FILLERPARAMETER = _descriptor.Descriptor( - name='FillerParameter', - full_name='mo_caffe.FillerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='type', full_name='mo_caffe.FillerParameter.type', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("constant").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='value', full_name='mo_caffe.FillerParameter.value', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='min', full_name='mo_caffe.FillerParameter.min', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='max', full_name='mo_caffe.FillerParameter.max', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mean', full_name='mo_caffe.FillerParameter.mean', index=4, - number=5, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='std', full_name='mo_caffe.FillerParameter.std', index=5, - number=6, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='sparse', full_name='mo_caffe.FillerParameter.sparse', index=6, - number=7, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='variance_norm', full_name='mo_caffe.FillerParameter.variance_norm', index=7, - number=8, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='file', full_name='mo_caffe.FillerParameter.file', index=8, - number=9, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='diag_val', full_name='mo_caffe.FillerParameter.diag_val', index=9, - number=10, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _FILLERPARAMETER_VARIANCENORM, - ], - options=None, - is_extendable=False, - 
extension_ranges=[], - oneofs=[ - ], - serialized_start=790, - serialized_end=1091, -) - - -_NETPARAMETER = _descriptor.Descriptor( - name='NetParameter', - full_name='mo_caffe.NetParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='mo_caffe.NetParameter.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='input', full_name='mo_caffe.NetParameter.input', index=1, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='input_shape', full_name='mo_caffe.NetParameter.input_shape', index=2, - number=8, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='input_dim', full_name='mo_caffe.NetParameter.input_dim', index=3, - number=4, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='force_backward', full_name='mo_caffe.NetParameter.force_backward', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='state', full_name='mo_caffe.NetParameter.state', index=5, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='debug_info', full_name='mo_caffe.NetParameter.debug_info', index=6, - number=7, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='profile_info', full_name='mo_caffe.NetParameter.profile_info', index=7, - number=9, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='profile_iter', full_name='mo_caffe.NetParameter.profile_iter', index=8, - number=10, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=50, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='profile_warmup', full_name='mo_caffe.NetParameter.profile_warmup', index=9, - number=11, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=10, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='layer', full_name='mo_caffe.NetParameter.layer', index=10, - number=100, type=11, cpp_type=10, 
label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='layers', full_name='mo_caffe.NetParameter.layers', index=11, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=1094, - serialized_end=1459, -) - - -_SOLVERPARAMETER = _descriptor.Descriptor( - name='SolverParameter', - full_name='mo_caffe.SolverParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='net', full_name='mo_caffe.SolverParameter.net', index=0, - number=24, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='net_param', full_name='mo_caffe.SolverParameter.net_param', index=1, - number=25, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='train_net', full_name='mo_caffe.SolverParameter.train_net', index=2, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='test_net', full_name='mo_caffe.SolverParameter.test_net', index=3, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='train_net_param', full_name='mo_caffe.SolverParameter.train_net_param', index=4, - number=21, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='test_net_param', full_name='mo_caffe.SolverParameter.test_net_param', index=5, - number=22, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='train_state', full_name='mo_caffe.SolverParameter.train_state', index=6, - number=26, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='test_state', full_name='mo_caffe.SolverParameter.test_state', index=7, - number=27, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='test_iter', full_name='mo_caffe.SolverParameter.test_iter', 
index=8, - number=3, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='test_interval', full_name='mo_caffe.SolverParameter.test_interval', index=9, - number=4, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='test_compute_loss', full_name='mo_caffe.SolverParameter.test_compute_loss', index=10, - number=19, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='test_initialization', full_name='mo_caffe.SolverParameter.test_initialization', index=11, - number=32, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='base_lr', full_name='mo_caffe.SolverParameter.base_lr', index=12, - number=5, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='display', full_name='mo_caffe.SolverParameter.display', index=13, - number=6, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='average_loss', full_name='mo_caffe.SolverParameter.average_loss', index=14, - number=33, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='max_iter', full_name='mo_caffe.SolverParameter.max_iter', index=15, - number=7, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='iter_size', full_name='mo_caffe.SolverParameter.iter_size', index=16, - number=36, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lr_policy', full_name='mo_caffe.SolverParameter.lr_policy', index=17, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='gamma', full_name='mo_caffe.SolverParameter.gamma', index=18, - number=9, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='power', full_name='mo_caffe.SolverParameter.power', index=19, - number=10, type=2, cpp_type=6, 
label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='momentum', full_name='mo_caffe.SolverParameter.momentum', index=20, - number=11, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='weight_decay', full_name='mo_caffe.SolverParameter.weight_decay', index=21, - number=12, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='regularization_type', full_name='mo_caffe.SolverParameter.regularization_type', index=22, - number=29, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("L2").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stepsize', full_name='mo_caffe.SolverParameter.stepsize', index=23, - number=13, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stepvalue', full_name='mo_caffe.SolverParameter.stepvalue', index=24, - number=34, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='plateau_winsize', full_name='mo_caffe.SolverParameter.plateau_winsize', index=25, - number=42, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='clip_gradients', full_name='mo_caffe.SolverParameter.clip_gradients', index=26, - number=35, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='snapshot', full_name='mo_caffe.SolverParameter.snapshot', index=27, - number=14, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='snapshot_prefix', full_name='mo_caffe.SolverParameter.snapshot_prefix', index=28, - number=15, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='snapshot_diff', full_name='mo_caffe.SolverParameter.snapshot_diff', index=29, - number=16, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='snapshot_format', full_name='mo_caffe.SolverParameter.snapshot_format', index=30, - 
number=37, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='solver_mode', full_name='mo_caffe.SolverParameter.solver_mode', index=31, - number=17, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='device_id', full_name='mo_caffe.SolverParameter.device_id', index=32, - number=18, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='random_seed', full_name='mo_caffe.SolverParameter.random_seed', index=33, - number=20, type=3, cpp_type=2, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', full_name='mo_caffe.SolverParameter.type', index=34, - number=40, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("SGD").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='delta', full_name='mo_caffe.SolverParameter.delta', index=35, - number=31, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1e-08, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='momentum2', full_name='mo_caffe.SolverParameter.momentum2', index=36, - number=39, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.999, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rms_decay', full_name='mo_caffe.SolverParameter.rms_decay', index=37, - number=38, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.99, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='debug_info', full_name='mo_caffe.SolverParameter.debug_info', index=38, - number=23, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='snapshot_after_train', full_name='mo_caffe.SolverParameter.snapshot_after_train', index=39, - number=28, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='solver_type', full_name='mo_caffe.SolverParameter.solver_type', index=40, - number=30, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='layer_wise_reduce', full_name='mo_caffe.SolverParameter.layer_wise_reduce', index=41, - number=41, type=8, 
cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _SOLVERPARAMETER_SNAPSHOTFORMAT, - _SOLVERPARAMETER_SOLVERMODE, - _SOLVERPARAMETER_SOLVERTYPE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=1462, - serialized_end=2858, -) - - -_SOLVERSTATE = _descriptor.Descriptor( - name='SolverState', - full_name='mo_caffe.SolverState', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='iter', full_name='mo_caffe.SolverState.iter', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='learned_net', full_name='mo_caffe.SolverState.learned_net', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='history', full_name='mo_caffe.SolverState.history', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='current_step', full_name='mo_caffe.SolverState.current_step', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='minimum_loss', full_name='mo_caffe.SolverState.minimum_loss', index=4, - number=5, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1e+38, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='iter_last_event', full_name='mo_caffe.SolverState.iter_last_event', index=5, - number=6, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=2861, - serialized_end=3029, -) - - -_NETSTATE = _descriptor.Descriptor( - name='NetState', - full_name='mo_caffe.NetState', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='phase', full_name='mo_caffe.NetState.phase', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='level', full_name='mo_caffe.NetState.level', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stage', 
full_name='mo_caffe.NetState.stage', index=2, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=3031, - serialized_end=3112, -) - - -_NETSTATERULE = _descriptor.Descriptor( - name='NetStateRule', - full_name='mo_caffe.NetStateRule', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='phase', full_name='mo_caffe.NetStateRule.phase', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='min_level', full_name='mo_caffe.NetStateRule.min_level', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='max_level', full_name='mo_caffe.NetStateRule.max_level', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stage', full_name='mo_caffe.NetStateRule.stage', index=3, - number=4, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='not_stage', full_name='mo_caffe.NetStateRule.not_stage', index=4, - number=5, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=3114, - serialized_end=3232, -) - - -_SPATIALTRANSFORMERPARAMETER = _descriptor.Descriptor( - name='SpatialTransformerParameter', - full_name='mo_caffe.SpatialTransformerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='transform_type', full_name='mo_caffe.SpatialTransformerParameter.transform_type', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("affine").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='sampler_type', full_name='mo_caffe.SpatialTransformerParameter.sampler_type', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("bilinear").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='output_H', full_name='mo_caffe.SpatialTransformerParameter.output_H', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, 
- is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='output_W', full_name='mo_caffe.SpatialTransformerParameter.output_W', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='to_compute_dU', full_name='mo_caffe.SpatialTransformerParameter.to_compute_dU', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='theta_1_1', full_name='mo_caffe.SpatialTransformerParameter.theta_1_1', index=5, - number=6, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='theta_1_2', full_name='mo_caffe.SpatialTransformerParameter.theta_1_2', index=6, - number=7, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='theta_1_3', full_name='mo_caffe.SpatialTransformerParameter.theta_1_3', index=7, - number=8, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='theta_2_1', full_name='mo_caffe.SpatialTransformerParameter.theta_2_1', index=8, - number=9, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='theta_2_2', full_name='mo_caffe.SpatialTransformerParameter.theta_2_2', index=9, - number=10, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='theta_2_3', full_name='mo_caffe.SpatialTransformerParameter.theta_2_3', index=10, - number=11, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='de_transform', full_name='mo_caffe.SpatialTransformerParameter.de_transform', index=11, - number=12, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=3235, - serialized_end=3536, -) - - -_POWERFILEPARAMETER = _descriptor.Descriptor( - name='PowerFileParameter', - full_name='mo_caffe.PowerFileParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='shift_file', full_name='mo_caffe.PowerFileParameter.shift_file', index=0, - number=1, type=9, cpp_type=9, label=1, - 
has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=3538, - serialized_end=3578, -) - - -_STLOSSPARAMETER = _descriptor.Descriptor( - name='STLossParameter', - full_name='mo_caffe.STLossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='output_H', full_name='mo_caffe.STLossParameter.output_H', index=0, - number=1, type=5, cpp_type=1, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='output_W', full_name='mo_caffe.STLossParameter.output_W', index=1, - number=2, type=5, cpp_type=1, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=3580, - serialized_end=3633, -) - - -_LOCLOSSPARAMETER = _descriptor.Descriptor( - name='LocLossParameter', - full_name='mo_caffe.LocLossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='threshold', full_name='mo_caffe.LocLossParameter.threshold', index=0, - number=1, type=1, cpp_type=5, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=3635, - serialized_end=3672, -) - - -_PARAMSPEC = _descriptor.Descriptor( - name='ParamSpec', - full_name='mo_caffe.ParamSpec', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='mo_caffe.ParamSpec.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='share_mode', full_name='mo_caffe.ParamSpec.share_mode', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lr_mult', full_name='mo_caffe.ParamSpec.lr_mult', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='decay_mult', full_name='mo_caffe.ParamSpec.decay_mult', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - 
_PARAMSPEC_DIMCHECKMODE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=3675, - serialized_end=3841, -) - - -_LAYERPARAMETER = _descriptor.Descriptor( - name='LayerParameter', - full_name='mo_caffe.LayerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='mo_caffe.LayerParameter.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', full_name='mo_caffe.LayerParameter.type', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bottom', full_name='mo_caffe.LayerParameter.bottom', index=2, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='top', full_name='mo_caffe.LayerParameter.top', index=3, - number=4, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='phase', full_name='mo_caffe.LayerParameter.phase', index=4, - number=10, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='loss_weight', full_name='mo_caffe.LayerParameter.loss_weight', index=5, - number=5, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='param', full_name='mo_caffe.LayerParameter.param', index=6, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='blobs', full_name='mo_caffe.LayerParameter.blobs', index=7, - number=7, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='propagate_down', full_name='mo_caffe.LayerParameter.propagate_down', index=8, - number=11, type=8, cpp_type=7, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='include', full_name='mo_caffe.LayerParameter.include', index=9, - number=8, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='exclude', 
full_name='mo_caffe.LayerParameter.exclude', index=10, - number=9, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='transform_param', full_name='mo_caffe.LayerParameter.transform_param', index=11, - number=100, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='loss_param', full_name='mo_caffe.LayerParameter.loss_param', index=12, - number=101, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='accuracy_param', full_name='mo_caffe.LayerParameter.accuracy_param', index=13, - number=102, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='argmax_param', full_name='mo_caffe.LayerParameter.argmax_param', index=14, - number=103, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='batch_norm_param', full_name='mo_caffe.LayerParameter.batch_norm_param', index=15, - number=139, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_param', full_name='mo_caffe.LayerParameter.bias_param', index=16, - number=141, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='channel_permutation_param', full_name='mo_caffe.LayerParameter.channel_permutation_param', index=17, - number=8082, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='concat_param', full_name='mo_caffe.LayerParameter.concat_param', index=18, - number=104, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='contrastive_loss_param', full_name='mo_caffe.LayerParameter.contrastive_loss_param', index=19, - number=105, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='convolution_param', full_name='mo_caffe.LayerParameter.convolution_param', index=20, - number=106, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='crop_param', full_name='mo_caffe.LayerParameter.crop_param', index=21, - number=144, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ctc_decoder_param', full_name='mo_caffe.LayerParameter.ctc_decoder_param', index=22, - number=149, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ctc_loss_param', full_name='mo_caffe.LayerParameter.ctc_loss_param', index=23, - number=148, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='data_param', full_name='mo_caffe.LayerParameter.data_param', index=24, - number=107, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dropout_param', full_name='mo_caffe.LayerParameter.dropout_param', index=25, - number=108, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dummy_data_param', full_name='mo_caffe.LayerParameter.dummy_data_param', index=26, - number=109, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='eltwise_param', full_name='mo_caffe.LayerParameter.eltwise_param', index=27, - number=110, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='elu_param', full_name='mo_caffe.LayerParameter.elu_param', index=28, - number=140, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='embed_param', full_name='mo_caffe.LayerParameter.embed_param', index=29, - number=137, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='exp_param', full_name='mo_caffe.LayerParameter.exp_param', index=30, - number=111, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='flatten_param', full_name='mo_caffe.LayerParameter.flatten_param', index=31, - number=135, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='grn_param', full_name='mo_caffe.LayerParameter.grn_param', index=32, - number=213, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='hdf5_data_param', full_name='mo_caffe.LayerParameter.hdf5_data_param', index=33, - number=112, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='hdf5_output_param', full_name='mo_caffe.LayerParameter.hdf5_output_param', index=34, - number=113, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='hinge_loss_param', full_name='mo_caffe.LayerParameter.hinge_loss_param', index=35, - number=114, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='image_data_param', full_name='mo_caffe.LayerParameter.image_data_param', index=36, - number=115, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='infogain_loss_param', full_name='mo_caffe.LayerParameter.infogain_loss_param', index=37, - number=116, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='inner_product_param', full_name='mo_caffe.LayerParameter.inner_product_param', index=38, - number=117, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='input_param', full_name='mo_caffe.LayerParameter.input_param', index=39, - number=143, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='log_param', full_name='mo_caffe.LayerParameter.log_param', index=40, - number=134, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lrn_param', full_name='mo_caffe.LayerParameter.lrn_param', index=41, - number=118, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='memory_data_param', full_name='mo_caffe.LayerParameter.memory_data_param', index=42, - number=119, type=11, cpp_type=10, label=1, - 
has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mvn_param', full_name='mo_caffe.LayerParameter.mvn_param', index=43, - number=120, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='parameter_param', full_name='mo_caffe.LayerParameter.parameter_param', index=44, - number=145, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pooling_param', full_name='mo_caffe.LayerParameter.pooling_param', index=45, - number=121, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='permute_param', full_name='mo_caffe.LayerParameter.permute_param', index=46, - number=154, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='power_param', full_name='mo_caffe.LayerParameter.power_param', index=47, - number=122, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='prelu_param', full_name='mo_caffe.LayerParameter.prelu_param', index=48, - number=131, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='python_param', full_name='mo_caffe.LayerParameter.python_param', index=49, - number=130, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='recurrent_param', full_name='mo_caffe.LayerParameter.recurrent_param', index=50, - number=146, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='reduction_param', full_name='mo_caffe.LayerParameter.reduction_param', index=51, - number=136, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='relu_param', full_name='mo_caffe.LayerParameter.relu_param', index=52, - number=123, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='reshape_param', full_name='mo_caffe.LayerParameter.reshape_param', index=53, - 
number=133, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='reverse_param', full_name='mo_caffe.LayerParameter.reverse_param', index=54, - number=147, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='scale_param', full_name='mo_caffe.LayerParameter.scale_param', index=55, - number=142, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='sigmoid_param', full_name='mo_caffe.LayerParameter.sigmoid_param', index=56, - number=124, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='softmax_param', full_name='mo_caffe.LayerParameter.softmax_param', index=57, - number=125, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='spp_param', full_name='mo_caffe.LayerParameter.spp_param', index=58, - number=132, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='slice_param', full_name='mo_caffe.LayerParameter.slice_param', index=59, - number=126, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='tanh_param', full_name='mo_caffe.LayerParameter.tanh_param', index=60, - number=127, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='threshold_param', full_name='mo_caffe.LayerParameter.threshold_param', index=61, - number=128, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='tile_param', full_name='mo_caffe.LayerParameter.tile_param', index=62, - number=138, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='window_data_param', full_name='mo_caffe.LayerParameter.window_data_param', index=63, - number=129, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='st_param', 
full_name='mo_caffe.LayerParameter.st_param', index=64, - number=150, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='st_loss_param', full_name='mo_caffe.LayerParameter.st_loss_param', index=65, - number=151, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='power_file_param', full_name='mo_caffe.LayerParameter.power_file_param', index=66, - number=152, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='loc_loss_param', full_name='mo_caffe.LayerParameter.loc_loss_param', index=67, - number=153, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='proposal_param', full_name='mo_caffe.LayerParameter.proposal_param', index=68, - number=201, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cosine_similarity_batch_param', full_name='mo_caffe.LayerParameter.cosine_similarity_batch_param', index=69, - number=202, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rss_loss_param', full_name='mo_caffe.LayerParameter.rss_loss_param', index=70, - number=203, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='norm_param', full_name='mo_caffe.LayerParameter.norm_param', index=71, - number=204, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='roi_warping_param', full_name='mo_caffe.LayerParameter.roi_warping_param', index=72, - number=205, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='psroi_pooling_param', full_name='mo_caffe.LayerParameter.psroi_pooling_param', index=73, - number=207, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='roi_pooling_param', full_name='mo_caffe.LayerParameter.roi_pooling_param', index=74, - number=208, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='smooth_l1_loss_param', full_name='mo_caffe.LayerParameter.smooth_l1_loss_param', index=75, - number=209, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='box_annotator_ohem_param', full_name='mo_caffe.LayerParameter.box_annotator_ohem_param', index=76, - number=210, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='detection_output_param', full_name='mo_caffe.LayerParameter.detection_output_param', index=77, - number=211, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='prior_box_param', full_name='mo_caffe.LayerParameter.prior_box_param', index=78, - number=212, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='region_yolo_param', full_name='mo_caffe.LayerParameter.region_yolo_param', index=79, - number=214, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='reorg_yolo_param', full_name='mo_caffe.LayerParameter.reorg_yolo_param', index=80, - number=215, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='relu6_param', full_name='mo_caffe.LayerParameter.relu6_param', index=81, - number=216, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='interp_param', full_name='mo_caffe.LayerParameter.interp_param', index=82, - number=217, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='augmentation_param', full_name='mo_caffe.LayerParameter.augmentation_param', index=83, - number=218, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='correlation_param', full_name='mo_caffe.LayerParameter.correlation_param', index=84, - number=219, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='resample_param', full_name='mo_caffe.LayerParameter.resample_param', index=85, - number=220, 
type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='flow_warp_param', full_name='mo_caffe.LayerParameter.flow_warp_param', index=86, - number=221, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='accum_param', full_name='mo_caffe.LayerParameter.accum_param', index=87, - number=222, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='coeff_schedule_param', full_name='mo_caffe.LayerParameter.coeff_schedule_param', index=88, - number=223, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shuffle_channel_param', full_name='mo_caffe.LayerParameter.shuffle_channel_param', index=89, - number=224, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=3844, - serialized_end=8440, -) - - -_INTERPPARAMETER = _descriptor.Descriptor( - name='InterpParameter', - full_name='mo_caffe.InterpParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='height', full_name='mo_caffe.InterpParameter.height', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='width', full_name='mo_caffe.InterpParameter.width', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='zoom_factor', full_name='mo_caffe.InterpParameter.zoom_factor', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shrink_factor', full_name='mo_caffe.InterpParameter.shrink_factor', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad_beg', full_name='mo_caffe.InterpParameter.pad_beg', index=4, - number=5, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad_end', 
full_name='mo_caffe.InterpParameter.pad_end', index=5, - number=6, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=8443, - serialized_end=8587, -) - - -_RANDOMSAMPLINGSOFTMAXLOSSPARAMETER = _descriptor.Descriptor( - name='RandomSamplingSoftmaxLossParameter', - full_name='mo_caffe.RandomSamplingSoftmaxLossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='random_sampling_num', full_name='mo_caffe.RandomSamplingSoftmaxLossParameter.random_sampling_num', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=100, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='random_sampling_policy', full_name='mo_caffe.RandomSamplingSoftmaxLossParameter.random_sampling_policy', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("random").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=8589, - serialized_end=8699, -) - - -_PROPOSALPARAMETER = _descriptor.Descriptor( - name='ProposalParameter', - full_name='mo_caffe.ProposalParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='feat_stride', full_name='mo_caffe.ProposalParameter.feat_stride', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=16, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='base_size', full_name='mo_caffe.ProposalParameter.base_size', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=16, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='min_size', full_name='mo_caffe.ProposalParameter.min_size', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=16, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ratio', full_name='mo_caffe.ProposalParameter.ratio', index=3, - number=4, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='scale', full_name='mo_caffe.ProposalParameter.scale', index=4, - number=5, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pre_nms_topn', full_name='mo_caffe.ProposalParameter.pre_nms_topn', index=5, - number=6, type=13, cpp_type=3, label=1, - 
has_default_value=True, default_value=6000, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='post_nms_topn', full_name='mo_caffe.ProposalParameter.post_nms_topn', index=6, - number=7, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=300, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='nms_thresh', full_name='mo_caffe.ProposalParameter.nms_thresh', index=7, - number=8, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.7, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=8702, - serialized_end=8902, -) - - -_NORMALIZEPARAMETER = _descriptor.Descriptor( - name='NormalizeParameter', - full_name='mo_caffe.NormalizeParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='across_spatial', full_name='mo_caffe.NormalizeParameter.across_spatial', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='scale_filler', full_name='mo_caffe.NormalizeParameter.scale_filler', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='channel_shared', full_name='mo_caffe.NormalizeParameter.channel_shared', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='eps', full_name='mo_caffe.NormalizeParameter.eps', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1e-10, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=8905, - serialized_end=9054, -) - - -_PERMUTEPARAMETER = _descriptor.Descriptor( - name='PermuteParameter', - full_name='mo_caffe.PermuteParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='order', full_name='mo_caffe.PermuteParameter.order', index=0, - number=1, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=9056, - serialized_end=9089, -) - - -_TRANSFORMATIONPARAMETER = _descriptor.Descriptor( - name='TransformationParameter', - full_name='mo_caffe.TransformationParameter', - filename=None, - file=DESCRIPTOR, - 
containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='scale', full_name='mo_caffe.TransformationParameter.scale', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mirror', full_name='mo_caffe.TransformationParameter.mirror', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='crop_size', full_name='mo_caffe.TransformationParameter.crop_size', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mean_file', full_name='mo_caffe.TransformationParameter.mean_file', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mean_value', full_name='mo_caffe.TransformationParameter.mean_value', index=4, - number=5, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='force_color', full_name='mo_caffe.TransformationParameter.force_color', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='force_gray', full_name='mo_caffe.TransformationParameter.force_gray', index=6, - number=7, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=9092, - serialized_end=9274, -) - - -_LOSSPARAMETER = _descriptor.Descriptor( - name='LossParameter', - full_name='mo_caffe.LossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='ignore_label', full_name='mo_caffe.LossParameter.ignore_label', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='normalization', full_name='mo_caffe.LossParameter.normalization', index=1, - number=3, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='normalize', full_name='mo_caffe.LossParameter.normalize', index=2, - number=2, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, 
containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pre_fixed_normalizer', full_name='mo_caffe.LossParameter.pre_fixed_normalizer', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='weight_by_label_freqs', full_name='mo_caffe.LossParameter.weight_by_label_freqs', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='class_weighting', full_name='mo_caffe.LossParameter.class_weighting', index=5, - number=6, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _LOSSPARAMETER_NORMALIZATIONMODE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=9277, - serialized_end=9585, -) - - -_ACCURACYPARAMETER = _descriptor.Descriptor( - name='AccuracyParameter', - full_name='mo_caffe.AccuracyParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='top_k', full_name='mo_caffe.AccuracyParameter.top_k', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.AccuracyParameter.axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ignore_label', full_name='mo_caffe.AccuracyParameter.ignore_label', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=9587, - serialized_end=9663, -) - - -_ARGMAXPARAMETER = _descriptor.Descriptor( - name='ArgMaxParameter', - full_name='mo_caffe.ArgMaxParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='out_max_val', full_name='mo_caffe.ArgMaxParameter.out_max_val', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='top_k', full_name='mo_caffe.ArgMaxParameter.top_k', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.ArgMaxParameter.axis', index=2, - number=3, type=5, 
cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=9665, - serialized_end=9742, -) - - -_CHANNELPERMUTATIONACTION = _descriptor.Descriptor( - name='ChannelPermutationAction', - full_name='mo_caffe.ChannelPermutationAction', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='chan', full_name='mo_caffe.ChannelPermutationAction.chan', index=0, - number=1, type=13, cpp_type=3, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='copy', full_name='mo_caffe.ChannelPermutationAction.copy', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='fill', full_name='mo_caffe.ChannelPermutationAction.fill', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=9744, - serialized_end=9812, -) - - -_CHANNELPERMUTATIONPARAMETER = _descriptor.Descriptor( - name='ChannelPermutationParameter', - full_name='mo_caffe.ChannelPermutationParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='action', full_name='mo_caffe.ChannelPermutationParameter.action', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num_output', full_name='mo_caffe.ChannelPermutationParameter.num_output', index=1, - number=16, type=13, cpp_type=3, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='inplace_possible', full_name='mo_caffe.ChannelPermutationParameter.inplace_possible', index=2, - number=17, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='version', full_name='mo_caffe.ChannelPermutationParameter.version', index=3, - number=18, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=9815, - serialized_end=9969, -) - - -_CONCATPARAMETER = _descriptor.Descriptor( - name='ConcatParameter', - 
full_name='mo_caffe.ConcatParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.ConcatParameter.axis', index=0, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='concat_dim', full_name='mo_caffe.ConcatParameter.concat_dim', index=1, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=9971, - serialized_end=10028, -) - - -_BATCHNORMPARAMETER = _descriptor.Descriptor( - name='BatchNormParameter', - full_name='mo_caffe.BatchNormParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='use_global_stats', full_name='mo_caffe.BatchNormParameter.use_global_stats', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='moving_average_fraction', full_name='mo_caffe.BatchNormParameter.moving_average_fraction', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.999, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='eps', full_name='mo_caffe.BatchNormParameter.eps', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1e-05, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=10030, - serialized_end=10136, -) - - -_BOXANNOTATOROHEMPARAMETER = _descriptor.Descriptor( - name='BoxAnnotatorOHEMParameter', - full_name='mo_caffe.BoxAnnotatorOHEMParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='roi_per_img', full_name='mo_caffe.BoxAnnotatorOHEMParameter.roi_per_img', index=0, - number=1, type=13, cpp_type=3, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ignore_label', full_name='mo_caffe.BoxAnnotatorOHEMParameter.ignore_label', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=10138, - serialized_end=10212, -) - - -_BIASPARAMETER = _descriptor.Descriptor( - name='BiasParameter', - full_name='mo_caffe.BiasParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.BiasParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num_axes', full_name='mo_caffe.BiasParameter.num_axes', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='filler', full_name='mo_caffe.BiasParameter.filler', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=10214, - serialized_end=10310, -) - - -_CONTRASTIVELOSSPARAMETER = _descriptor.Descriptor( - name='ContrastiveLossParameter', - full_name='mo_caffe.ContrastiveLossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='margin', full_name='mo_caffe.ContrastiveLossParameter.margin', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='legacy_version', full_name='mo_caffe.ContrastiveLossParameter.legacy_version', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=10312, - serialized_end=10388, -) - - -_CONVOLUTIONPARAMETER = _descriptor.Descriptor( - name='ConvolutionParameter', - full_name='mo_caffe.ConvolutionParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num_output', full_name='mo_caffe.ConvolutionParameter.num_output', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_term', full_name='mo_caffe.ConvolutionParameter.bias_term', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad', full_name='mo_caffe.ConvolutionParameter.pad', index=2, - number=3, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='kernel_size', full_name='mo_caffe.ConvolutionParameter.kernel_size', index=3, - number=4, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stride', full_name='mo_caffe.ConvolutionParameter.stride', index=4, - number=6, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dilation', full_name='mo_caffe.ConvolutionParameter.dilation', index=5, - number=18, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad_h', full_name='mo_caffe.ConvolutionParameter.pad_h', index=6, - number=9, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad_w', full_name='mo_caffe.ConvolutionParameter.pad_w', index=7, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='kernel_h', full_name='mo_caffe.ConvolutionParameter.kernel_h', index=8, - number=11, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='kernel_w', full_name='mo_caffe.ConvolutionParameter.kernel_w', index=9, - number=12, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stride_h', full_name='mo_caffe.ConvolutionParameter.stride_h', index=10, - number=13, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stride_w', full_name='mo_caffe.ConvolutionParameter.stride_w', index=11, - number=14, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='group', full_name='mo_caffe.ConvolutionParameter.group', index=12, - number=5, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='weight_filler', full_name='mo_caffe.ConvolutionParameter.weight_filler', index=13, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='mo_caffe.ConvolutionParameter.bias_filler', index=14, - number=8, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='engine', full_name='mo_caffe.ConvolutionParameter.engine', index=15, - number=15, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.ConvolutionParameter.axis', index=16, - number=16, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='force_nd_im2col', full_name='mo_caffe.ConvolutionParameter.force_nd_im2col', index=17, - number=17, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _CONVOLUTIONPARAMETER_ENGINE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=10391, - serialized_end=10908, -) - - -_CROPPARAMETER = _descriptor.Descriptor( - name='CropParameter', - full_name='mo_caffe.CropParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.CropParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=2, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='offset', full_name='mo_caffe.CropParameter.offset', index=1, - number=2, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dimsize', full_name='mo_caffe.CropParameter.dimsize', index=2, - number=3, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=10910, - serialized_end=10975, -) - - -_CTCDECODERPARAMETER = _descriptor.Descriptor( - name='CTCDecoderParameter', - full_name='mo_caffe.CTCDecoderParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='blank_index', full_name='mo_caffe.CTCDecoderParameter.blank_index', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ctc_merge_repeated', full_name='mo_caffe.CTCDecoderParameter.ctc_merge_repeated', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - 
serialized_start=10977, - serialized_end=11057, -) - - -_CTCLOSSPARAMETER = _descriptor.Descriptor( - name='CTCLossParameter', - full_name='mo_caffe.CTCLossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='output_delay', full_name='mo_caffe.CTCLossParameter.output_delay', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='blank_index', full_name='mo_caffe.CTCLossParameter.blank_index', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='preprocess_collapse_repeated', full_name='mo_caffe.CTCLossParameter.preprocess_collapse_repeated', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ctc_merge_repeated', full_name='mo_caffe.CTCLossParameter.ctc_merge_repeated', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='loss_calculation_t', full_name='mo_caffe.CTCLossParameter.loss_calculation_t', index=4, - number=5, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=11060, - serialized_end=11238, -) - - -_DATAPARAMETER = _descriptor.Descriptor( - name='DataParameter', - full_name='mo_caffe.DataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='source', full_name='mo_caffe.DataParameter.source', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='batch_size', full_name='mo_caffe.DataParameter.batch_size', index=1, - number=4, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rand_skip', full_name='mo_caffe.DataParameter.rand_skip', index=2, - number=7, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='backend', full_name='mo_caffe.DataParameter.backend', index=3, - number=8, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - 
_descriptor.FieldDescriptor( - name='scale', full_name='mo_caffe.DataParameter.scale', index=4, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mean_file', full_name='mo_caffe.DataParameter.mean_file', index=5, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='crop_size', full_name='mo_caffe.DataParameter.crop_size', index=6, - number=5, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mirror', full_name='mo_caffe.DataParameter.mirror', index=7, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='force_encoded_color', full_name='mo_caffe.DataParameter.force_encoded_color', index=8, - number=9, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='prefetch', full_name='mo_caffe.DataParameter.prefetch', index=9, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=4, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _DATAPARAMETER_DB, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=11241, - serialized_end=11536, -) - - -_NONMAXIMUMSUPPRESSIONPARAMETER = _descriptor.Descriptor( - name='NonMaximumSuppressionParameter', - full_name='mo_caffe.NonMaximumSuppressionParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='nms_threshold', full_name='mo_caffe.NonMaximumSuppressionParameter.nms_threshold', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.3, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='top_k', full_name='mo_caffe.NonMaximumSuppressionParameter.top_k', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='eta', full_name='mo_caffe.NonMaximumSuppressionParameter.eta', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=11538, - serialized_end=11629, -) - - -_RESIZEPARAMETER = 
_descriptor.Descriptor( - name='ResizeParameter', - full_name='mo_caffe.ResizeParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='prob', full_name='mo_caffe.ResizeParameter.prob', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='resize_mode', full_name='mo_caffe.ResizeParameter.resize_mode', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='height', full_name='mo_caffe.ResizeParameter.height', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='width', full_name='mo_caffe.ResizeParameter.width', index=3, - number=4, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='height_scale', full_name='mo_caffe.ResizeParameter.height_scale', index=4, - number=8, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='width_scale', full_name='mo_caffe.ResizeParameter.width_scale', index=5, - number=9, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad_mode', full_name='mo_caffe.ResizeParameter.pad_mode', index=6, - number=5, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad_value', full_name='mo_caffe.ResizeParameter.pad_value', index=7, - number=6, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='interp_mode', full_name='mo_caffe.ResizeParameter.interp_mode', index=8, - number=7, type=14, cpp_type=8, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _RESIZEPARAMETER_RESIZE_MODE, - _RESIZEPARAMETER_PAD_MODE, - _RESIZEPARAMETER_INTERP_MODE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=11632, - serialized_end=12169, -) - - -_SAVEOUTPUTPARAMETER = _descriptor.Descriptor( - name='SaveOutputParameter', - full_name='mo_caffe.SaveOutputParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='output_directory', 
full_name='mo_caffe.SaveOutputParameter.output_directory', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='output_name_prefix', full_name='mo_caffe.SaveOutputParameter.output_name_prefix', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='output_format', full_name='mo_caffe.SaveOutputParameter.output_format', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='label_map_file', full_name='mo_caffe.SaveOutputParameter.label_map_file', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='name_size_file', full_name='mo_caffe.SaveOutputParameter.name_size_file', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num_test_image', full_name='mo_caffe.SaveOutputParameter.num_test_image', index=5, - number=6, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='resize_param', full_name='mo_caffe.SaveOutputParameter.resize_param', index=6, - number=7, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=12172, - serialized_end=12391, -) - - -_DETECTIONOUTPUTPARAMETER = _descriptor.Descriptor( - name='DetectionOutputParameter', - full_name='mo_caffe.DetectionOutputParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num_classes', full_name='mo_caffe.DetectionOutputParameter.num_classes', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='share_location', full_name='mo_caffe.DetectionOutputParameter.share_location', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='background_label_id', full_name='mo_caffe.DetectionOutputParameter.background_label_id', index=2, - 
number=3, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='nms_param', full_name='mo_caffe.DetectionOutputParameter.nms_param', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='save_output_param', full_name='mo_caffe.DetectionOutputParameter.save_output_param', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='code_type', full_name='mo_caffe.DetectionOutputParameter.code_type', index=5, - number=6, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='variance_encoded_in_target', full_name='mo_caffe.DetectionOutputParameter.variance_encoded_in_target', index=6, - number=8, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='keep_top_k', full_name='mo_caffe.DetectionOutputParameter.keep_top_k', index=7, - number=7, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='confidence_threshold', full_name='mo_caffe.DetectionOutputParameter.confidence_threshold', index=8, - number=9, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='visualize', full_name='mo_caffe.DetectionOutputParameter.visualize', index=9, - number=10, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='visualize_threshold', full_name='mo_caffe.DetectionOutputParameter.visualize_threshold', index=10, - number=11, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='save_file', full_name='mo_caffe.DetectionOutputParameter.save_file', index=11, - number=12, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='input_width', full_name='mo_caffe.DetectionOutputParameter.input_width', index=12, - number=13, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - 
options=None), - _descriptor.FieldDescriptor( - name='input_height', full_name='mo_caffe.DetectionOutputParameter.input_height', index=13, - number=14, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='normalized', full_name='mo_caffe.DetectionOutputParameter.normalized', index=14, - number=15, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='objectness_score', full_name='mo_caffe.DetectionOutputParameter.objectness_score', index=15, - number=16, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.01, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=12394, - serialized_end=12967, -) - - -_DROPOUTPARAMETER = _descriptor.Descriptor( - name='DropoutParameter', - full_name='mo_caffe.DropoutParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='dropout_ratio', full_name='mo_caffe.DropoutParameter.dropout_ratio', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=12969, - serialized_end=13015, -) - - -_DUMMYDATAPARAMETER = _descriptor.Descriptor( - name='DummyDataParameter', - full_name='mo_caffe.DummyDataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='data_filler', full_name='mo_caffe.DummyDataParameter.data_filler', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shape', full_name='mo_caffe.DummyDataParameter.shape', index=1, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num', full_name='mo_caffe.DummyDataParameter.num', index=2, - number=2, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='channels', full_name='mo_caffe.DummyDataParameter.channels', index=3, - number=3, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='height', full_name='mo_caffe.DummyDataParameter.height', index=4, - number=4, type=13, cpp_type=3, label=3, - has_default_value=False, 
default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='width', full_name='mo_caffe.DummyDataParameter.width', index=5, - number=5, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=13018, - serialized_end=13184, -) - - -_ELTWISEPARAMETER = _descriptor.Descriptor( - name='EltwiseParameter', - full_name='mo_caffe.EltwiseParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='operation', full_name='mo_caffe.EltwiseParameter.operation', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='coeff', full_name='mo_caffe.EltwiseParameter.coeff', index=1, - number=2, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stable_prod_grad', full_name='mo_caffe.EltwiseParameter.stable_prod_grad', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _ELTWISEPARAMETER_ELTWISEOP, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=13187, - serialized_end=13355, -) - - -_ELUPARAMETER = _descriptor.Descriptor( - name='ELUParameter', - full_name='mo_caffe.ELUParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='alpha', full_name='mo_caffe.ELUParameter.alpha', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=13357, - serialized_end=13389, -) - - -_EMBEDPARAMETER = _descriptor.Descriptor( - name='EmbedParameter', - full_name='mo_caffe.EmbedParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num_output', full_name='mo_caffe.EmbedParameter.num_output', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='input_dim', full_name='mo_caffe.EmbedParameter.input_dim', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_term', 
full_name='mo_caffe.EmbedParameter.bias_term', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='weight_filler', full_name='mo_caffe.EmbedParameter.weight_filler', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='mo_caffe.EmbedParameter.bias_filler', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=13392, - serialized_end=13570, -) - - -_EXPPARAMETER = _descriptor.Descriptor( - name='ExpParameter', - full_name='mo_caffe.ExpParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='base', full_name='mo_caffe.ExpParameter.base', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='scale', full_name='mo_caffe.ExpParameter.scale', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shift', full_name='mo_caffe.ExpParameter.shift', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=13572, - serialized_end=13640, -) - - -_FLATTENPARAMETER = _descriptor.Descriptor( - name='FlattenParameter', - full_name='mo_caffe.FlattenParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.FlattenParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='end_axis', full_name='mo_caffe.FlattenParameter.end_axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=13642, - serialized_end=13699, -) - - -_HDF5DATAPARAMETER = _descriptor.Descriptor( - name='HDF5DataParameter', - full_name='mo_caffe.HDF5DataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, 
- fields=[ - _descriptor.FieldDescriptor( - name='source', full_name='mo_caffe.HDF5DataParameter.source', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='batch_size', full_name='mo_caffe.HDF5DataParameter.batch_size', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shuffle', full_name='mo_caffe.HDF5DataParameter.shuffle', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=13701, - serialized_end=13780, -) - - -_HDF5OUTPUTPARAMETER = _descriptor.Descriptor( - name='HDF5OutputParameter', - full_name='mo_caffe.HDF5OutputParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='file_name', full_name='mo_caffe.HDF5OutputParameter.file_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=13782, - serialized_end=13822, -) - - -_HINGELOSSPARAMETER = _descriptor.Descriptor( - name='HingeLossParameter', - full_name='mo_caffe.HingeLossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='norm', full_name='mo_caffe.HingeLossParameter.norm', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _HINGELOSSPARAMETER_NORM, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=13824, - serialized_end=13921, -) - - -_IMAGEDATAPARAMETER = _descriptor.Descriptor( - name='ImageDataParameter', - full_name='mo_caffe.ImageDataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='source', full_name='mo_caffe.ImageDataParameter.source', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='batch_size', full_name='mo_caffe.ImageDataParameter.batch_size', index=1, - number=4, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rand_skip', 
full_name='mo_caffe.ImageDataParameter.rand_skip', index=2, - number=7, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shuffle', full_name='mo_caffe.ImageDataParameter.shuffle', index=3, - number=8, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='new_height', full_name='mo_caffe.ImageDataParameter.new_height', index=4, - number=9, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='new_width', full_name='mo_caffe.ImageDataParameter.new_width', index=5, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='is_color', full_name='mo_caffe.ImageDataParameter.is_color', index=6, - number=11, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='scale', full_name='mo_caffe.ImageDataParameter.scale', index=7, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mean_file', full_name='mo_caffe.ImageDataParameter.mean_file', index=8, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='crop_size', full_name='mo_caffe.ImageDataParameter.crop_size', index=9, - number=5, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mirror', full_name='mo_caffe.ImageDataParameter.mirror', index=10, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='root_folder', full_name='mo_caffe.ImageDataParameter.root_folder', index=11, - number=12, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=13924, - serialized_end=14203, -) - - -_INFOGAINLOSSPARAMETER = _descriptor.Descriptor( - name='InfogainLossParameter', - full_name='mo_caffe.InfogainLossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='source', full_name='mo_caffe.InfogainLossParameter.source', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=14205, - serialized_end=14244, -) - - -_INNERPRODUCTPARAMETER = _descriptor.Descriptor( - name='InnerProductParameter', - full_name='mo_caffe.InnerProductParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num_output', full_name='mo_caffe.InnerProductParameter.num_output', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_term', full_name='mo_caffe.InnerProductParameter.bias_term', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='weight_filler', full_name='mo_caffe.InnerProductParameter.weight_filler', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='mo_caffe.InnerProductParameter.bias_filler', index=3, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.InnerProductParameter.axis', index=4, - number=5, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='transpose', full_name='mo_caffe.InnerProductParameter.transpose', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=14247, - serialized_end=14456, -) - - -_INPUTPARAMETER = _descriptor.Descriptor( - name='InputParameter', - full_name='mo_caffe.InputParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='shape', full_name='mo_caffe.InputParameter.shape', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=14458, - serialized_end=14510, -) - - 
-_LOGPARAMETER = _descriptor.Descriptor( - name='LogParameter', - full_name='mo_caffe.LogParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='base', full_name='mo_caffe.LogParameter.base', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='scale', full_name='mo_caffe.LogParameter.scale', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shift', full_name='mo_caffe.LogParameter.shift', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=14512, - serialized_end=14580, -) - - -_LRNPARAMETER = _descriptor.Descriptor( - name='LRNParameter', - full_name='mo_caffe.LRNParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='local_size', full_name='mo_caffe.LRNParameter.local_size', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='alpha', full_name='mo_caffe.LRNParameter.alpha', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='beta', full_name='mo_caffe.LRNParameter.beta', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.75, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='norm_region', full_name='mo_caffe.LRNParameter.norm_region', index=3, - number=4, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='k', full_name='mo_caffe.LRNParameter.k', index=4, - number=5, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='engine', full_name='mo_caffe.LRNParameter.engine', index=5, - number=6, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _LRNPARAMETER_NORMREGION, - _LRNPARAMETER_ENGINE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=14583, - serialized_end=14901, -) - - -_GRNPARAMETER = 
_descriptor.Descriptor( - name='GRNParameter', - full_name='mo_caffe.GRNParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='bias', full_name='mo_caffe.GRNParameter.bias', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=14903, - serialized_end=14934, -) - - -_MEMORYDATAPARAMETER = _descriptor.Descriptor( - name='MemoryDataParameter', - full_name='mo_caffe.MemoryDataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='batch_size', full_name='mo_caffe.MemoryDataParameter.batch_size', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='channels', full_name='mo_caffe.MemoryDataParameter.channels', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='height', full_name='mo_caffe.MemoryDataParameter.height', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='width', full_name='mo_caffe.MemoryDataParameter.width', index=3, - number=4, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=14936, - serialized_end=15026, -) - - -_MVNPARAMETER = _descriptor.Descriptor( - name='MVNParameter', - full_name='mo_caffe.MVNParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='normalize_variance', full_name='mo_caffe.MVNParameter.normalize_variance', index=0, - number=1, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='across_channels', full_name='mo_caffe.MVNParameter.across_channels', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='eps', full_name='mo_caffe.MVNParameter.eps', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1e-09, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - 
extension_ranges=[], - oneofs=[ - ], - serialized_start=15028, - serialized_end=15128, -) - - -_PARAMETERPARAMETER = _descriptor.Descriptor( - name='ParameterParameter', - full_name='mo_caffe.ParameterParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='shape', full_name='mo_caffe.ParameterParameter.shape', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=15130, - serialized_end=15186, -) - - -_POOLINGPARAMETER = _descriptor.Descriptor( - name='PoolingParameter', - full_name='mo_caffe.PoolingParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='pool', full_name='mo_caffe.PoolingParameter.pool', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad', full_name='mo_caffe.PoolingParameter.pad', index=1, - number=4, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad_h', full_name='mo_caffe.PoolingParameter.pad_h', index=2, - number=9, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad_w', full_name='mo_caffe.PoolingParameter.pad_w', index=3, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='kernel_size', full_name='mo_caffe.PoolingParameter.kernel_size', index=4, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='kernel_h', full_name='mo_caffe.PoolingParameter.kernel_h', index=5, - number=5, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='kernel_w', full_name='mo_caffe.PoolingParameter.kernel_w', index=6, - number=6, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stride', full_name='mo_caffe.PoolingParameter.stride', index=7, - number=3, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stride_h', full_name='mo_caffe.PoolingParameter.stride_h', 
index=8, - number=7, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stride_w', full_name='mo_caffe.PoolingParameter.stride_w', index=9, - number=8, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='engine', full_name='mo_caffe.PoolingParameter.engine', index=10, - number=11, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='global_pooling', full_name='mo_caffe.PoolingParameter.global_pooling', index=11, - number=12, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ceil_mode', full_name='mo_caffe.PoolingParameter.ceil_mode', index=12, - number=13, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _POOLINGPARAMETER_POOLMETHOD, - _POOLINGPARAMETER_ENGINE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=15189, - serialized_end=15638, -) - - -_POWERPARAMETER = _descriptor.Descriptor( - name='PowerParameter', - full_name='mo_caffe.PowerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='power', full_name='mo_caffe.PowerParameter.power', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='scale', full_name='mo_caffe.PowerParameter.scale', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shift', full_name='mo_caffe.PowerParameter.shift', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=15640, - serialized_end=15710, -) - - -_PRIORBOXPARAMETER = _descriptor.Descriptor( - name='PriorBoxParameter', - full_name='mo_caffe.PriorBoxParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='min_size', full_name='mo_caffe.PriorBoxParameter.min_size', index=0, - number=1, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - 
_descriptor.FieldDescriptor( - name='max_size', full_name='mo_caffe.PriorBoxParameter.max_size', index=1, - number=2, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='aspect_ratio', full_name='mo_caffe.PriorBoxParameter.aspect_ratio', index=2, - number=3, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='flip', full_name='mo_caffe.PriorBoxParameter.flip', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='clip', full_name='mo_caffe.PriorBoxParameter.clip', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='variance', full_name='mo_caffe.PriorBoxParameter.variance', index=5, - number=6, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='img_size', full_name='mo_caffe.PriorBoxParameter.img_size', index=6, - number=7, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='img_h', full_name='mo_caffe.PriorBoxParameter.img_h', index=7, - number=8, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='img_w', full_name='mo_caffe.PriorBoxParameter.img_w', index=8, - number=9, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='step', full_name='mo_caffe.PriorBoxParameter.step', index=9, - number=10, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='step_h', full_name='mo_caffe.PriorBoxParameter.step_h', index=10, - number=11, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='step_w', full_name='mo_caffe.PriorBoxParameter.step_w', index=11, - number=12, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='offset', full_name='mo_caffe.PriorBoxParameter.offset', index=12, - number=13, type=2, 
cpp_type=6, label=1, - has_default_value=True, default_value=0.5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='width', full_name='mo_caffe.PriorBoxParameter.width', index=13, - number=14, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='height', full_name='mo_caffe.PriorBoxParameter.height', index=14, - number=15, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='fixed_size', full_name='mo_caffe.PriorBoxParameter.fixed_size', index=15, - number=16, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='fixed_ratio', full_name='mo_caffe.PriorBoxParameter.fixed_ratio', index=16, - number=17, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='density', full_name='mo_caffe.PriorBoxParameter.density', index=17, - number=18, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _PRIORBOXPARAMETER_CODETYPE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=15713, - serialized_end=16111, -) - - -_PSROIPOOLINGPARAMETER = _descriptor.Descriptor( - name='PSROIPoolingParameter', - full_name='mo_caffe.PSROIPoolingParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='spatial_scale', full_name='mo_caffe.PSROIPoolingParameter.spatial_scale', index=0, - number=1, type=2, cpp_type=6, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='output_dim', full_name='mo_caffe.PSROIPoolingParameter.output_dim', index=1, - number=2, type=5, cpp_type=1, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='group_size', full_name='mo_caffe.PSROIPoolingParameter.group_size', index=2, - number=3, type=5, cpp_type=1, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=16113, - serialized_end=16199, -) - - -_PYTHONPARAMETER = _descriptor.Descriptor( - name='PythonParameter', - full_name='mo_caffe.PythonParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='module', full_name='mo_caffe.PythonParameter.module', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='layer', full_name='mo_caffe.PythonParameter.layer', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='param_str', full_name='mo_caffe.PythonParameter.param_str', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='share_in_parallel', full_name='mo_caffe.PythonParameter.share_in_parallel', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=16201, - serialized_end=16304, -) - - -_RECURRENTPARAMETER = _descriptor.Descriptor( - name='RecurrentParameter', - full_name='mo_caffe.RecurrentParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='num_output', full_name='mo_caffe.RecurrentParameter.num_output', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='weight_filler', full_name='mo_caffe.RecurrentParameter.weight_filler', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='mo_caffe.RecurrentParameter.bias_filler', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='debug_info', full_name='mo_caffe.RecurrentParameter.debug_info', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='expose_hidden', full_name='mo_caffe.RecurrentParameter.expose_hidden', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=16307, - serialized_end=16505, -) - - -_REDUCTIONPARAMETER = 
_descriptor.Descriptor( - name='ReductionParameter', - full_name='mo_caffe.ReductionParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='operation', full_name='mo_caffe.ReductionParameter.operation', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.ReductionParameter.axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='coeff', full_name='mo_caffe.ReductionParameter.coeff', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _REDUCTIONPARAMETER_REDUCTIONOP, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=16508, - serialized_end=16684, -) - - -_RELUPARAMETER = _descriptor.Descriptor( - name='ReLUParameter', - full_name='mo_caffe.ReLUParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='negative_slope', full_name='mo_caffe.ReLUParameter.negative_slope', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='engine', full_name='mo_caffe.ReLUParameter.engine', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _RELUPARAMETER_ENGINE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=16687, - serialized_end=16831, -) - - -_RELU6PARAMETER = _descriptor.Descriptor( - name='ReLU6Parameter', - full_name='mo_caffe.ReLU6Parameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='n', full_name='mo_caffe.ReLU6Parameter.n', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=6, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=16833, - serialized_end=16863, -) - - -_RESHAPEPARAMETER = _descriptor.Descriptor( - name='ReshapeParameter', - full_name='mo_caffe.ReshapeParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='shape', full_name='mo_caffe.ReshapeParameter.shape', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - 
name='axis', full_name='mo_caffe.ReshapeParameter.axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num_axes', full_name='mo_caffe.ReshapeParameter.num_axes', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=-1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=16865, - serialized_end=16958, -) - - -_REVERSEPARAMETER = _descriptor.Descriptor( - name='ReverseParameter', - full_name='mo_caffe.ReverseParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.ReverseParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=16960, - serialized_end=16995, -) - - -_ROIPOOLINGPARAMETER = _descriptor.Descriptor( - name='ROIPoolingParameter', - full_name='mo_caffe.ROIPoolingParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='pooled_h', full_name='mo_caffe.ROIPoolingParameter.pooled_h', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pooled_w', full_name='mo_caffe.ROIPoolingParameter.pooled_w', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='spatial_scale', full_name='mo_caffe.ROIPoolingParameter.spatial_scale', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=16997, - serialized_end=17086, -) - - -_ROIWARPINGTESTPARAMETER = _descriptor.Descriptor( - name='ROIWarpingTestParameter', - full_name='mo_caffe.ROIWarpingTestParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='pooled_h', full_name='mo_caffe.ROIWarpingTestParameter.pooled_h', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pooled_w', full_name='mo_caffe.ROIWarpingTestParameter.pooled_w', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - 
message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='spatial_scale', full_name='mo_caffe.ROIWarpingTestParameter.spatial_scale', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=17088, - serialized_end=17181, -) - - -_ROIWARPINGPARAMETER = _descriptor.Descriptor( - name='ROIWarpingParameter', - full_name='mo_caffe.ROIWarpingParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='pooled_h', full_name='mo_caffe.ROIWarpingParameter.pooled_h', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pooled_w', full_name='mo_caffe.ROIWarpingParameter.pooled_w', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='spatial_scale', full_name='mo_caffe.ROIWarpingParameter.spatial_scale', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=17183, - serialized_end=17272, -) - - -_SCALEPARAMETER = _descriptor.Descriptor( - name='ScaleParameter', - full_name='mo_caffe.ScaleParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.ScaleParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num_axes', full_name='mo_caffe.ScaleParameter.num_axes', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='filler', full_name='mo_caffe.ScaleParameter.filler', index=2, - number=3, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_term', full_name='mo_caffe.ScaleParameter.bias_term', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='mo_caffe.ScaleParameter.bias_filler', index=4, - number=5, type=11, cpp_type=10, 
label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=17275, - serialized_end=17446, -) - - -_SIGMOIDPARAMETER = _descriptor.Descriptor( - name='SigmoidParameter', - full_name='mo_caffe.SigmoidParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='engine', full_name='mo_caffe.SigmoidParameter.engine', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _SIGMOIDPARAMETER_ENGINE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=17448, - serialized_end=17571, -) - - -_SLICEPARAMETER = _descriptor.Descriptor( - name='SliceParameter', - full_name='mo_caffe.SliceParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.SliceParameter.axis', index=0, - number=3, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='slice_point', full_name='mo_caffe.SliceParameter.slice_point', index=1, - number=2, type=13, cpp_type=3, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='slice_dim', full_name='mo_caffe.SliceParameter.slice_dim', index=2, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=17573, - serialized_end=17649, -) - - -_SMOOTHL1LOSSPARAMETER = _descriptor.Descriptor( - name='SmoothL1LossParameter', - full_name='mo_caffe.SmoothL1LossParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='sigma', full_name='mo_caffe.SmoothL1LossParameter.sigma', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=17651, - serialized_end=17692, -) - - -_SOFTMAXPARAMETER = _descriptor.Descriptor( - name='SoftmaxParameter', - full_name='mo_caffe.SoftmaxParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='engine', full_name='mo_caffe.SoftmaxParameter.engine', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.SoftmaxParameter.axis', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _SOFTMAXPARAMETER_ENGINE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=17695, - serialized_end=17835, -) - - -_TANHPARAMETER = _descriptor.Descriptor( - name='TanHParameter', - full_name='mo_caffe.TanHParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='engine', full_name='mo_caffe.TanHParameter.engine', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _TANHPARAMETER_ENGINE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=17837, - serialized_end=17954, -) - - -_TILEPARAMETER = _descriptor.Descriptor( - name='TileParameter', - full_name='mo_caffe.TileParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='axis', full_name='mo_caffe.TileParameter.axis', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='tiles', full_name='mo_caffe.TileParameter.tiles', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=17956, - serialized_end=18003, -) - - -_THRESHOLDPARAMETER = _descriptor.Descriptor( - name='ThresholdParameter', - full_name='mo_caffe.ThresholdParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='threshold', full_name='mo_caffe.ThresholdParameter.threshold', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=18005, - serialized_end=18047, -) - - -_WINDOWDATAPARAMETER = _descriptor.Descriptor( - name='WindowDataParameter', - full_name='mo_caffe.WindowDataParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='source', full_name='mo_caffe.WindowDataParameter.source', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='scale', 
full_name='mo_caffe.WindowDataParameter.scale', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mean_file', full_name='mo_caffe.WindowDataParameter.mean_file', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='batch_size', full_name='mo_caffe.WindowDataParameter.batch_size', index=3, - number=4, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='crop_size', full_name='mo_caffe.WindowDataParameter.crop_size', index=4, - number=5, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mirror', full_name='mo_caffe.WindowDataParameter.mirror', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='fg_threshold', full_name='mo_caffe.WindowDataParameter.fg_threshold', index=6, - number=7, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bg_threshold', full_name='mo_caffe.WindowDataParameter.bg_threshold', index=7, - number=8, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='fg_fraction', full_name='mo_caffe.WindowDataParameter.fg_fraction', index=8, - number=9, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.25, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='context_pad', full_name='mo_caffe.WindowDataParameter.context_pad', index=9, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='crop_mode', full_name='mo_caffe.WindowDataParameter.crop_mode', index=10, - number=11, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("warp").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cache_images', full_name='mo_caffe.WindowDataParameter.cache_images', index=11, - number=12, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - 
name='root_folder', full_name='mo_caffe.WindowDataParameter.root_folder', index=12, - number=13, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=18050, - serialized_end=18371, -) - - -_SPPPARAMETER = _descriptor.Descriptor( - name='SPPParameter', - full_name='mo_caffe.SPPParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='pyramid_height', full_name='mo_caffe.SPPParameter.pyramid_height', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pool', full_name='mo_caffe.SPPParameter.pool', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='engine', full_name='mo_caffe.SPPParameter.engine', index=2, - number=6, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _SPPPARAMETER_POOLMETHOD, - _SPPPARAMETER_ENGINE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=18374, - serialized_end=18615, -) - - -_V1LAYERPARAMETER = _descriptor.Descriptor( - name='V1LayerParameter', - full_name='mo_caffe.V1LayerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='bottom', full_name='mo_caffe.V1LayerParameter.bottom', index=0, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='top', full_name='mo_caffe.V1LayerParameter.top', index=1, - number=3, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='name', full_name='mo_caffe.V1LayerParameter.name', index=2, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='include', full_name='mo_caffe.V1LayerParameter.include', index=3, - number=32, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='exclude', full_name='mo_caffe.V1LayerParameter.exclude', index=4, - number=33, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', full_name='mo_caffe.V1LayerParameter.type', index=5, - number=5, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='blobs', full_name='mo_caffe.V1LayerParameter.blobs', index=6, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='param', full_name='mo_caffe.V1LayerParameter.param', index=7, - number=1001, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='blob_share_mode', full_name='mo_caffe.V1LayerParameter.blob_share_mode', index=8, - number=1002, type=14, cpp_type=8, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='blobs_lr', full_name='mo_caffe.V1LayerParameter.blobs_lr', index=9, - number=7, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='weight_decay', full_name='mo_caffe.V1LayerParameter.weight_decay', index=10, - number=8, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='loss_weight', full_name='mo_caffe.V1LayerParameter.loss_weight', index=11, - number=35, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='accuracy_param', full_name='mo_caffe.V1LayerParameter.accuracy_param', index=12, - number=27, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='argmax_param', full_name='mo_caffe.V1LayerParameter.argmax_param', index=13, - number=23, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='concat_param', full_name='mo_caffe.V1LayerParameter.concat_param', index=14, - number=9, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='contrastive_loss_param', full_name='mo_caffe.V1LayerParameter.contrastive_loss_param', index=15, - number=40, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - 
is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='convolution_param', full_name='mo_caffe.V1LayerParameter.convolution_param', index=16, - number=10, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='data_param', full_name='mo_caffe.V1LayerParameter.data_param', index=17, - number=11, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dropout_param', full_name='mo_caffe.V1LayerParameter.dropout_param', index=18, - number=12, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dummy_data_param', full_name='mo_caffe.V1LayerParameter.dummy_data_param', index=19, - number=26, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='eltwise_param', full_name='mo_caffe.V1LayerParameter.eltwise_param', index=20, - number=24, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='exp_param', full_name='mo_caffe.V1LayerParameter.exp_param', index=21, - number=41, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='hdf5_data_param', full_name='mo_caffe.V1LayerParameter.hdf5_data_param', index=22, - number=13, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='hdf5_output_param', full_name='mo_caffe.V1LayerParameter.hdf5_output_param', index=23, - number=14, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='hinge_loss_param', full_name='mo_caffe.V1LayerParameter.hinge_loss_param', index=24, - number=29, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='image_data_param', full_name='mo_caffe.V1LayerParameter.image_data_param', index=25, - number=15, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='infogain_loss_param', full_name='mo_caffe.V1LayerParameter.infogain_loss_param', index=26, - number=16, type=11, cpp_type=10, label=1, - 
has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='inner_product_param', full_name='mo_caffe.V1LayerParameter.inner_product_param', index=27, - number=17, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lrn_param', full_name='mo_caffe.V1LayerParameter.lrn_param', index=28, - number=18, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='memory_data_param', full_name='mo_caffe.V1LayerParameter.memory_data_param', index=29, - number=22, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mvn_param', full_name='mo_caffe.V1LayerParameter.mvn_param', index=30, - number=34, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pooling_param', full_name='mo_caffe.V1LayerParameter.pooling_param', index=31, - number=19, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='power_param', full_name='mo_caffe.V1LayerParameter.power_param', index=32, - number=21, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='relu_param', full_name='mo_caffe.V1LayerParameter.relu_param', index=33, - number=30, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='sigmoid_param', full_name='mo_caffe.V1LayerParameter.sigmoid_param', index=34, - number=38, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='softmax_param', full_name='mo_caffe.V1LayerParameter.softmax_param', index=35, - number=39, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='slice_param', full_name='mo_caffe.V1LayerParameter.slice_param', index=36, - number=31, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='tanh_param', full_name='mo_caffe.V1LayerParameter.tanh_param', 
index=37, - number=37, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='threshold_param', full_name='mo_caffe.V1LayerParameter.threshold_param', index=38, - number=25, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='window_data_param', full_name='mo_caffe.V1LayerParameter.window_data_param', index=39, - number=20, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='transform_param', full_name='mo_caffe.V1LayerParameter.transform_param', index=40, - number=36, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='loss_param', full_name='mo_caffe.V1LayerParameter.loss_param', index=41, - number=42, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='layer', full_name='mo_caffe.V1LayerParameter.layer', index=42, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _V1LAYERPARAMETER_LAYERTYPE, - _V1LAYERPARAMETER_DIMCHECKMODE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=18618, - serialized_end=21254, -) - - -_V0LAYERPARAMETER = _descriptor.Descriptor( - name='V0LayerParameter', - full_name='mo_caffe.V0LayerParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='mo_caffe.V0LayerParameter.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', full_name='mo_caffe.V0LayerParameter.type', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num_output', full_name='mo_caffe.V0LayerParameter.num_output', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='biasterm', full_name='mo_caffe.V0LayerParameter.biasterm', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='weight_filler', full_name='mo_caffe.V0LayerParameter.weight_filler', index=4, - number=5, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bias_filler', full_name='mo_caffe.V0LayerParameter.bias_filler', index=5, - number=6, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pad', full_name='mo_caffe.V0LayerParameter.pad', index=6, - number=7, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='kernelsize', full_name='mo_caffe.V0LayerParameter.kernelsize', index=7, - number=8, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='group', full_name='mo_caffe.V0LayerParameter.group', index=8, - number=9, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stride', full_name='mo_caffe.V0LayerParameter.stride', index=9, - number=10, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pool', full_name='mo_caffe.V0LayerParameter.pool', index=10, - number=11, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dropout_ratio', full_name='mo_caffe.V0LayerParameter.dropout_ratio', index=11, - number=12, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='local_size', full_name='mo_caffe.V0LayerParameter.local_size', index=12, - number=13, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='alpha', full_name='mo_caffe.V0LayerParameter.alpha', index=13, - number=14, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='beta', full_name='mo_caffe.V0LayerParameter.beta', index=14, - number=15, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.75, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='k', 
full_name='mo_caffe.V0LayerParameter.k', index=15, - number=22, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='source', full_name='mo_caffe.V0LayerParameter.source', index=16, - number=16, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='scale', full_name='mo_caffe.V0LayerParameter.scale', index=17, - number=17, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='meanfile', full_name='mo_caffe.V0LayerParameter.meanfile', index=18, - number=18, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='batchsize', full_name='mo_caffe.V0LayerParameter.batchsize', index=19, - number=19, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='cropsize', full_name='mo_caffe.V0LayerParameter.cropsize', index=20, - number=20, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mirror', full_name='mo_caffe.V0LayerParameter.mirror', index=21, - number=21, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='blobs', full_name='mo_caffe.V0LayerParameter.blobs', index=22, - number=50, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='blobs_lr', full_name='mo_caffe.V0LayerParameter.blobs_lr', index=23, - number=51, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='weight_decay', full_name='mo_caffe.V0LayerParameter.weight_decay', index=24, - number=52, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rand_skip', full_name='mo_caffe.V0LayerParameter.rand_skip', index=25, - number=53, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='det_fg_threshold', full_name='mo_caffe.V0LayerParameter.det_fg_threshold', 
index=26, - number=54, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='det_bg_threshold', full_name='mo_caffe.V0LayerParameter.det_bg_threshold', index=27, - number=55, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='det_fg_fraction', full_name='mo_caffe.V0LayerParameter.det_fg_fraction', index=28, - number=56, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.25, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='det_context_pad', full_name='mo_caffe.V0LayerParameter.det_context_pad', index=29, - number=58, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='det_crop_mode', full_name='mo_caffe.V0LayerParameter.det_crop_mode', index=30, - number=59, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("warp").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='new_num', full_name='mo_caffe.V0LayerParameter.new_num', index=31, - number=60, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='new_channels', full_name='mo_caffe.V0LayerParameter.new_channels', index=32, - number=61, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='new_height', full_name='mo_caffe.V0LayerParameter.new_height', index=33, - number=62, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='new_width', full_name='mo_caffe.V0LayerParameter.new_width', index=34, - number=63, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shuffle_images', full_name='mo_caffe.V0LayerParameter.shuffle_images', index=35, - number=64, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='concat_dim', full_name='mo_caffe.V0LayerParameter.concat_dim', index=36, - number=65, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='hdf5_output_param', 
full_name='mo_caffe.V0LayerParameter.hdf5_output_param', index=37, - number=1001, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _V0LAYERPARAMETER_POOLMETHOD, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=21257, - serialized_end=22293, -) - - -_PRELUPARAMETER = _descriptor.Descriptor( - name='PReLUParameter', - full_name='mo_caffe.PReLUParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='filler', full_name='mo_caffe.PReLUParameter.filler', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='channel_shared', full_name='mo_caffe.PReLUParameter.channel_shared', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=22295, - serialized_end=22385, -) - - -_REGIONYOLOPARAMETER = _descriptor.Descriptor( - name='RegionYoloParameter', - full_name='mo_caffe.RegionYoloParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='coords', full_name='mo_caffe.RegionYoloParameter.coords', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=4, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='classes', full_name='mo_caffe.RegionYoloParameter.classes', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=20, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num', full_name='mo_caffe.RegionYoloParameter.num', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='do_softmax', full_name='mo_caffe.RegionYoloParameter.do_softmax', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='anchors', full_name='mo_caffe.RegionYoloParameter.anchors', index=4, - number=5, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mask', full_name='mo_caffe.RegionYoloParameter.mask', index=5, - number=6, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=22388, - serialized_end=22522, -) - - -_REORGYOLOPARAMETER = _descriptor.Descriptor( - name='ReorgYoloParameter', - full_name='mo_caffe.ReorgYoloParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='stride', full_name='mo_caffe.ReorgYoloParameter.stride', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=22524, - serialized_end=22563, -) - - -_RANDOMGENERATORPARAMETER = _descriptor.Descriptor( - name='RandomGeneratorParameter', - full_name='mo_caffe.RandomGeneratorParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='rand_type', full_name='mo_caffe.RandomGeneratorParameter.rand_type', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("uniform").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='exp', full_name='mo_caffe.RandomGeneratorParameter.exp', index=1, - number=2, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mean', full_name='mo_caffe.RandomGeneratorParameter.mean', index=2, - number=4, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='spread', full_name='mo_caffe.RandomGeneratorParameter.spread', index=3, - number=5, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='prob', full_name='mo_caffe.RandomGeneratorParameter.prob', index=4, - number=6, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='apply_schedule', full_name='mo_caffe.RandomGeneratorParameter.apply_schedule', index=5, - number=7, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='discretize', full_name='mo_caffe.RandomGeneratorParameter.discretize', index=6, - number=8, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='multiplier', full_name='mo_caffe.RandomGeneratorParameter.multiplier', index=7, - number=9, type=2, cpp_type=6, 
label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=22566, - serialized_end=22773, -) - - -_COEFFSCHEDULEPARAMETER = _descriptor.Descriptor( - name='CoeffScheduleParameter', - full_name='mo_caffe.CoeffScheduleParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='half_life', full_name='mo_caffe.CoeffScheduleParameter.half_life', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='initial_coeff', full_name='mo_caffe.CoeffScheduleParameter.initial_coeff', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='final_coeff', full_name='mo_caffe.CoeffScheduleParameter.final_coeff', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=22775, - serialized_end=22871, -) - - -_AUGMENTATIONCOEFF = _descriptor.Descriptor( - name='AugmentationCoeff', - full_name='mo_caffe.AugmentationCoeff', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='mirror', full_name='mo_caffe.AugmentationCoeff.mirror', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dx', full_name='mo_caffe.AugmentationCoeff.dx', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='dy', full_name='mo_caffe.AugmentationCoeff.dy', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='angle', full_name='mo_caffe.AugmentationCoeff.angle', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='zoom_x', full_name='mo_caffe.AugmentationCoeff.zoom_x', index=4, - number=5, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='zoom_y', full_name='mo_caffe.AugmentationCoeff.zoom_y', index=5, 
- number=6, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='gamma', full_name='mo_caffe.AugmentationCoeff.gamma', index=6, - number=100, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='brightness', full_name='mo_caffe.AugmentationCoeff.brightness', index=7, - number=101, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='contrast', full_name='mo_caffe.AugmentationCoeff.contrast', index=8, - number=102, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='color1', full_name='mo_caffe.AugmentationCoeff.color1', index=9, - number=103, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='color2', full_name='mo_caffe.AugmentationCoeff.color2', index=10, - number=104, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='color3', full_name='mo_caffe.AugmentationCoeff.color3', index=11, - number=105, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pow_nomean0', full_name='mo_caffe.AugmentationCoeff.pow_nomean0', index=12, - number=10, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pow_nomean1', full_name='mo_caffe.AugmentationCoeff.pow_nomean1', index=13, - number=11, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pow_nomean2', full_name='mo_caffe.AugmentationCoeff.pow_nomean2', index=14, - number=12, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='add_nomean0', full_name='mo_caffe.AugmentationCoeff.add_nomean0', index=15, - number=13, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='add_nomean1', full_name='mo_caffe.AugmentationCoeff.add_nomean1', index=16, - number=14, type=2, cpp_type=6, label=1, - has_default_value=True, 
default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='add_nomean2', full_name='mo_caffe.AugmentationCoeff.add_nomean2', index=17, - number=15, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mult_nomean0', full_name='mo_caffe.AugmentationCoeff.mult_nomean0', index=18, - number=16, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mult_nomean1', full_name='mo_caffe.AugmentationCoeff.mult_nomean1', index=19, - number=17, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mult_nomean2', full_name='mo_caffe.AugmentationCoeff.mult_nomean2', index=20, - number=18, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pow_withmean0', full_name='mo_caffe.AugmentationCoeff.pow_withmean0', index=21, - number=19, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pow_withmean1', full_name='mo_caffe.AugmentationCoeff.pow_withmean1', index=22, - number=20, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='pow_withmean2', full_name='mo_caffe.AugmentationCoeff.pow_withmean2', index=23, - number=21, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='add_withmean0', full_name='mo_caffe.AugmentationCoeff.add_withmean0', index=24, - number=22, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='add_withmean1', full_name='mo_caffe.AugmentationCoeff.add_withmean1', index=25, - number=23, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='add_withmean2', full_name='mo_caffe.AugmentationCoeff.add_withmean2', index=26, - number=24, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mult_withmean0', full_name='mo_caffe.AugmentationCoeff.mult_withmean0', index=27, - number=25, type=2, cpp_type=6, label=1, - 
has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mult_withmean1', full_name='mo_caffe.AugmentationCoeff.mult_withmean1', index=28, - number=26, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mult_withmean2', full_name='mo_caffe.AugmentationCoeff.mult_withmean2', index=29, - number=27, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lmult_pow', full_name='mo_caffe.AugmentationCoeff.lmult_pow', index=30, - number=28, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lmult_add', full_name='mo_caffe.AugmentationCoeff.lmult_add', index=31, - number=29, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lmult_mult', full_name='mo_caffe.AugmentationCoeff.lmult_mult', index=32, - number=30, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='col_angle', full_name='mo_caffe.AugmentationCoeff.col_angle', index=33, - number=31, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='fog_amount', full_name='mo_caffe.AugmentationCoeff.fog_amount', index=34, - number=38, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='fog_size', full_name='mo_caffe.AugmentationCoeff.fog_size', index=35, - number=39, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='motion_blur_angle', full_name='mo_caffe.AugmentationCoeff.motion_blur_angle', index=36, - number=40, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='motion_blur_size', full_name='mo_caffe.AugmentationCoeff.motion_blur_size', index=37, - number=41, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shadow_angle', full_name='mo_caffe.AugmentationCoeff.shadow_angle', index=38, - number=42, type=2, cpp_type=6, label=1, - 
has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shadow_distance', full_name='mo_caffe.AugmentationCoeff.shadow_distance', index=39, - number=43, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shadow_strength', full_name='mo_caffe.AugmentationCoeff.shadow_strength', index=40, - number=44, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='noise', full_name='mo_caffe.AugmentationCoeff.noise', index=41, - number=45, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=22874, - serialized_end=23864, -) - - -_AUGMENTATIONPARAMETER = _descriptor.Descriptor( - name='AugmentationParameter', - full_name='mo_caffe.AugmentationParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='crop_width', full_name='mo_caffe.AugmentationParameter.crop_width', index=0, - number=33, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='crop_height', full_name='mo_caffe.AugmentationParameter.crop_height', index=1, - number=34, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='write_augmented', full_name='mo_caffe.AugmentationParameter.write_augmented', index=2, - number=2, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='max_multiplier', full_name='mo_caffe.AugmentationParameter.max_multiplier', index=3, - number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=255, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='augment_during_test', full_name='mo_caffe.AugmentationParameter.augment_during_test', index=4, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='recompute_mean', full_name='mo_caffe.AugmentationParameter.recompute_mean', index=5, - number=5, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - 
name='write_mean', full_name='mo_caffe.AugmentationParameter.write_mean', index=6, - number=6, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mean_per_pixel', full_name='mo_caffe.AugmentationParameter.mean_per_pixel', index=7, - number=7, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mean', full_name='mo_caffe.AugmentationParameter.mean', index=8, - number=18, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mode', full_name='mo_caffe.AugmentationParameter.mode', index=9, - number=8, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=_b("add").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bottomwidth', full_name='mo_caffe.AugmentationParameter.bottomwidth', index=10, - number=80, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='bottomheight', full_name='mo_caffe.AugmentationParameter.bottomheight', index=11, - number=81, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='num', full_name='mo_caffe.AugmentationParameter.num', index=12, - number=82, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='chromatic_eigvec', full_name='mo_caffe.AugmentationParameter.chromatic_eigvec', index=13, - number=83, type=2, cpp_type=6, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='mirror', full_name='mo_caffe.AugmentationParameter.mirror', index=14, - number=10, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='translate', full_name='mo_caffe.AugmentationParameter.translate', index=15, - number=11, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='rotate', full_name='mo_caffe.AugmentationParameter.rotate', index=16, - number=12, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - 
_descriptor.FieldDescriptor( - name='zoom', full_name='mo_caffe.AugmentationParameter.zoom', index=17, - number=13, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='squeeze', full_name='mo_caffe.AugmentationParameter.squeeze', index=18, - number=14, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='translate_x', full_name='mo_caffe.AugmentationParameter.translate_x', index=19, - number=15, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='translate_y', full_name='mo_caffe.AugmentationParameter.translate_y', index=20, - number=16, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='gamma', full_name='mo_caffe.AugmentationParameter.gamma', index=21, - number=35, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='brightness', full_name='mo_caffe.AugmentationParameter.brightness', index=22, - number=36, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='contrast', full_name='mo_caffe.AugmentationParameter.contrast', index=23, - number=37, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='color', full_name='mo_caffe.AugmentationParameter.color', index=24, - number=38, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lmult_pow', full_name='mo_caffe.AugmentationParameter.lmult_pow', index=25, - number=20, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lmult_mult', full_name='mo_caffe.AugmentationParameter.lmult_mult', index=26, - number=21, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='lmult_add', full_name='mo_caffe.AugmentationParameter.lmult_add', index=27, - number=22, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - 
options=None), - _descriptor.FieldDescriptor( - name='sat_pow', full_name='mo_caffe.AugmentationParameter.sat_pow', index=28, - number=23, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='sat_mult', full_name='mo_caffe.AugmentationParameter.sat_mult', index=29, - number=24, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='sat_add', full_name='mo_caffe.AugmentationParameter.sat_add', index=30, - number=25, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='col_pow', full_name='mo_caffe.AugmentationParameter.col_pow', index=31, - number=26, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='col_mult', full_name='mo_caffe.AugmentationParameter.col_mult', index=32, - number=27, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='col_add', full_name='mo_caffe.AugmentationParameter.col_add', index=33, - number=28, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ladd_pow', full_name='mo_caffe.AugmentationParameter.ladd_pow', index=34, - number=29, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ladd_mult', full_name='mo_caffe.AugmentationParameter.ladd_mult', index=35, - number=30, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='ladd_add', full_name='mo_caffe.AugmentationParameter.ladd_add', index=36, - number=31, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='col_rotate', full_name='mo_caffe.AugmentationParameter.col_rotate', index=37, - number=32, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='fog_amount', full_name='mo_caffe.AugmentationParameter.fog_amount', index=38, - number=100, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='fog_size', full_name='mo_caffe.AugmentationParameter.fog_size', index=39, - number=101, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='motion_blur_angle', full_name='mo_caffe.AugmentationParameter.motion_blur_angle', index=40, - number=102, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='motion_blur_size', full_name='mo_caffe.AugmentationParameter.motion_blur_size', index=41, - number=103, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shadow_angle', full_name='mo_caffe.AugmentationParameter.shadow_angle', index=42, - number=104, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shadow_distance', full_name='mo_caffe.AugmentationParameter.shadow_distance', index=43, - number=105, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='shadow_strength', full_name='mo_caffe.AugmentationParameter.shadow_strength', index=44, - number=106, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='noise', full_name='mo_caffe.AugmentationParameter.noise', index=45, - number=107, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=23867, - serialized_end=25991, -) - - -_FLOWWARPPARAMETER = _descriptor.Descriptor( - name='FlowWarpParameter', - full_name='mo_caffe.FlowWarpParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='fill_value', full_name='mo_caffe.FlowWarpParameter.fill_value', index=0, - number=1, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _FLOWWARPPARAMETER_FILLPARAMETER, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=25994, - serialized_end=26127, -) - - -_CORRELATIONPARAMETER = _descriptor.Descriptor( - name='CorrelationParameter', - full_name='mo_caffe.CorrelationParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - 
name='pad', full_name='mo_caffe.CorrelationParameter.pad', index=0, - number=2, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='kernel_size', full_name='mo_caffe.CorrelationParameter.kernel_size', index=1, - number=3, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='max_displacement', full_name='mo_caffe.CorrelationParameter.max_displacement', index=2, - number=4, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stride_1', full_name='mo_caffe.CorrelationParameter.stride_1', index=3, - number=5, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='stride_2', full_name='mo_caffe.CorrelationParameter.stride_2', index=4, - number=6, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='single_direction', full_name='mo_caffe.CorrelationParameter.single_direction', index=5, - number=8, type=5, cpp_type=1, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='do_abs', full_name='mo_caffe.CorrelationParameter.do_abs', index=6, - number=7, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='correlation_type', full_name='mo_caffe.CorrelationParameter.correlation_type', index=7, - number=15, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _CORRELATIONPARAMETER_CORRELATIONTYPE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=26130, - serialized_end=26440, -) - - -_RESAMPLEPARAMETER = _descriptor.Descriptor( - name='ResampleParameter', - full_name='mo_caffe.ResampleParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='antialias', full_name='mo_caffe.ResampleParameter.antialias', index=0, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=True, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='width', full_name='mo_caffe.ResampleParameter.width', index=1, - number=1, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='height', full_name='mo_caffe.ResampleParameter.height', index=2, - number=2, type=13, cpp_type=3, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='type', full_name='mo_caffe.ResampleParameter.type', index=3, - number=3, type=14, cpp_type=8, label=1, - has_default_value=True, default_value=2, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='factor', full_name='mo_caffe.ResampleParameter.factor', index=4, - number=5, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - _RESAMPLEPARAMETER_RESAMPLETYPE, - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=26443, - serialized_end=26663, -) - - -_ACCUMPARAMETER = _descriptor.Descriptor( - name='AccumParameter', - full_name='mo_caffe.AccumParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='top_height', full_name='mo_caffe.AccumParameter.top_height', index=0, - number=1, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='top_width', full_name='mo_caffe.AccumParameter.top_width', index=1, - number=2, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='size_divisible_by', full_name='mo_caffe.AccumParameter.size_divisible_by', index=2, - number=3, type=13, cpp_type=3, label=1, - has_default_value=True, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - _descriptor.FieldDescriptor( - name='have_reference', full_name='mo_caffe.AccumParameter.have_reference', index=3, - number=4, type=8, cpp_type=7, label=1, - has_default_value=True, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=26665, - serialized_end=26787, -) - - -_SHUFFLECHANNELPARAMETER = _descriptor.Descriptor( - name='ShuffleChannelParameter', - full_name='mo_caffe.ShuffleChannelParameter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='group', full_name='mo_caffe.ShuffleChannelParameter.group', index=0, - number=1, type=13, cpp_type=3, label=2, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - extension_ranges=[], - oneofs=[ - ], - serialized_start=26789, - 
serialized_end=26829, -) - -_BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE -_BLOBPROTOVECTOR.fields_by_name['blobs'].message_type = _BLOBPROTO -_LABELMAP.fields_by_name['item'].message_type = _LABELMAPITEM -_FILLERPARAMETER.fields_by_name['variance_norm'].enum_type = _FILLERPARAMETER_VARIANCENORM -_FILLERPARAMETER_VARIANCENORM.containing_type = _FILLERPARAMETER -_NETPARAMETER.fields_by_name['input_shape'].message_type = _BLOBSHAPE -_NETPARAMETER.fields_by_name['state'].message_type = _NETSTATE -_NETPARAMETER.fields_by_name['layer'].message_type = _LAYERPARAMETER -_NETPARAMETER.fields_by_name['layers'].message_type = _V1LAYERPARAMETER -_SOLVERPARAMETER.fields_by_name['net_param'].message_type = _NETPARAMETER -_SOLVERPARAMETER.fields_by_name['train_net_param'].message_type = _NETPARAMETER -_SOLVERPARAMETER.fields_by_name['test_net_param'].message_type = _NETPARAMETER -_SOLVERPARAMETER.fields_by_name['train_state'].message_type = _NETSTATE -_SOLVERPARAMETER.fields_by_name['test_state'].message_type = _NETSTATE -_SOLVERPARAMETER.fields_by_name['snapshot_format'].enum_type = _SOLVERPARAMETER_SNAPSHOTFORMAT -_SOLVERPARAMETER.fields_by_name['solver_mode'].enum_type = _SOLVERPARAMETER_SOLVERMODE -_SOLVERPARAMETER.fields_by_name['solver_type'].enum_type = _SOLVERPARAMETER_SOLVERTYPE -_SOLVERPARAMETER_SNAPSHOTFORMAT.containing_type = _SOLVERPARAMETER -_SOLVERPARAMETER_SOLVERMODE.containing_type = _SOLVERPARAMETER -_SOLVERPARAMETER_SOLVERTYPE.containing_type = _SOLVERPARAMETER -_SOLVERSTATE.fields_by_name['history'].message_type = _BLOBPROTO -_NETSTATE.fields_by_name['phase'].enum_type = _PHASE -_NETSTATERULE.fields_by_name['phase'].enum_type = _PHASE -_PARAMSPEC.fields_by_name['share_mode'].enum_type = _PARAMSPEC_DIMCHECKMODE -_PARAMSPEC_DIMCHECKMODE.containing_type = _PARAMSPEC -_LAYERPARAMETER.fields_by_name['phase'].enum_type = _PHASE -_LAYERPARAMETER.fields_by_name['param'].message_type = _PARAMSPEC -_LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO -_LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE -_LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE -_LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER -_LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER -_LAYERPARAMETER.fields_by_name['accuracy_param'].message_type = _ACCURACYPARAMETER -_LAYERPARAMETER.fields_by_name['argmax_param'].message_type = _ARGMAXPARAMETER -_LAYERPARAMETER.fields_by_name['batch_norm_param'].message_type = _BATCHNORMPARAMETER -_LAYERPARAMETER.fields_by_name['bias_param'].message_type = _BIASPARAMETER -_LAYERPARAMETER.fields_by_name['channel_permutation_param'].message_type = _CHANNELPERMUTATIONPARAMETER -_LAYERPARAMETER.fields_by_name['concat_param'].message_type = _CONCATPARAMETER -_LAYERPARAMETER.fields_by_name['contrastive_loss_param'].message_type = _CONTRASTIVELOSSPARAMETER -_LAYERPARAMETER.fields_by_name['convolution_param'].message_type = _CONVOLUTIONPARAMETER -_LAYERPARAMETER.fields_by_name['crop_param'].message_type = _CROPPARAMETER -_LAYERPARAMETER.fields_by_name['ctc_decoder_param'].message_type = _CTCDECODERPARAMETER -_LAYERPARAMETER.fields_by_name['ctc_loss_param'].message_type = _CTCLOSSPARAMETER -_LAYERPARAMETER.fields_by_name['data_param'].message_type = _DATAPARAMETER -_LAYERPARAMETER.fields_by_name['dropout_param'].message_type = _DROPOUTPARAMETER -_LAYERPARAMETER.fields_by_name['dummy_data_param'].message_type = _DUMMYDATAPARAMETER 
-_LAYERPARAMETER.fields_by_name['eltwise_param'].message_type = _ELTWISEPARAMETER -_LAYERPARAMETER.fields_by_name['elu_param'].message_type = _ELUPARAMETER -_LAYERPARAMETER.fields_by_name['embed_param'].message_type = _EMBEDPARAMETER -_LAYERPARAMETER.fields_by_name['exp_param'].message_type = _EXPPARAMETER -_LAYERPARAMETER.fields_by_name['flatten_param'].message_type = _FLATTENPARAMETER -_LAYERPARAMETER.fields_by_name['grn_param'].message_type = _GRNPARAMETER -_LAYERPARAMETER.fields_by_name['hdf5_data_param'].message_type = _HDF5DATAPARAMETER -_LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER -_LAYERPARAMETER.fields_by_name['hinge_loss_param'].message_type = _HINGELOSSPARAMETER -_LAYERPARAMETER.fields_by_name['image_data_param'].message_type = _IMAGEDATAPARAMETER -_LAYERPARAMETER.fields_by_name['infogain_loss_param'].message_type = _INFOGAINLOSSPARAMETER -_LAYERPARAMETER.fields_by_name['inner_product_param'].message_type = _INNERPRODUCTPARAMETER -_LAYERPARAMETER.fields_by_name['input_param'].message_type = _INPUTPARAMETER -_LAYERPARAMETER.fields_by_name['log_param'].message_type = _LOGPARAMETER -_LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER -_LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER -_LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER -_LAYERPARAMETER.fields_by_name['parameter_param'].message_type = _PARAMETERPARAMETER -_LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER -_LAYERPARAMETER.fields_by_name['permute_param'].message_type = _PERMUTEPARAMETER -_LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER -_LAYERPARAMETER.fields_by_name['prelu_param'].message_type = _PRELUPARAMETER -_LAYERPARAMETER.fields_by_name['python_param'].message_type = _PYTHONPARAMETER -_LAYERPARAMETER.fields_by_name['recurrent_param'].message_type = _RECURRENTPARAMETER -_LAYERPARAMETER.fields_by_name['reduction_param'].message_type = _REDUCTIONPARAMETER -_LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER -_LAYERPARAMETER.fields_by_name['reshape_param'].message_type = _RESHAPEPARAMETER -_LAYERPARAMETER.fields_by_name['reverse_param'].message_type = _REVERSEPARAMETER -_LAYERPARAMETER.fields_by_name['scale_param'].message_type = _SCALEPARAMETER -_LAYERPARAMETER.fields_by_name['sigmoid_param'].message_type = _SIGMOIDPARAMETER -_LAYERPARAMETER.fields_by_name['softmax_param'].message_type = _SOFTMAXPARAMETER -_LAYERPARAMETER.fields_by_name['spp_param'].message_type = _SPPPARAMETER -_LAYERPARAMETER.fields_by_name['slice_param'].message_type = _SLICEPARAMETER -_LAYERPARAMETER.fields_by_name['tanh_param'].message_type = _TANHPARAMETER -_LAYERPARAMETER.fields_by_name['threshold_param'].message_type = _THRESHOLDPARAMETER -_LAYERPARAMETER.fields_by_name['tile_param'].message_type = _TILEPARAMETER -_LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER -_LAYERPARAMETER.fields_by_name['st_param'].message_type = _SPATIALTRANSFORMERPARAMETER -_LAYERPARAMETER.fields_by_name['st_loss_param'].message_type = _STLOSSPARAMETER -_LAYERPARAMETER.fields_by_name['power_file_param'].message_type = _POWERFILEPARAMETER -_LAYERPARAMETER.fields_by_name['loc_loss_param'].message_type = _LOCLOSSPARAMETER -_LAYERPARAMETER.fields_by_name['proposal_param'].message_type = _PROPOSALPARAMETER -_LAYERPARAMETER.fields_by_name['cosine_similarity_batch_param'].message_type = _COSINESIMILARITYBATCHPARAMETER 
-_LAYERPARAMETER.fields_by_name['rss_loss_param'].message_type = _RANDOMSAMPLINGSOFTMAXLOSSPARAMETER -_LAYERPARAMETER.fields_by_name['norm_param'].message_type = _NORMALIZEPARAMETER -_LAYERPARAMETER.fields_by_name['roi_warping_param'].message_type = _ROIWARPINGPARAMETER -_LAYERPARAMETER.fields_by_name['psroi_pooling_param'].message_type = _PSROIPOOLINGPARAMETER -_LAYERPARAMETER.fields_by_name['roi_pooling_param'].message_type = _ROIPOOLINGPARAMETER -_LAYERPARAMETER.fields_by_name['smooth_l1_loss_param'].message_type = _SMOOTHL1LOSSPARAMETER -_LAYERPARAMETER.fields_by_name['box_annotator_ohem_param'].message_type = _BOXANNOTATOROHEMPARAMETER -_LAYERPARAMETER.fields_by_name['detection_output_param'].message_type = _DETECTIONOUTPUTPARAMETER -_LAYERPARAMETER.fields_by_name['prior_box_param'].message_type = _PRIORBOXPARAMETER -_LAYERPARAMETER.fields_by_name['region_yolo_param'].message_type = _REGIONYOLOPARAMETER -_LAYERPARAMETER.fields_by_name['reorg_yolo_param'].message_type = _REORGYOLOPARAMETER -_LAYERPARAMETER.fields_by_name['relu6_param'].message_type = _RELU6PARAMETER -_LAYERPARAMETER.fields_by_name['interp_param'].message_type = _INTERPPARAMETER -_LAYERPARAMETER.fields_by_name['augmentation_param'].message_type = _AUGMENTATIONPARAMETER -_LAYERPARAMETER.fields_by_name['correlation_param'].message_type = _CORRELATIONPARAMETER -_LAYERPARAMETER.fields_by_name['resample_param'].message_type = _RESAMPLEPARAMETER -_LAYERPARAMETER.fields_by_name['flow_warp_param'].message_type = _FLOWWARPPARAMETER -_LAYERPARAMETER.fields_by_name['accum_param'].message_type = _ACCUMPARAMETER -_LAYERPARAMETER.fields_by_name['coeff_schedule_param'].message_type = _COEFFSCHEDULEPARAMETER -_LAYERPARAMETER.fields_by_name['shuffle_channel_param'].message_type = _SHUFFLECHANNELPARAMETER -_NORMALIZEPARAMETER.fields_by_name['scale_filler'].message_type = _FILLERPARAMETER -_LOSSPARAMETER.fields_by_name['normalization'].enum_type = _LOSSPARAMETER_NORMALIZATIONMODE -_LOSSPARAMETER_NORMALIZATIONMODE.containing_type = _LOSSPARAMETER -_CHANNELPERMUTATIONPARAMETER.fields_by_name['action'].message_type = _CHANNELPERMUTATIONACTION -_BIASPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER -_CONVOLUTIONPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER -_CONVOLUTIONPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_CONVOLUTIONPARAMETER.fields_by_name['engine'].enum_type = _CONVOLUTIONPARAMETER_ENGINE -_CONVOLUTIONPARAMETER_ENGINE.containing_type = _CONVOLUTIONPARAMETER -_DATAPARAMETER.fields_by_name['backend'].enum_type = _DATAPARAMETER_DB -_DATAPARAMETER_DB.containing_type = _DATAPARAMETER -_RESIZEPARAMETER.fields_by_name['resize_mode'].enum_type = _RESIZEPARAMETER_RESIZE_MODE -_RESIZEPARAMETER.fields_by_name['pad_mode'].enum_type = _RESIZEPARAMETER_PAD_MODE -_RESIZEPARAMETER.fields_by_name['interp_mode'].enum_type = _RESIZEPARAMETER_INTERP_MODE -_RESIZEPARAMETER_RESIZE_MODE.containing_type = _RESIZEPARAMETER -_RESIZEPARAMETER_PAD_MODE.containing_type = _RESIZEPARAMETER -_RESIZEPARAMETER_INTERP_MODE.containing_type = _RESIZEPARAMETER -_SAVEOUTPUTPARAMETER.fields_by_name['resize_param'].message_type = _RESIZEPARAMETER -_DETECTIONOUTPUTPARAMETER.fields_by_name['nms_param'].message_type = _NONMAXIMUMSUPPRESSIONPARAMETER -_DETECTIONOUTPUTPARAMETER.fields_by_name['save_output_param'].message_type = _SAVEOUTPUTPARAMETER -_DETECTIONOUTPUTPARAMETER.fields_by_name['code_type'].enum_type = _PRIORBOXPARAMETER_CODETYPE -_DUMMYDATAPARAMETER.fields_by_name['data_filler'].message_type 
= _FILLERPARAMETER -_DUMMYDATAPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE -_ELTWISEPARAMETER.fields_by_name['operation'].enum_type = _ELTWISEPARAMETER_ELTWISEOP -_ELTWISEPARAMETER_ELTWISEOP.containing_type = _ELTWISEPARAMETER -_EMBEDPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER -_EMBEDPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_HINGELOSSPARAMETER.fields_by_name['norm'].enum_type = _HINGELOSSPARAMETER_NORM -_HINGELOSSPARAMETER_NORM.containing_type = _HINGELOSSPARAMETER -_INNERPRODUCTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER -_INNERPRODUCTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_INPUTPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE -_LRNPARAMETER.fields_by_name['norm_region'].enum_type = _LRNPARAMETER_NORMREGION -_LRNPARAMETER.fields_by_name['engine'].enum_type = _LRNPARAMETER_ENGINE -_LRNPARAMETER_NORMREGION.containing_type = _LRNPARAMETER -_LRNPARAMETER_ENGINE.containing_type = _LRNPARAMETER -_PARAMETERPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE -_POOLINGPARAMETER.fields_by_name['pool'].enum_type = _POOLINGPARAMETER_POOLMETHOD -_POOLINGPARAMETER.fields_by_name['engine'].enum_type = _POOLINGPARAMETER_ENGINE -_POOLINGPARAMETER_POOLMETHOD.containing_type = _POOLINGPARAMETER -_POOLINGPARAMETER_ENGINE.containing_type = _POOLINGPARAMETER -_PRIORBOXPARAMETER_CODETYPE.containing_type = _PRIORBOXPARAMETER -_RECURRENTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER -_RECURRENTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_REDUCTIONPARAMETER.fields_by_name['operation'].enum_type = _REDUCTIONPARAMETER_REDUCTIONOP -_REDUCTIONPARAMETER_REDUCTIONOP.containing_type = _REDUCTIONPARAMETER -_RELUPARAMETER.fields_by_name['engine'].enum_type = _RELUPARAMETER_ENGINE -_RELUPARAMETER_ENGINE.containing_type = _RELUPARAMETER -_RESHAPEPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE -_SCALEPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER -_SCALEPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_SIGMOIDPARAMETER.fields_by_name['engine'].enum_type = _SIGMOIDPARAMETER_ENGINE -_SIGMOIDPARAMETER_ENGINE.containing_type = _SIGMOIDPARAMETER -_SOFTMAXPARAMETER.fields_by_name['engine'].enum_type = _SOFTMAXPARAMETER_ENGINE -_SOFTMAXPARAMETER_ENGINE.containing_type = _SOFTMAXPARAMETER -_TANHPARAMETER.fields_by_name['engine'].enum_type = _TANHPARAMETER_ENGINE -_TANHPARAMETER_ENGINE.containing_type = _TANHPARAMETER -_SPPPARAMETER.fields_by_name['pool'].enum_type = _SPPPARAMETER_POOLMETHOD -_SPPPARAMETER.fields_by_name['engine'].enum_type = _SPPPARAMETER_ENGINE -_SPPPARAMETER_POOLMETHOD.containing_type = _SPPPARAMETER -_SPPPARAMETER_ENGINE.containing_type = _SPPPARAMETER -_V1LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE -_V1LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE -_V1LAYERPARAMETER.fields_by_name['type'].enum_type = _V1LAYERPARAMETER_LAYERTYPE -_V1LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO -_V1LAYERPARAMETER.fields_by_name['blob_share_mode'].enum_type = _V1LAYERPARAMETER_DIMCHECKMODE -_V1LAYERPARAMETER.fields_by_name['accuracy_param'].message_type = _ACCURACYPARAMETER -_V1LAYERPARAMETER.fields_by_name['argmax_param'].message_type = _ARGMAXPARAMETER -_V1LAYERPARAMETER.fields_by_name['concat_param'].message_type = _CONCATPARAMETER 
-_V1LAYERPARAMETER.fields_by_name['contrastive_loss_param'].message_type = _CONTRASTIVELOSSPARAMETER -_V1LAYERPARAMETER.fields_by_name['convolution_param'].message_type = _CONVOLUTIONPARAMETER -_V1LAYERPARAMETER.fields_by_name['data_param'].message_type = _DATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['dropout_param'].message_type = _DROPOUTPARAMETER -_V1LAYERPARAMETER.fields_by_name['dummy_data_param'].message_type = _DUMMYDATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['eltwise_param'].message_type = _ELTWISEPARAMETER -_V1LAYERPARAMETER.fields_by_name['exp_param'].message_type = _EXPPARAMETER -_V1LAYERPARAMETER.fields_by_name['hdf5_data_param'].message_type = _HDF5DATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER -_V1LAYERPARAMETER.fields_by_name['hinge_loss_param'].message_type = _HINGELOSSPARAMETER -_V1LAYERPARAMETER.fields_by_name['image_data_param'].message_type = _IMAGEDATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['infogain_loss_param'].message_type = _INFOGAINLOSSPARAMETER -_V1LAYERPARAMETER.fields_by_name['inner_product_param'].message_type = _INNERPRODUCTPARAMETER -_V1LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER -_V1LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER -_V1LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER -_V1LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER -_V1LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER -_V1LAYERPARAMETER.fields_by_name['sigmoid_param'].message_type = _SIGMOIDPARAMETER -_V1LAYERPARAMETER.fields_by_name['softmax_param'].message_type = _SOFTMAXPARAMETER -_V1LAYERPARAMETER.fields_by_name['slice_param'].message_type = _SLICEPARAMETER -_V1LAYERPARAMETER.fields_by_name['tanh_param'].message_type = _TANHPARAMETER -_V1LAYERPARAMETER.fields_by_name['threshold_param'].message_type = _THRESHOLDPARAMETER -_V1LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER -_V1LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER -_V1LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER -_V1LAYERPARAMETER.fields_by_name['layer'].message_type = _V0LAYERPARAMETER -_V1LAYERPARAMETER_LAYERTYPE.containing_type = _V1LAYERPARAMETER -_V1LAYERPARAMETER_DIMCHECKMODE.containing_type = _V1LAYERPARAMETER -_V0LAYERPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER -_V0LAYERPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER -_V0LAYERPARAMETER.fields_by_name['pool'].enum_type = _V0LAYERPARAMETER_POOLMETHOD -_V0LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO -_V0LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER -_V0LAYERPARAMETER_POOLMETHOD.containing_type = _V0LAYERPARAMETER -_PRELUPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['mirror'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['translate'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['rotate'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['zoom'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['squeeze'].message_type = _RANDOMGENERATORPARAMETER 
-_AUGMENTATIONPARAMETER.fields_by_name['translate_x'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['translate_y'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['gamma'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['brightness'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['contrast'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['color'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['lmult_pow'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['lmult_mult'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['lmult_add'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['sat_pow'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['sat_mult'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['sat_add'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['col_pow'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['col_mult'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['col_add'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['ladd_pow'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['ladd_mult'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['ladd_add'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['col_rotate'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['fog_amount'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['fog_size'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['motion_blur_angle'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['motion_blur_size'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['shadow_angle'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['shadow_distance'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['shadow_strength'].message_type = _RANDOMGENERATORPARAMETER -_AUGMENTATIONPARAMETER.fields_by_name['noise'].message_type = _RANDOMGENERATORPARAMETER -_FLOWWARPPARAMETER.fields_by_name['fill_value'].enum_type = _FLOWWARPPARAMETER_FILLPARAMETER -_FLOWWARPPARAMETER_FILLPARAMETER.containing_type = _FLOWWARPPARAMETER -_CORRELATIONPARAMETER.fields_by_name['correlation_type'].enum_type = _CORRELATIONPARAMETER_CORRELATIONTYPE -_CORRELATIONPARAMETER_CORRELATIONTYPE.containing_type = _CORRELATIONPARAMETER -_RESAMPLEPARAMETER.fields_by_name['type'].enum_type = _RESAMPLEPARAMETER_RESAMPLETYPE -_RESAMPLEPARAMETER_RESAMPLETYPE.containing_type = _RESAMPLEPARAMETER -DESCRIPTOR.message_types_by_name['BlobShape'] = _BLOBSHAPE -DESCRIPTOR.message_types_by_name['BlobProto'] = _BLOBPROTO -DESCRIPTOR.message_types_by_name['BlobProtoVector'] = _BLOBPROTOVECTOR -DESCRIPTOR.message_types_by_name['CosineSimilarityBatchParameter'] = _COSINESIMILARITYBATCHPARAMETER -DESCRIPTOR.message_types_by_name['Datum'] = _DATUM -DESCRIPTOR.message_types_by_name['LabelMapItem'] = _LABELMAPITEM -DESCRIPTOR.message_types_by_name['LabelMap'] = _LABELMAP -DESCRIPTOR.message_types_by_name['NormalizedBBox'] 
= _NORMALIZEDBBOX -DESCRIPTOR.message_types_by_name['FillerParameter'] = _FILLERPARAMETER -DESCRIPTOR.message_types_by_name['NetParameter'] = _NETPARAMETER -DESCRIPTOR.message_types_by_name['SolverParameter'] = _SOLVERPARAMETER -DESCRIPTOR.message_types_by_name['SolverState'] = _SOLVERSTATE -DESCRIPTOR.message_types_by_name['NetState'] = _NETSTATE -DESCRIPTOR.message_types_by_name['NetStateRule'] = _NETSTATERULE -DESCRIPTOR.message_types_by_name['SpatialTransformerParameter'] = _SPATIALTRANSFORMERPARAMETER -DESCRIPTOR.message_types_by_name['PowerFileParameter'] = _POWERFILEPARAMETER -DESCRIPTOR.message_types_by_name['STLossParameter'] = _STLOSSPARAMETER -DESCRIPTOR.message_types_by_name['LocLossParameter'] = _LOCLOSSPARAMETER -DESCRIPTOR.message_types_by_name['ParamSpec'] = _PARAMSPEC -DESCRIPTOR.message_types_by_name['LayerParameter'] = _LAYERPARAMETER -DESCRIPTOR.message_types_by_name['InterpParameter'] = _INTERPPARAMETER -DESCRIPTOR.message_types_by_name['RandomSamplingSoftmaxLossParameter'] = _RANDOMSAMPLINGSOFTMAXLOSSPARAMETER -DESCRIPTOR.message_types_by_name['ProposalParameter'] = _PROPOSALPARAMETER -DESCRIPTOR.message_types_by_name['NormalizeParameter'] = _NORMALIZEPARAMETER -DESCRIPTOR.message_types_by_name['PermuteParameter'] = _PERMUTEPARAMETER -DESCRIPTOR.message_types_by_name['TransformationParameter'] = _TRANSFORMATIONPARAMETER -DESCRIPTOR.message_types_by_name['LossParameter'] = _LOSSPARAMETER -DESCRIPTOR.message_types_by_name['AccuracyParameter'] = _ACCURACYPARAMETER -DESCRIPTOR.message_types_by_name['ArgMaxParameter'] = _ARGMAXPARAMETER -DESCRIPTOR.message_types_by_name['ChannelPermutationAction'] = _CHANNELPERMUTATIONACTION -DESCRIPTOR.message_types_by_name['ChannelPermutationParameter'] = _CHANNELPERMUTATIONPARAMETER -DESCRIPTOR.message_types_by_name['ConcatParameter'] = _CONCATPARAMETER -DESCRIPTOR.message_types_by_name['BatchNormParameter'] = _BATCHNORMPARAMETER -DESCRIPTOR.message_types_by_name['BoxAnnotatorOHEMParameter'] = _BOXANNOTATOROHEMPARAMETER -DESCRIPTOR.message_types_by_name['BiasParameter'] = _BIASPARAMETER -DESCRIPTOR.message_types_by_name['ContrastiveLossParameter'] = _CONTRASTIVELOSSPARAMETER -DESCRIPTOR.message_types_by_name['ConvolutionParameter'] = _CONVOLUTIONPARAMETER -DESCRIPTOR.message_types_by_name['CropParameter'] = _CROPPARAMETER -DESCRIPTOR.message_types_by_name['CTCDecoderParameter'] = _CTCDECODERPARAMETER -DESCRIPTOR.message_types_by_name['CTCLossParameter'] = _CTCLOSSPARAMETER -DESCRIPTOR.message_types_by_name['DataParameter'] = _DATAPARAMETER -DESCRIPTOR.message_types_by_name['NonMaximumSuppressionParameter'] = _NONMAXIMUMSUPPRESSIONPARAMETER -DESCRIPTOR.message_types_by_name['ResizeParameter'] = _RESIZEPARAMETER -DESCRIPTOR.message_types_by_name['SaveOutputParameter'] = _SAVEOUTPUTPARAMETER -DESCRIPTOR.message_types_by_name['DetectionOutputParameter'] = _DETECTIONOUTPUTPARAMETER -DESCRIPTOR.message_types_by_name['DropoutParameter'] = _DROPOUTPARAMETER -DESCRIPTOR.message_types_by_name['DummyDataParameter'] = _DUMMYDATAPARAMETER -DESCRIPTOR.message_types_by_name['EltwiseParameter'] = _ELTWISEPARAMETER -DESCRIPTOR.message_types_by_name['ELUParameter'] = _ELUPARAMETER -DESCRIPTOR.message_types_by_name['EmbedParameter'] = _EMBEDPARAMETER -DESCRIPTOR.message_types_by_name['ExpParameter'] = _EXPPARAMETER -DESCRIPTOR.message_types_by_name['FlattenParameter'] = _FLATTENPARAMETER -DESCRIPTOR.message_types_by_name['HDF5DataParameter'] = _HDF5DATAPARAMETER -DESCRIPTOR.message_types_by_name['HDF5OutputParameter'] = _HDF5OUTPUTPARAMETER 
-DESCRIPTOR.message_types_by_name['HingeLossParameter'] = _HINGELOSSPARAMETER -DESCRIPTOR.message_types_by_name['ImageDataParameter'] = _IMAGEDATAPARAMETER -DESCRIPTOR.message_types_by_name['InfogainLossParameter'] = _INFOGAINLOSSPARAMETER -DESCRIPTOR.message_types_by_name['InnerProductParameter'] = _INNERPRODUCTPARAMETER -DESCRIPTOR.message_types_by_name['InputParameter'] = _INPUTPARAMETER -DESCRIPTOR.message_types_by_name['LogParameter'] = _LOGPARAMETER -DESCRIPTOR.message_types_by_name['LRNParameter'] = _LRNPARAMETER -DESCRIPTOR.message_types_by_name['GRNParameter'] = _GRNPARAMETER -DESCRIPTOR.message_types_by_name['MemoryDataParameter'] = _MEMORYDATAPARAMETER -DESCRIPTOR.message_types_by_name['MVNParameter'] = _MVNPARAMETER -DESCRIPTOR.message_types_by_name['ParameterParameter'] = _PARAMETERPARAMETER -DESCRIPTOR.message_types_by_name['PoolingParameter'] = _POOLINGPARAMETER -DESCRIPTOR.message_types_by_name['PowerParameter'] = _POWERPARAMETER -DESCRIPTOR.message_types_by_name['PriorBoxParameter'] = _PRIORBOXPARAMETER -DESCRIPTOR.message_types_by_name['PSROIPoolingParameter'] = _PSROIPOOLINGPARAMETER -DESCRIPTOR.message_types_by_name['PythonParameter'] = _PYTHONPARAMETER -DESCRIPTOR.message_types_by_name['RecurrentParameter'] = _RECURRENTPARAMETER -DESCRIPTOR.message_types_by_name['ReductionParameter'] = _REDUCTIONPARAMETER -DESCRIPTOR.message_types_by_name['ReLUParameter'] = _RELUPARAMETER -DESCRIPTOR.message_types_by_name['ReLU6Parameter'] = _RELU6PARAMETER -DESCRIPTOR.message_types_by_name['ReshapeParameter'] = _RESHAPEPARAMETER -DESCRIPTOR.message_types_by_name['ReverseParameter'] = _REVERSEPARAMETER -DESCRIPTOR.message_types_by_name['ROIPoolingParameter'] = _ROIPOOLINGPARAMETER -DESCRIPTOR.message_types_by_name['ROIWarpingTestParameter'] = _ROIWARPINGTESTPARAMETER -DESCRIPTOR.message_types_by_name['ROIWarpingParameter'] = _ROIWARPINGPARAMETER -DESCRIPTOR.message_types_by_name['ScaleParameter'] = _SCALEPARAMETER -DESCRIPTOR.message_types_by_name['SigmoidParameter'] = _SIGMOIDPARAMETER -DESCRIPTOR.message_types_by_name['SliceParameter'] = _SLICEPARAMETER -DESCRIPTOR.message_types_by_name['SmoothL1LossParameter'] = _SMOOTHL1LOSSPARAMETER -DESCRIPTOR.message_types_by_name['SoftmaxParameter'] = _SOFTMAXPARAMETER -DESCRIPTOR.message_types_by_name['TanHParameter'] = _TANHPARAMETER -DESCRIPTOR.message_types_by_name['TileParameter'] = _TILEPARAMETER -DESCRIPTOR.message_types_by_name['ThresholdParameter'] = _THRESHOLDPARAMETER -DESCRIPTOR.message_types_by_name['WindowDataParameter'] = _WINDOWDATAPARAMETER -DESCRIPTOR.message_types_by_name['SPPParameter'] = _SPPPARAMETER -DESCRIPTOR.message_types_by_name['V1LayerParameter'] = _V1LAYERPARAMETER -DESCRIPTOR.message_types_by_name['V0LayerParameter'] = _V0LAYERPARAMETER -DESCRIPTOR.message_types_by_name['PReLUParameter'] = _PRELUPARAMETER -DESCRIPTOR.message_types_by_name['RegionYoloParameter'] = _REGIONYOLOPARAMETER -DESCRIPTOR.message_types_by_name['ReorgYoloParameter'] = _REORGYOLOPARAMETER -DESCRIPTOR.message_types_by_name['RandomGeneratorParameter'] = _RANDOMGENERATORPARAMETER -DESCRIPTOR.message_types_by_name['CoeffScheduleParameter'] = _COEFFSCHEDULEPARAMETER -DESCRIPTOR.message_types_by_name['AugmentationCoeff'] = _AUGMENTATIONCOEFF -DESCRIPTOR.message_types_by_name['AugmentationParameter'] = _AUGMENTATIONPARAMETER -DESCRIPTOR.message_types_by_name['FlowWarpParameter'] = _FLOWWARPPARAMETER -DESCRIPTOR.message_types_by_name['CorrelationParameter'] = _CORRELATIONPARAMETER -DESCRIPTOR.message_types_by_name['ResampleParameter'] = 
_RESAMPLEPARAMETER -DESCRIPTOR.message_types_by_name['AccumParameter'] = _ACCUMPARAMETER -DESCRIPTOR.message_types_by_name['ShuffleChannelParameter'] = _SHUFFLECHANNELPARAMETER -DESCRIPTOR.enum_types_by_name['Phase'] = _PHASE - -BlobShape = _reflection.GeneratedProtocolMessageType('BlobShape', (_message.Message,), dict( - DESCRIPTOR = _BLOBSHAPE, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.BlobShape) - )) -_sym_db.RegisterMessage(BlobShape) - -BlobProto = _reflection.GeneratedProtocolMessageType('BlobProto', (_message.Message,), dict( - DESCRIPTOR = _BLOBPROTO, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.BlobProto) - )) -_sym_db.RegisterMessage(BlobProto) - -BlobProtoVector = _reflection.GeneratedProtocolMessageType('BlobProtoVector', (_message.Message,), dict( - DESCRIPTOR = _BLOBPROTOVECTOR, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.BlobProtoVector) - )) -_sym_db.RegisterMessage(BlobProtoVector) - -CosineSimilarityBatchParameter = _reflection.GeneratedProtocolMessageType('CosineSimilarityBatchParameter', (_message.Message,), dict( - DESCRIPTOR = _COSINESIMILARITYBATCHPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.CosineSimilarityBatchParameter) - )) -_sym_db.RegisterMessage(CosineSimilarityBatchParameter) - -Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), dict( - DESCRIPTOR = _DATUM, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.Datum) - )) -_sym_db.RegisterMessage(Datum) - -LabelMapItem = _reflection.GeneratedProtocolMessageType('LabelMapItem', (_message.Message,), dict( - DESCRIPTOR = _LABELMAPITEM, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.LabelMapItem) - )) -_sym_db.RegisterMessage(LabelMapItem) - -LabelMap = _reflection.GeneratedProtocolMessageType('LabelMap', (_message.Message,), dict( - DESCRIPTOR = _LABELMAP, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.LabelMap) - )) -_sym_db.RegisterMessage(LabelMap) - -NormalizedBBox = _reflection.GeneratedProtocolMessageType('NormalizedBBox', (_message.Message,), dict( - DESCRIPTOR = _NORMALIZEDBBOX, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.NormalizedBBox) - )) -_sym_db.RegisterMessage(NormalizedBBox) - -FillerParameter = _reflection.GeneratedProtocolMessageType('FillerParameter', (_message.Message,), dict( - DESCRIPTOR = _FILLERPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.FillerParameter) - )) -_sym_db.RegisterMessage(FillerParameter) - -NetParameter = _reflection.GeneratedProtocolMessageType('NetParameter', (_message.Message,), dict( - DESCRIPTOR = _NETPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.NetParameter) - )) -_sym_db.RegisterMessage(NetParameter) - -SolverParameter = _reflection.GeneratedProtocolMessageType('SolverParameter', (_message.Message,), dict( - DESCRIPTOR = _SOLVERPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.SolverParameter) - )) -_sym_db.RegisterMessage(SolverParameter) - -SolverState = _reflection.GeneratedProtocolMessageType('SolverState', (_message.Message,), dict( - DESCRIPTOR = _SOLVERSTATE, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.SolverState) - )) -_sym_db.RegisterMessage(SolverState) - -NetState = 
_reflection.GeneratedProtocolMessageType('NetState', (_message.Message,), dict( - DESCRIPTOR = _NETSTATE, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.NetState) - )) -_sym_db.RegisterMessage(NetState) - -NetStateRule = _reflection.GeneratedProtocolMessageType('NetStateRule', (_message.Message,), dict( - DESCRIPTOR = _NETSTATERULE, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.NetStateRule) - )) -_sym_db.RegisterMessage(NetStateRule) - -SpatialTransformerParameter = _reflection.GeneratedProtocolMessageType('SpatialTransformerParameter', (_message.Message,), dict( - DESCRIPTOR = _SPATIALTRANSFORMERPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.SpatialTransformerParameter) - )) -_sym_db.RegisterMessage(SpatialTransformerParameter) - -PowerFileParameter = _reflection.GeneratedProtocolMessageType('PowerFileParameter', (_message.Message,), dict( - DESCRIPTOR = _POWERFILEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.PowerFileParameter) - )) -_sym_db.RegisterMessage(PowerFileParameter) - -STLossParameter = _reflection.GeneratedProtocolMessageType('STLossParameter', (_message.Message,), dict( - DESCRIPTOR = _STLOSSPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.STLossParameter) - )) -_sym_db.RegisterMessage(STLossParameter) - -LocLossParameter = _reflection.GeneratedProtocolMessageType('LocLossParameter', (_message.Message,), dict( - DESCRIPTOR = _LOCLOSSPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.LocLossParameter) - )) -_sym_db.RegisterMessage(LocLossParameter) - -ParamSpec = _reflection.GeneratedProtocolMessageType('ParamSpec', (_message.Message,), dict( - DESCRIPTOR = _PARAMSPEC, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ParamSpec) - )) -_sym_db.RegisterMessage(ParamSpec) - -LayerParameter = _reflection.GeneratedProtocolMessageType('LayerParameter', (_message.Message,), dict( - DESCRIPTOR = _LAYERPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.LayerParameter) - )) -_sym_db.RegisterMessage(LayerParameter) - -InterpParameter = _reflection.GeneratedProtocolMessageType('InterpParameter', (_message.Message,), dict( - DESCRIPTOR = _INTERPPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.InterpParameter) - )) -_sym_db.RegisterMessage(InterpParameter) - -RandomSamplingSoftmaxLossParameter = _reflection.GeneratedProtocolMessageType('RandomSamplingSoftmaxLossParameter', (_message.Message,), dict( - DESCRIPTOR = _RANDOMSAMPLINGSOFTMAXLOSSPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.RandomSamplingSoftmaxLossParameter) - )) -_sym_db.RegisterMessage(RandomSamplingSoftmaxLossParameter) - -ProposalParameter = _reflection.GeneratedProtocolMessageType('ProposalParameter', (_message.Message,), dict( - DESCRIPTOR = _PROPOSALPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ProposalParameter) - )) -_sym_db.RegisterMessage(ProposalParameter) - -NormalizeParameter = _reflection.GeneratedProtocolMessageType('NormalizeParameter', (_message.Message,), dict( - DESCRIPTOR = _NORMALIZEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.NormalizeParameter) - )) -_sym_db.RegisterMessage(NormalizeParameter) - -PermuteParameter = 
_reflection.GeneratedProtocolMessageType('PermuteParameter', (_message.Message,), dict( - DESCRIPTOR = _PERMUTEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.PermuteParameter) - )) -_sym_db.RegisterMessage(PermuteParameter) - -TransformationParameter = _reflection.GeneratedProtocolMessageType('TransformationParameter', (_message.Message,), dict( - DESCRIPTOR = _TRANSFORMATIONPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.TransformationParameter) - )) -_sym_db.RegisterMessage(TransformationParameter) - -LossParameter = _reflection.GeneratedProtocolMessageType('LossParameter', (_message.Message,), dict( - DESCRIPTOR = _LOSSPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.LossParameter) - )) -_sym_db.RegisterMessage(LossParameter) - -AccuracyParameter = _reflection.GeneratedProtocolMessageType('AccuracyParameter', (_message.Message,), dict( - DESCRIPTOR = _ACCURACYPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.AccuracyParameter) - )) -_sym_db.RegisterMessage(AccuracyParameter) - -ArgMaxParameter = _reflection.GeneratedProtocolMessageType('ArgMaxParameter', (_message.Message,), dict( - DESCRIPTOR = _ARGMAXPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ArgMaxParameter) - )) -_sym_db.RegisterMessage(ArgMaxParameter) - -ChannelPermutationAction = _reflection.GeneratedProtocolMessageType('ChannelPermutationAction', (_message.Message,), dict( - DESCRIPTOR = _CHANNELPERMUTATIONACTION, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ChannelPermutationAction) - )) -_sym_db.RegisterMessage(ChannelPermutationAction) - -ChannelPermutationParameter = _reflection.GeneratedProtocolMessageType('ChannelPermutationParameter', (_message.Message,), dict( - DESCRIPTOR = _CHANNELPERMUTATIONPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ChannelPermutationParameter) - )) -_sym_db.RegisterMessage(ChannelPermutationParameter) - -ConcatParameter = _reflection.GeneratedProtocolMessageType('ConcatParameter', (_message.Message,), dict( - DESCRIPTOR = _CONCATPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ConcatParameter) - )) -_sym_db.RegisterMessage(ConcatParameter) - -BatchNormParameter = _reflection.GeneratedProtocolMessageType('BatchNormParameter', (_message.Message,), dict( - DESCRIPTOR = _BATCHNORMPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.BatchNormParameter) - )) -_sym_db.RegisterMessage(BatchNormParameter) - -BoxAnnotatorOHEMParameter = _reflection.GeneratedProtocolMessageType('BoxAnnotatorOHEMParameter', (_message.Message,), dict( - DESCRIPTOR = _BOXANNOTATOROHEMPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.BoxAnnotatorOHEMParameter) - )) -_sym_db.RegisterMessage(BoxAnnotatorOHEMParameter) - -BiasParameter = _reflection.GeneratedProtocolMessageType('BiasParameter', (_message.Message,), dict( - DESCRIPTOR = _BIASPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.BiasParameter) - )) -_sym_db.RegisterMessage(BiasParameter) - -ContrastiveLossParameter = _reflection.GeneratedProtocolMessageType('ContrastiveLossParameter', (_message.Message,), dict( - DESCRIPTOR = _CONTRASTIVELOSSPARAMETER, - __module__ = 'mo_caffe_pb2' - # 
@@protoc_insertion_point(class_scope:mo_caffe.ContrastiveLossParameter) - )) -_sym_db.RegisterMessage(ContrastiveLossParameter) - -ConvolutionParameter = _reflection.GeneratedProtocolMessageType('ConvolutionParameter', (_message.Message,), dict( - DESCRIPTOR = _CONVOLUTIONPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ConvolutionParameter) - )) -_sym_db.RegisterMessage(ConvolutionParameter) - -CropParameter = _reflection.GeneratedProtocolMessageType('CropParameter', (_message.Message,), dict( - DESCRIPTOR = _CROPPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.CropParameter) - )) -_sym_db.RegisterMessage(CropParameter) - -CTCDecoderParameter = _reflection.GeneratedProtocolMessageType('CTCDecoderParameter', (_message.Message,), dict( - DESCRIPTOR = _CTCDECODERPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.CTCDecoderParameter) - )) -_sym_db.RegisterMessage(CTCDecoderParameter) - -CTCLossParameter = _reflection.GeneratedProtocolMessageType('CTCLossParameter', (_message.Message,), dict( - DESCRIPTOR = _CTCLOSSPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.CTCLossParameter) - )) -_sym_db.RegisterMessage(CTCLossParameter) - -DataParameter = _reflection.GeneratedProtocolMessageType('DataParameter', (_message.Message,), dict( - DESCRIPTOR = _DATAPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.DataParameter) - )) -_sym_db.RegisterMessage(DataParameter) - -NonMaximumSuppressionParameter = _reflection.GeneratedProtocolMessageType('NonMaximumSuppressionParameter', (_message.Message,), dict( - DESCRIPTOR = _NONMAXIMUMSUPPRESSIONPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.NonMaximumSuppressionParameter) - )) -_sym_db.RegisterMessage(NonMaximumSuppressionParameter) - -ResizeParameter = _reflection.GeneratedProtocolMessageType('ResizeParameter', (_message.Message,), dict( - DESCRIPTOR = _RESIZEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ResizeParameter) - )) -_sym_db.RegisterMessage(ResizeParameter) - -SaveOutputParameter = _reflection.GeneratedProtocolMessageType('SaveOutputParameter', (_message.Message,), dict( - DESCRIPTOR = _SAVEOUTPUTPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.SaveOutputParameter) - )) -_sym_db.RegisterMessage(SaveOutputParameter) - -DetectionOutputParameter = _reflection.GeneratedProtocolMessageType('DetectionOutputParameter', (_message.Message,), dict( - DESCRIPTOR = _DETECTIONOUTPUTPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.DetectionOutputParameter) - )) -_sym_db.RegisterMessage(DetectionOutputParameter) - -DropoutParameter = _reflection.GeneratedProtocolMessageType('DropoutParameter', (_message.Message,), dict( - DESCRIPTOR = _DROPOUTPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.DropoutParameter) - )) -_sym_db.RegisterMessage(DropoutParameter) - -DummyDataParameter = _reflection.GeneratedProtocolMessageType('DummyDataParameter', (_message.Message,), dict( - DESCRIPTOR = _DUMMYDATAPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.DummyDataParameter) - )) -_sym_db.RegisterMessage(DummyDataParameter) - -EltwiseParameter = _reflection.GeneratedProtocolMessageType('EltwiseParameter', 
(_message.Message,), dict( - DESCRIPTOR = _ELTWISEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.EltwiseParameter) - )) -_sym_db.RegisterMessage(EltwiseParameter) - -ELUParameter = _reflection.GeneratedProtocolMessageType('ELUParameter', (_message.Message,), dict( - DESCRIPTOR = _ELUPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ELUParameter) - )) -_sym_db.RegisterMessage(ELUParameter) - -EmbedParameter = _reflection.GeneratedProtocolMessageType('EmbedParameter', (_message.Message,), dict( - DESCRIPTOR = _EMBEDPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.EmbedParameter) - )) -_sym_db.RegisterMessage(EmbedParameter) - -ExpParameter = _reflection.GeneratedProtocolMessageType('ExpParameter', (_message.Message,), dict( - DESCRIPTOR = _EXPPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ExpParameter) - )) -_sym_db.RegisterMessage(ExpParameter) - -FlattenParameter = _reflection.GeneratedProtocolMessageType('FlattenParameter', (_message.Message,), dict( - DESCRIPTOR = _FLATTENPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.FlattenParameter) - )) -_sym_db.RegisterMessage(FlattenParameter) - -HDF5DataParameter = _reflection.GeneratedProtocolMessageType('HDF5DataParameter', (_message.Message,), dict( - DESCRIPTOR = _HDF5DATAPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.HDF5DataParameter) - )) -_sym_db.RegisterMessage(HDF5DataParameter) - -HDF5OutputParameter = _reflection.GeneratedProtocolMessageType('HDF5OutputParameter', (_message.Message,), dict( - DESCRIPTOR = _HDF5OUTPUTPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.HDF5OutputParameter) - )) -_sym_db.RegisterMessage(HDF5OutputParameter) - -HingeLossParameter = _reflection.GeneratedProtocolMessageType('HingeLossParameter', (_message.Message,), dict( - DESCRIPTOR = _HINGELOSSPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.HingeLossParameter) - )) -_sym_db.RegisterMessage(HingeLossParameter) - -ImageDataParameter = _reflection.GeneratedProtocolMessageType('ImageDataParameter', (_message.Message,), dict( - DESCRIPTOR = _IMAGEDATAPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ImageDataParameter) - )) -_sym_db.RegisterMessage(ImageDataParameter) - -InfogainLossParameter = _reflection.GeneratedProtocolMessageType('InfogainLossParameter', (_message.Message,), dict( - DESCRIPTOR = _INFOGAINLOSSPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.InfogainLossParameter) - )) -_sym_db.RegisterMessage(InfogainLossParameter) - -InnerProductParameter = _reflection.GeneratedProtocolMessageType('InnerProductParameter', (_message.Message,), dict( - DESCRIPTOR = _INNERPRODUCTPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.InnerProductParameter) - )) -_sym_db.RegisterMessage(InnerProductParameter) - -InputParameter = _reflection.GeneratedProtocolMessageType('InputParameter', (_message.Message,), dict( - DESCRIPTOR = _INPUTPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.InputParameter) - )) -_sym_db.RegisterMessage(InputParameter) - -LogParameter = _reflection.GeneratedProtocolMessageType('LogParameter', (_message.Message,), dict( - DESCRIPTOR = 
_LOGPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.LogParameter) - )) -_sym_db.RegisterMessage(LogParameter) - -LRNParameter = _reflection.GeneratedProtocolMessageType('LRNParameter', (_message.Message,), dict( - DESCRIPTOR = _LRNPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.LRNParameter) - )) -_sym_db.RegisterMessage(LRNParameter) - -GRNParameter = _reflection.GeneratedProtocolMessageType('GRNParameter', (_message.Message,), dict( - DESCRIPTOR = _GRNPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.GRNParameter) - )) -_sym_db.RegisterMessage(GRNParameter) - -MemoryDataParameter = _reflection.GeneratedProtocolMessageType('MemoryDataParameter', (_message.Message,), dict( - DESCRIPTOR = _MEMORYDATAPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.MemoryDataParameter) - )) -_sym_db.RegisterMessage(MemoryDataParameter) - -MVNParameter = _reflection.GeneratedProtocolMessageType('MVNParameter', (_message.Message,), dict( - DESCRIPTOR = _MVNPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.MVNParameter) - )) -_sym_db.RegisterMessage(MVNParameter) - -ParameterParameter = _reflection.GeneratedProtocolMessageType('ParameterParameter', (_message.Message,), dict( - DESCRIPTOR = _PARAMETERPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ParameterParameter) - )) -_sym_db.RegisterMessage(ParameterParameter) - -PoolingParameter = _reflection.GeneratedProtocolMessageType('PoolingParameter', (_message.Message,), dict( - DESCRIPTOR = _POOLINGPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.PoolingParameter) - )) -_sym_db.RegisterMessage(PoolingParameter) - -PowerParameter = _reflection.GeneratedProtocolMessageType('PowerParameter', (_message.Message,), dict( - DESCRIPTOR = _POWERPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.PowerParameter) - )) -_sym_db.RegisterMessage(PowerParameter) - -PriorBoxParameter = _reflection.GeneratedProtocolMessageType('PriorBoxParameter', (_message.Message,), dict( - DESCRIPTOR = _PRIORBOXPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.PriorBoxParameter) - )) -_sym_db.RegisterMessage(PriorBoxParameter) - -PSROIPoolingParameter = _reflection.GeneratedProtocolMessageType('PSROIPoolingParameter', (_message.Message,), dict( - DESCRIPTOR = _PSROIPOOLINGPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.PSROIPoolingParameter) - )) -_sym_db.RegisterMessage(PSROIPoolingParameter) - -PythonParameter = _reflection.GeneratedProtocolMessageType('PythonParameter', (_message.Message,), dict( - DESCRIPTOR = _PYTHONPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.PythonParameter) - )) -_sym_db.RegisterMessage(PythonParameter) - -RecurrentParameter = _reflection.GeneratedProtocolMessageType('RecurrentParameter', (_message.Message,), dict( - DESCRIPTOR = _RECURRENTPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.RecurrentParameter) - )) -_sym_db.RegisterMessage(RecurrentParameter) - -ReductionParameter = _reflection.GeneratedProtocolMessageType('ReductionParameter', (_message.Message,), dict( - DESCRIPTOR = _REDUCTIONPARAMETER, - __module__ = 'mo_caffe_pb2' - # 
@@protoc_insertion_point(class_scope:mo_caffe.ReductionParameter) - )) -_sym_db.RegisterMessage(ReductionParameter) - -ReLUParameter = _reflection.GeneratedProtocolMessageType('ReLUParameter', (_message.Message,), dict( - DESCRIPTOR = _RELUPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ReLUParameter) - )) -_sym_db.RegisterMessage(ReLUParameter) - -ReLU6Parameter = _reflection.GeneratedProtocolMessageType('ReLU6Parameter', (_message.Message,), dict( - DESCRIPTOR = _RELU6PARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ReLU6Parameter) - )) -_sym_db.RegisterMessage(ReLU6Parameter) - -ReshapeParameter = _reflection.GeneratedProtocolMessageType('ReshapeParameter', (_message.Message,), dict( - DESCRIPTOR = _RESHAPEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ReshapeParameter) - )) -_sym_db.RegisterMessage(ReshapeParameter) - -ReverseParameter = _reflection.GeneratedProtocolMessageType('ReverseParameter', (_message.Message,), dict( - DESCRIPTOR = _REVERSEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ReverseParameter) - )) -_sym_db.RegisterMessage(ReverseParameter) - -ROIPoolingParameter = _reflection.GeneratedProtocolMessageType('ROIPoolingParameter', (_message.Message,), dict( - DESCRIPTOR = _ROIPOOLINGPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ROIPoolingParameter) - )) -_sym_db.RegisterMessage(ROIPoolingParameter) - -ROIWarpingTestParameter = _reflection.GeneratedProtocolMessageType('ROIWarpingTestParameter', (_message.Message,), dict( - DESCRIPTOR = _ROIWARPINGTESTPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ROIWarpingTestParameter) - )) -_sym_db.RegisterMessage(ROIWarpingTestParameter) - -ROIWarpingParameter = _reflection.GeneratedProtocolMessageType('ROIWarpingParameter', (_message.Message,), dict( - DESCRIPTOR = _ROIWARPINGPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ROIWarpingParameter) - )) -_sym_db.RegisterMessage(ROIWarpingParameter) - -ScaleParameter = _reflection.GeneratedProtocolMessageType('ScaleParameter', (_message.Message,), dict( - DESCRIPTOR = _SCALEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ScaleParameter) - )) -_sym_db.RegisterMessage(ScaleParameter) - -SigmoidParameter = _reflection.GeneratedProtocolMessageType('SigmoidParameter', (_message.Message,), dict( - DESCRIPTOR = _SIGMOIDPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.SigmoidParameter) - )) -_sym_db.RegisterMessage(SigmoidParameter) - -SliceParameter = _reflection.GeneratedProtocolMessageType('SliceParameter', (_message.Message,), dict( - DESCRIPTOR = _SLICEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.SliceParameter) - )) -_sym_db.RegisterMessage(SliceParameter) - -SmoothL1LossParameter = _reflection.GeneratedProtocolMessageType('SmoothL1LossParameter', (_message.Message,), dict( - DESCRIPTOR = _SMOOTHL1LOSSPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.SmoothL1LossParameter) - )) -_sym_db.RegisterMessage(SmoothL1LossParameter) - -SoftmaxParameter = _reflection.GeneratedProtocolMessageType('SoftmaxParameter', (_message.Message,), dict( - DESCRIPTOR = _SOFTMAXPARAMETER, - __module__ = 'mo_caffe_pb2' - # 
@@protoc_insertion_point(class_scope:mo_caffe.SoftmaxParameter) - )) -_sym_db.RegisterMessage(SoftmaxParameter) - -TanHParameter = _reflection.GeneratedProtocolMessageType('TanHParameter', (_message.Message,), dict( - DESCRIPTOR = _TANHPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.TanHParameter) - )) -_sym_db.RegisterMessage(TanHParameter) - -TileParameter = _reflection.GeneratedProtocolMessageType('TileParameter', (_message.Message,), dict( - DESCRIPTOR = _TILEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.TileParameter) - )) -_sym_db.RegisterMessage(TileParameter) - -ThresholdParameter = _reflection.GeneratedProtocolMessageType('ThresholdParameter', (_message.Message,), dict( - DESCRIPTOR = _THRESHOLDPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ThresholdParameter) - )) -_sym_db.RegisterMessage(ThresholdParameter) - -WindowDataParameter = _reflection.GeneratedProtocolMessageType('WindowDataParameter', (_message.Message,), dict( - DESCRIPTOR = _WINDOWDATAPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.WindowDataParameter) - )) -_sym_db.RegisterMessage(WindowDataParameter) - -SPPParameter = _reflection.GeneratedProtocolMessageType('SPPParameter', (_message.Message,), dict( - DESCRIPTOR = _SPPPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.SPPParameter) - )) -_sym_db.RegisterMessage(SPPParameter) - -V1LayerParameter = _reflection.GeneratedProtocolMessageType('V1LayerParameter', (_message.Message,), dict( - DESCRIPTOR = _V1LAYERPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.V1LayerParameter) - )) -_sym_db.RegisterMessage(V1LayerParameter) - -V0LayerParameter = _reflection.GeneratedProtocolMessageType('V0LayerParameter', (_message.Message,), dict( - DESCRIPTOR = _V0LAYERPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.V0LayerParameter) - )) -_sym_db.RegisterMessage(V0LayerParameter) - -PReLUParameter = _reflection.GeneratedProtocolMessageType('PReLUParameter', (_message.Message,), dict( - DESCRIPTOR = _PRELUPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.PReLUParameter) - )) -_sym_db.RegisterMessage(PReLUParameter) - -RegionYoloParameter = _reflection.GeneratedProtocolMessageType('RegionYoloParameter', (_message.Message,), dict( - DESCRIPTOR = _REGIONYOLOPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.RegionYoloParameter) - )) -_sym_db.RegisterMessage(RegionYoloParameter) - -ReorgYoloParameter = _reflection.GeneratedProtocolMessageType('ReorgYoloParameter', (_message.Message,), dict( - DESCRIPTOR = _REORGYOLOPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ReorgYoloParameter) - )) -_sym_db.RegisterMessage(ReorgYoloParameter) - -RandomGeneratorParameter = _reflection.GeneratedProtocolMessageType('RandomGeneratorParameter', (_message.Message,), dict( - DESCRIPTOR = _RANDOMGENERATORPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.RandomGeneratorParameter) - )) -_sym_db.RegisterMessage(RandomGeneratorParameter) - -CoeffScheduleParameter = _reflection.GeneratedProtocolMessageType('CoeffScheduleParameter', (_message.Message,), dict( - DESCRIPTOR = _COEFFSCHEDULEPARAMETER, - __module__ = 'mo_caffe_pb2' - # 
@@protoc_insertion_point(class_scope:mo_caffe.CoeffScheduleParameter) - )) -_sym_db.RegisterMessage(CoeffScheduleParameter) - -AugmentationCoeff = _reflection.GeneratedProtocolMessageType('AugmentationCoeff', (_message.Message,), dict( - DESCRIPTOR = _AUGMENTATIONCOEFF, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.AugmentationCoeff) - )) -_sym_db.RegisterMessage(AugmentationCoeff) - -AugmentationParameter = _reflection.GeneratedProtocolMessageType('AugmentationParameter', (_message.Message,), dict( - DESCRIPTOR = _AUGMENTATIONPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.AugmentationParameter) - )) -_sym_db.RegisterMessage(AugmentationParameter) - -FlowWarpParameter = _reflection.GeneratedProtocolMessageType('FlowWarpParameter', (_message.Message,), dict( - DESCRIPTOR = _FLOWWARPPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.FlowWarpParameter) - )) -_sym_db.RegisterMessage(FlowWarpParameter) - -CorrelationParameter = _reflection.GeneratedProtocolMessageType('CorrelationParameter', (_message.Message,), dict( - DESCRIPTOR = _CORRELATIONPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.CorrelationParameter) - )) -_sym_db.RegisterMessage(CorrelationParameter) - -ResampleParameter = _reflection.GeneratedProtocolMessageType('ResampleParameter', (_message.Message,), dict( - DESCRIPTOR = _RESAMPLEPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ResampleParameter) - )) -_sym_db.RegisterMessage(ResampleParameter) - -AccumParameter = _reflection.GeneratedProtocolMessageType('AccumParameter', (_message.Message,), dict( - DESCRIPTOR = _ACCUMPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.AccumParameter) - )) -_sym_db.RegisterMessage(AccumParameter) - -ShuffleChannelParameter = _reflection.GeneratedProtocolMessageType('ShuffleChannelParameter', (_message.Message,), dict( - DESCRIPTOR = _SHUFFLECHANNELPARAMETER, - __module__ = 'mo_caffe_pb2' - # @@protoc_insertion_point(class_scope:mo_caffe.ShuffleChannelParameter) - )) -_sym_db.RegisterMessage(ShuffleChannelParameter) - - -_BLOBSHAPE.fields_by_name['dim'].has_options = True -_BLOBSHAPE.fields_by_name['dim']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) -_BLOBPROTO.fields_by_name['data'].has_options = True -_BLOBPROTO.fields_by_name['data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) -_BLOBPROTO.fields_by_name['diff'].has_options = True -_BLOBPROTO.fields_by_name['diff']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) -_BLOBPROTO.fields_by_name['double_data'].has_options = True -_BLOBPROTO.fields_by_name['double_data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) -_BLOBPROTO.fields_by_name['double_diff'].has_options = True -_BLOBPROTO.fields_by_name['double_diff']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) -# @@protoc_insertion_point(module_scope) diff --git a/tools/mo/openvino/tools/mo/front/caffe/proto/generate_caffe_pb2.py b/tools/mo/openvino/tools/mo/front/caffe/proto/generate_caffe_pb2.py deleted file mode 100644 index ca68c9fc7b8a6d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/proto/generate_caffe_pb2.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# 
SPDX-License-Identifier: Apache-2.0 - -import argparse -import os -import shutil -import subprocess # nosec -import sys - - -def shell(cmd, env=None, cwd=None): - kwargs = dict(cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False) - print('Running: "{}"'.format(' '.join(cmd))) - p = subprocess.Popen(cmd, **kwargs) - (stdout, stderr) = p.communicate() - return p.returncode, stdout, stderr - - -def get_cli_parser(): - parser = argparse.ArgumentParser() - parser.add_argument('--input_proto', required=True, help='Path to caffe.proto') - parser.add_argument('--output', help='Directory where output file are generated', - default=os.path.dirname(os.path.realpath(__file__))) - return parser - - -def build_proto(proto_file_path, python_path): - retcode, out, err = shell(['protoc', '-h']) - if retcode: - print(err) - return 1 - if not (os.path.exists(proto_file_path) and os.path.isfile(proto_file_path)): - print('File {} does not exist'.format(proto_file_path)) - return 1 - proto_path = os.path.split(proto_file_path)[0] - if not proto_path: - proto_path = os.getcwd() - - proto_file = os.path.split(proto_file_path)[1] - command = ['protoc', proto_file, '--python_out={}'.format(python_path)] - - retcode, out, err = shell(command, cwd=proto_path) - - if retcode: - print('protoc exit with code {}'.format(retcode)) - print('protoc out: {}'.format(out.decode().strip('\n'))) - print('protoc error: {}'.format(err.decode())) - else: - python_file = '{}_pb2.py'.format(proto_file.split('.')[0]) - shutil.move(os.path.join(python_path, python_file), os.path.join(python_path, 'caffe_pb2.py')) - print('File {} was generated in: {}'.format('caffe_pb2.py', python_path)) - return retcode - - -if __name__ == "__main__": - if sys.version_info < (3, 0): - print('Python version should be of version 3.5 or newer') - sys.exit(1) - argv = get_cli_parser().parse_args() - proto_file_path = argv.input_proto - python_path = argv.output - if not os.path.exists(python_path): - print("Output directory {} does not exist".format(python_path)) - sys.exit(1) - status = build_proto(proto_file_path, python_path) - exit(status) diff --git a/tools/mo/openvino/tools/mo/front/caffe/proto/mo_caffe.proto b/tools/mo/openvino/tools/mo/front/caffe/proto/mo_caffe.proto deleted file mode 100644 index f9dabe332cc2c4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/proto/mo_caffe.proto +++ /dev/null @@ -1,2096 +0,0 @@ -syntax = "proto2"; - -package mo_caffe; - -// Specifies the shape (dimensions) of a Blob. -message BlobShape { - repeated int64 dim = 1 [packed = true]; -} - -message BlobProto { - optional BlobShape shape = 7; - repeated float data = 5 [packed = true]; - repeated float diff = 6 [packed = true]; - repeated double double_data = 8 [packed = true]; - repeated double double_diff = 9 [packed = true]; - - // 4D dimensions -- deprecated. Use "shape" instead. - optional int32 num = 1 [default = 0]; - optional int32 channels = 2 [default = 0]; - optional int32 height = 3 [default = 0]; - optional int32 width = 4 [default = 0]; -} - -// The BlobProtoVector is simply a way to pass multiple blobproto instances -// around. 
-message BlobProtoVector { - repeated BlobProto blobs = 1; -} - -message CosineSimilarityBatchParameter { - optional double pos_label = 1 [default = 1]; - optional double neg_label = 2 [default = -1]; -} - -message Datum { - optional int32 channels = 1; - optional int32 height = 2; - optional int32 width = 3; - // the actual image data, in bytes - optional bytes data = 4; - optional int32 label = 5; - // Optionally, the datum could also hold float data. - repeated float float_data = 6; - // If true data contains an encoded image that need to be decoded - optional bool encoded = 7 [default = false]; -} - -// The label (display) name and label id. -message LabelMapItem { - // Both name and label are required. - optional string name = 1; - optional int32 label = 2; - // display_name is optional. - optional string display_name = 3; -} - -message LabelMap { - repeated LabelMapItem item = 1; -} - -// The normalized bounding box [0, 1] w.r.t. the input image size. -message NormalizedBBox { - optional float xmin = 1; - optional float ymin = 2; - optional float xmax = 3; - optional float ymax = 4; - optional int32 label = 5; - optional bool difficult = 6; - optional float score = 7; - optional float size = 8; -} - -message FillerParameter { - // The filler type. - optional string type = 1 [default = 'constant']; - optional float value = 2 [default = 0]; // the value in constant filler - optional float min = 3 [default = 0]; // the min value in uniform filler - optional float max = 4 [default = 1]; // the max value in uniform filler - optional float mean = 5 [default = 0]; // the mean value in Gaussian filler - optional float std = 6 [default = 1]; // the std value in Gaussian filler - // The expected number of non-zero output weights for a given input in - // Gaussian filler -- the default -1 means don't perform sparsification. - optional int32 sparse = 7 [default = -1]; - // Normalize the filler variance by fan_in, fan_out, or their average. - // Applies to 'xavier' and 'msra' fillers. - enum VarianceNorm { - FAN_IN = 0; - FAN_OUT = 1; - AVERAGE = 2; - } - optional VarianceNorm variance_norm = 8 [default = FAN_IN]; - - // added by Kaichun Mo - optional string file = 9; - repeated float diag_val = 10; -} - -message NetParameter { - optional string name = 1; // consider giving the network a name - // DEPRECATED. See InputParameter. The input blobs to the network. - repeated string input = 3; - // DEPRECATED. See InputParameter. The shape of the input blobs. - repeated BlobShape input_shape = 8; - - // 4D input dimensions -- deprecated. Use "input_shape" instead. - // If specified, for each input blob there should be four - // values specifying the num, channels, height and width of the input blob. - // Thus, there should be a total of (4 * #input) numbers. - repeated int32 input_dim = 4; - - // Whether the network will force every layer to carry out backward operation. - // If set False, then whether to carry out backward is determined - // automatically according to the net structure and learning rates. - optional bool force_backward = 5 [default = false]; - // The current "state" of the network, including the phase, level, and stage. - // Some layers may be included/excluded depending on this state and the states - // specified in the layers' include and exclude fields. - optional NetState state = 6; - - // Print debugging information about results while running Net::Forward, - // Net::Backward, and Net::Update. 
- optional bool debug_info = 7 [default = false]; - - optional bool profile_info = 9 [default = false]; - optional int32 profile_iter = 10 [default = 50]; - optional int32 profile_warmup = 11 [default = 10]; - - // The layers that make up the net. Each of their configurations, including - // connectivity and behavior, is specified as a LayerParameter. - repeated LayerParameter layer = 100; // ID 100 so layers are printed last. - - // DEPRECATED: use 'layer' instead. - repeated V1LayerParameter layers = 2; -} - -// NOTE -// Update the next available ID when you add a new SolverParameter field. -// -// SolverParameter next available ID: 43 (last added: plateau_winsize) -message SolverParameter { - ////////////////////////////////////////////////////////////////////////////// - // Specifying the train and test networks - // - // Exactly one train net must be specified using one of the following fields: - // train_net_param, train_net, net_param, net - // One or more test nets may be specified using any of the following fields: - // test_net_param, test_net, net_param, net - // If more than one test net field is specified (e.g., both net and - // test_net are specified), they will be evaluated in the field order given - // above: (1) test_net_param, (2) test_net, (3) net_param/net. - // A test_iter must be specified for each test_net. - // A test_level and/or a test_stage may also be specified for each test_net. - ////////////////////////////////////////////////////////////////////////////// - - // Proto filename for the train net, possibly combined with one or more - // test nets. - optional string net = 24; - // Inline train net param, possibly combined with one or more test nets. - optional NetParameter net_param = 25; - - optional string train_net = 1; // Proto filename for the train net. - repeated string test_net = 2; // Proto filenames for the test nets. - optional NetParameter train_net_param = 21; // Inline train net params. - repeated NetParameter test_net_param = 22; // Inline test net params. - - // The states for the train/test nets. Must be unspecified or - // specified once per net. - // - // By default, all states will have solver = true; - // train_state will have phase = TRAIN, - // and all test_state's will have phase = TEST. - // Other defaults are set according to the NetState defaults. - optional NetState train_state = 26; - repeated NetState test_state = 27; - - // The number of iterations for each test net. - repeated int32 test_iter = 3; - - // The number of iterations between two testing phases. - optional int32 test_interval = 4 [default = 0]; - optional bool test_compute_loss = 19 [default = false]; - // If true, run an initial test pass before the first iteration, - // ensuring memory availability and printing the starting value of the loss. - optional bool test_initialization = 32 [default = true]; - optional float base_lr = 5; // The base learning rate - // the number of iterations between displaying info. If display = 0, no info - // will be displayed. - optional int32 display = 6; - // Display the loss averaged over the last average_loss iterations - optional int32 average_loss = 33 [default = 1]; - optional int32 max_iter = 7; // the maximum number of iterations - // accumulate gradients over `iter_size` x `batch_size` instances - optional int32 iter_size = 36 [default = 1]; - - // The learning rate decay policy. The currently implemented learning rate - // policies are as follows: - // - fixed: always return base_lr. 
- // - step: return base_lr * gamma ^ (floor(iter / step)) - // - exp: return base_lr * gamma ^ iter - // - inv: return base_lr * (1 + gamma * iter) ^ (- power) - // - multistep: similar to step but it allows non uniform steps defined by - // stepvalue - // - poly: the effective learning rate follows a polynomial decay, to be - // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) - // - sigmoid: the effective learning rate follows a sigmod decay - // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) - // - plateau: decreases lr - // if the minimum loss isn't updated for 'plateau_winsize' iters - // - // where base_lr, max_iter, gamma, step, stepvalue and power are defined - // in the solver parameter protocol buffer, and iter is the current iteration. - optional string lr_policy = 8; - optional float gamma = 9; // The parameter to compute the learning rate. - optional float power = 10; // The parameter to compute the learning rate. - optional float momentum = 11; // The momentum value. - optional float weight_decay = 12; // The weight decay. - // regularization types supported: L1 and L2 - // controlled by weight_decay - optional string regularization_type = 29 [default = "L2"]; - // the stepsize for learning rate policy "step" - optional int32 stepsize = 13; - // the stepsize for learning rate policy "multistep" - repeated int32 stepvalue = 34; - // the stepsize for learning rate policy "plateau" - repeated int32 plateau_winsize = 42; - - // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, - // whenever their actual L2 norm is larger. - optional float clip_gradients = 35 [default = -1]; - - optional int32 snapshot = 14 [default = 0]; // The snapshot interval - optional string snapshot_prefix = 15; // The prefix for the snapshot. - // whether to snapshot diff in the results or not. Snapshotting diff will help - // debugging but the final protocol buffer size will be much larger. - optional bool snapshot_diff = 16 [default = false]; - enum SnapshotFormat { - HDF5 = 0; - BINARYPROTO = 1; - } - optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO]; - // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU in default. - enum SolverMode { - CPU = 0; - GPU = 1; - } - optional SolverMode solver_mode = 17 [default = GPU]; - // the device_id will that be used in GPU mode. Use device_id = 0 in default. - optional int32 device_id = 18 [default = 0]; - // If non-negative, the seed with which the Solver will initialize the Caffe - // random number generator -- useful for reproducible results. Otherwise, - // (and by default) initialize using a seed derived from the system clock. - optional int64 random_seed = 20 [default = -1]; - - // type of the solver - optional string type = 40 [default = "SGD"]; - - // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam - optional float delta = 31 [default = 1e-8]; - // parameters for the Adam solver - optional float momentum2 = 39 [default = 0.999]; - - // RMSProp decay value - // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) - optional float rms_decay = 38 [default = 0.99]; - - // If true, print information about the state of the net that may help with - // debugging learning problems. - optional bool debug_info = 23 [default = false]; - - // If false, don't save a snapshot after training finishes. 
- optional bool snapshot_after_train = 28 [default = true]; - - // DEPRECATED: old solver enum types, use string instead - enum SolverType { - SGD = 0; - NESTEROV = 1; - ADAGRAD = 2; - RMSPROP = 3; - ADADELTA = 4; - ADAM = 5; - } - // DEPRECATED: use type instead of solver_type - optional SolverType solver_type = 30 [default = SGD]; - - // Overlap compute and communication for data parallel training - optional bool layer_wise_reduce = 41 [default = true]; -} - -// A message that stores the solver snapshots -message SolverState { - optional int32 iter = 1; // The current iteration - optional string learned_net = 2; // The file that stores the learned net. - repeated BlobProto history = 3; // The history for sgd solvers - optional int32 current_step = 4 [default = 0]; // The current step for learning rate - optional float minimum_loss = 5 [default = 1E38]; // Historical minimum loss - optional int32 iter_last_event = 6 [default = 0]; // The iteration when last lr-update or min_loss-update happend -} - -enum Phase { - TRAIN = 0; - TEST = 1; -} - -message NetState { - optional Phase phase = 1 [default = TEST]; - optional int32 level = 2 [default = 0]; - repeated string stage = 3; -} - -message NetStateRule { - // Set phase to require the NetState have a particular phase (TRAIN or TEST) - // to meet this rule. - optional Phase phase = 1; - - // Set the minimum and/or maximum levels in which the layer should be used. - // Leave undefined to meet the rule regardless of level. - optional int32 min_level = 2; - optional int32 max_level = 3; - - // Customizable sets of stages to include or exclude. - // The net must have ALL of the specified stages and NONE of the specified - // "not_stage"s to meet the rule. - // (Use multiple NetStateRules to specify conjunctions of stages.) - repeated string stage = 4; - repeated string not_stage = 5; -} - - -// added by Kaichun Mo -message SpatialTransformerParameter { - // How to use the parameter passed by localisation network - optional string transform_type = 1 [default = "affine"]; - // What is the sampling technique - optional string sampler_type = 2 [default = "bilinear"]; - - // If not set,stay same with the input dimension H and W - optional int32 output_H = 3; - optional int32 output_W = 4; - - // If false, only compute dTheta, DO NOT compute dU - optional bool to_compute_dU = 5 [default = true]; - - // The default value for some parameters - optional double theta_1_1 = 6; - optional double theta_1_2 = 7; - optional double theta_1_3 = 8; - optional double theta_2_1 = 9; - optional double theta_2_2 = 10; - optional double theta_2_3 = 11; - - optional bool de_transform = 12 [default = false]; -} - -// added by Kaichun Mo -message PowerFileParameter { - - optional string shift_file = 1; -} - -// added by Kaichun Mo -message STLossParameter { - - // Indicate the resolution of the output images after ST transformation - required int32 output_H = 1; - required int32 output_W = 2; -} - -// added by Kaichun Mo -message LocLossParameter { - - required double threshold = 1; -} - -// Specifies training parameters (multipliers on global learning constants, -// and the name and other settings used for weight sharing). -message ParamSpec { - // The names of the parameter blobs -- useful for sharing parameters among - // layers, but never required otherwise. To share a parameter between two - // layers, give it a (non-empty) name. 
- optional string name = 1; - - // Whether to require shared weights to have the same shape, or just the same - // count -- defaults to STRICT if unspecified. - optional DimCheckMode share_mode = 2; - enum DimCheckMode { - // STRICT (default) requires that num, channels, height, width each match. - STRICT = 0; - // PERMISSIVE requires only the count (num*channels*height*width) to match. - PERMISSIVE = 1; - } - - // The multiplier on the global learning rate for this parameter. - optional float lr_mult = 3 [default = 1.0]; - - // The multiplier on the global weight decay for this parameter. - optional float decay_mult = 4 [default = 1.0]; -} - -// NOTE -// Update the next available ID when you add a new LayerParameter field. -// -// LayerParameter next available layer-specific ID: 216 (last added: reorg_yolo_param) -message LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the layer type - repeated string bottom = 3; // the name of each bottom blob - repeated string top = 4; // the name of each top blob - - // The train / test phase for computation. - optional Phase phase = 10; - - // The amount of weight to assign each top blob in the objective. - // Each layer assigns a default value, usually of either 0 or 1, - // to each top blob. - repeated float loss_weight = 5; - - // Specifies training parameters (multipliers on global learning constants, - // and the name and other settings used for weight sharing). - repeated ParamSpec param = 6; - - // The blobs containing the numeric parameters of the layer. - repeated BlobProto blobs = 7; - - // Specifies whether to backpropagate to each bottom. If unspecified, - // Caffe will automatically infer whether each input needs backpropagation - // to compute parameter gradients. If set to true for some inputs, - // backpropagation to those inputs is forced; if set false for some inputs, - // backpropagation to those inputs is skipped. - // - // The size must be either 0 or equal to the number of bottoms. - repeated bool propagate_down = 11; - - // Rules controlling whether and when a layer is included in the network, - // based on the current NetState. You may specify a non-zero number of rules - // to include OR exclude, but not both. If no include or exclude rules are - // specified, the layer is always included. If the current NetState meets - // ANY (i.e., one or more) of the specified rules, the layer is - // included/excluded. - repeated NetStateRule include = 8; - repeated NetStateRule exclude = 9; - - // Parameters for data pre-processing. - optional TransformationParameter transform_param = 100; - - // Parameters shared by loss layers. - optional LossParameter loss_param = 101; - - // Layer type-specific parameters. - // - // Note: certain layers may have more than one computational engine - // for their implementation. These layers include an Engine type and - // engine parameter for selecting the implementation. - // The default for the engine is set by the ENGINE switch at compile-time. 
- optional AccuracyParameter accuracy_param = 102; - optional ArgMaxParameter argmax_param = 103; - optional BatchNormParameter batch_norm_param = 139; - optional BiasParameter bias_param = 141; - optional ChannelPermutationParameter channel_permutation_param = 8082; - optional ConcatParameter concat_param = 104; - optional ContrastiveLossParameter contrastive_loss_param = 105; - optional ConvolutionParameter convolution_param = 106; - optional CropParameter crop_param = 144; - optional CTCDecoderParameter ctc_decoder_param = 149; - optional CTCLossParameter ctc_loss_param = 148; - optional DataParameter data_param = 107; - optional DropoutParameter dropout_param = 108; - optional DummyDataParameter dummy_data_param = 109; - optional EltwiseParameter eltwise_param = 110; - optional ELUParameter elu_param = 140; - optional EmbedParameter embed_param = 137; - optional ExpParameter exp_param = 111; - optional FlattenParameter flatten_param = 135; - optional GRNParameter grn_param = 213; - optional HDF5DataParameter hdf5_data_param = 112; - optional HDF5OutputParameter hdf5_output_param = 113; - optional HingeLossParameter hinge_loss_param = 114; - optional ImageDataParameter image_data_param = 115; - optional InfogainLossParameter infogain_loss_param = 116; - optional InnerProductParameter inner_product_param = 117; - optional InputParameter input_param = 143; - optional LogParameter log_param = 134; - optional LRNParameter lrn_param = 118; - optional MemoryDataParameter memory_data_param = 119; - optional MVNParameter mvn_param = 120; - optional ParameterParameter parameter_param = 145; - optional PoolingParameter pooling_param = 121; - optional PermuteParameter permute_param = 154; - optional PowerParameter power_param = 122; - optional PReLUParameter prelu_param = 131; - optional PythonParameter python_param = 130; - optional RecurrentParameter recurrent_param = 146; - optional ReductionParameter reduction_param = 136; - optional ReLUParameter relu_param = 123; - optional ReshapeParameter reshape_param = 133; - optional ReverseParameter reverse_param = 147; - optional ScaleParameter scale_param = 142; - optional SigmoidParameter sigmoid_param = 124; - optional SoftmaxParameter softmax_param = 125; - optional SPPParameter spp_param = 132; - optional SliceParameter slice_param = 126; - optional TanHParameter tanh_param = 127; - optional ThresholdParameter threshold_param = 128; - optional TileParameter tile_param = 138; - optional WindowDataParameter window_data_param = 129; - - // added by Kaichun Mo - optional SpatialTransformerParameter st_param = 150; - optional STLossParameter st_loss_param = 151; - optional PowerFileParameter power_file_param = 152; - optional LocLossParameter loc_loss_param = 153; - - optional ProposalParameter proposal_param = 201; - optional CosineSimilarityBatchParameter cosine_similarity_batch_param = 202; - optional RandomSamplingSoftmaxLossParameter rss_loss_param = 203; - optional NormalizeParameter norm_param = 204; - optional ROIWarpingParameter roi_warping_param = 205; - optional PSROIPoolingParameter psroi_pooling_param = 207; - optional ROIPoolingParameter roi_pooling_param = 208; - optional SmoothL1LossParameter smooth_l1_loss_param = 209; - optional BoxAnnotatorOHEMParameter box_annotator_ohem_param = 210; - optional DetectionOutputParameter detection_output_param = 211; - optional PriorBoxParameter prior_box_param = 212; - - optional RegionYoloParameter region_yolo_param = 214; - optional ReorgYoloParameter reorg_yolo_param = 215; - optional 
ReLU6Parameter relu6_param = 216; - - optional InterpParameter interp_param = 217; - - // for FlowNet2 - optional AugmentationParameter augmentation_param = 218; - optional CorrelationParameter correlation_param = 219; - optional ResampleParameter resample_param = 220; - optional FlowWarpParameter flow_warp_param = 221; - optional AccumParameter accum_param = 222; - optional CoeffScheduleParameter coeff_schedule_param = 223; - - // for Shufflenet v2 - optional ShuffleChannelParameter shuffle_channel_param= 224; -} - -message InterpParameter { - optional int32 height = 1 [default = 0]; // Height of output - optional int32 width = 2 [default = 0]; // Width of output - optional int32 zoom_factor = 3 [default = 1]; // zoom factor - optional int32 shrink_factor = 4 [default = 1]; // shrink factor - optional int32 pad_beg = 5 [default = 0]; // padding at begin of input - optional int32 pad_end = 6 [default = 0]; // padding at end of input -} - -message RandomSamplingSoftmaxLossParameter { - optional int32 random_sampling_num = 1 [default = 100]; - optional string random_sampling_policy = 2 [default = "random"]; -} - -// Message that stores parameters used by ProposalLayer -message ProposalParameter { - optional uint32 feat_stride = 1 [default = 16]; - optional uint32 base_size = 2 [default = 16]; - optional uint32 min_size = 3 [default = 16]; - repeated float ratio = 4; - repeated float scale = 5; - optional uint32 pre_nms_topn = 6 [default = 6000]; - optional uint32 post_nms_topn = 7 [default = 300]; - optional float nms_thresh = 8 [default = 0.7]; -} - -// Message that stores parameters used by NormalizeLayer -message NormalizeParameter { - optional bool across_spatial = 1 [default = true]; - // Initial value of scale. Default is 1.0 for all - optional FillerParameter scale_filler = 2; - // Whether or not scale parameters are shared across channels. - optional bool channel_shared = 3 [default = true]; - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 4 [default = 1e-10]; -} - -message PermuteParameter { - // The new orders of the axes of data. Notice it should be with - // in the same range as the input data, and it starts from 0. - // Do not provide repeated order. - repeated uint32 order = 1; -} - -// Message that stores parameters used to apply transformation -// to the data layer's data -message TransformationParameter { - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 1 [default = 1]; - // Specify if we want to randomly mirror data. - optional bool mirror = 2 [default = false]; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 3 [default = 0]; - // mean_file and mean_value cannot be specified at the same time - optional string mean_file = 4; - // if specified can be repeated once (would subtract it from all the channels) - // or can be repeated the same number of times as channels - // (would subtract them from the corresponding channel) - repeated float mean_value = 5; - // Force the decoded image to have 3 color channels. - optional bool force_color = 6 [default = false]; - // Force the decoded image to have 1 color channels. - optional bool force_gray = 7 [default = false]; -} - -// Message that stores parameters shared by loss layers -message LossParameter { - // If specified, ignore instances with the given label. 
- optional int32 ignore_label = 1; - // How to normalize the loss for loss layers that aggregate across batches, - // spatial dimensions, or other dimensions. Currently only implemented in - // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers. - enum NormalizationMode { - // Divide by the number of examples in the batch times spatial dimensions. - // Outputs that receive the ignore label will NOT be ignored in computing - // the normalization factor. - FULL = 0; - // Divide by the total number of output locations that do not take the - // ignore_label. If ignore_label is not set, this behaves like FULL. - VALID = 1; - // Divide by the batch size. - BATCH_SIZE = 2; - // Divide by pre-fixed normalizer - PRE_FIXED = 3; - // Do not normalize the loss. - NONE = 4; - } - // For historical reasons, the default normalization for - // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID. - optional NormalizationMode normalization = 3 [default = VALID]; - // Deprecated. Ignored if normalization is specified. If normalization - // is not specified, then setting this to false will be equivalent to - // normalization = BATCH_SIZE to be consistent with previous behavior. - optional bool normalize = 2; - //pre-fixed normalizer - optional float pre_fixed_normalizer = 4 [default = 1]; - // label frequencies - optional bool weight_by_label_freqs = 5 [default = false]; - repeated float class_weighting = 6; -} - -// Messages that store parameters used by individual layer types follow, in -// alphabetical order. - -message AccuracyParameter { - // When computing accuracy, count as correct by comparing the true label to - // the top k scoring classes. By default, only compare to the top scoring - // class (i.e. argmax). - optional uint32 top_k = 1 [default = 1]; - - // The "label" axis of the prediction blob, whose argmax corresponds to the - // predicted label -- may be negative to index from the end (e.g., -1 for the - // last axis). For example, if axis == 1 and the predictions are - // (N x C x H x W), the label blob is expected to contain N*H*W ground truth - // labels with integer values in {0, 1, ..., C-1}. - optional int32 axis = 2 [default = 1]; - - // If specified, ignore instances with the given label. - optional int32 ignore_label = 3; -} - -message ArgMaxParameter { - // If true produce pairs (argmax, maxval) - optional bool out_max_val = 1 [default = false]; - optional uint32 top_k = 2 [default = 1]; - // The axis along which to maximise -- may be negative to index from the - // end (e.g., -1 for the last axis). - // By default ArgMaxLayer maximizes over the flattened trailing dimensions - // for each index of the first / num dimension. - optional int32 axis = 3; -} - -message ChannelPermutationAction { - // Destination channel. - required uint32 chan = 1; - // Source channel for channel copy operation. No source channel shall be - // used more than once. - optional uint32 copy = 2; - // Value for channel fill operation (float for both single- and - // double-precision Caffe). - optional float fill = 3; -} - -message ChannelPermutationParameter { - // Sequence of actions ordered by increasing value of chan. - // The missing values of chan (i.e. top channel indices) are assumed to be - // copy operations from bottom channels with the same channel index. - repeated ChannelPermutationAction action = 1; - - // Number out output channels - required uint32 num_output = 16; - // When true, tells layer that copying/filling channels in-place in - // the given order would give correct result. 
- optional bool inplace_possible = 17 [default = false]; - // Version field is used to check compatibility between layer implementation - // and layer parameters in model prototxt file. - // Version number of this message format is 1. - optional int32 version = 18 [default = 0]; -} - -message ConcatParameter { - // The axis along which to concatenate -- may be negative to index from the - // end (e.g., -1 for the last axis). Other axes must have the - // same dimension for all the bottom blobs. - // By default, ConcatLayer concatenates blobs along the "channels" axis (1). - optional int32 axis = 2 [default = 1]; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 concat_dim = 1 [default = 1]; -} - -message BatchNormParameter { - // If false, accumulate global mean/variance values via a moving average. If - // true, use those accumulated values instead of computing mean/variance - // across the batch. - optional bool use_global_stats = 1; - // How much does the moving average decay each iteration? - optional float moving_average_fraction = 2 [default = .999]; - // Small value to add to the variance estimate so that we don't divide by - // zero. - optional float eps = 3 [default = 1e-5]; -} - -message BoxAnnotatorOHEMParameter { - required uint32 roi_per_img = 1; // number of rois for training - optional int32 ignore_label = 2 [default = -1]; // ignore_label in scoring -} - -message BiasParameter { - // The first axis of bottom[0] (the first input Blob) along which to apply - // bottom[1] (the second input Blob). May be negative to index from the end - // (e.g., -1 for the last axis). - // - // For example, if bottom[0] is 4D with shape 100x3x40x60, the output - // top[0] will have the same shape, and bottom[1] may have any of the - // following shapes (for the given value of axis): - // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 - // (axis == 1 == -3) 3; 3x40; 3x40x60 - // (axis == 2 == -2) 40; 40x60 - // (axis == 3 == -1) 60 - // Furthermore, bottom[1] may have the empty shape (regardless of the value of - // "axis") -- a scalar bias. - optional int32 axis = 1 [default = 1]; - - // (num_axes is ignored unless just one bottom is given and the bias is - // a learned parameter of the layer. Otherwise, num_axes is determined by the - // number of axes by the second bottom.) - // The number of axes of the input (bottom[0]) covered by the bias - // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. - // Set num_axes := 0, to add a zero-axis Blob: a scalar. - optional int32 num_axes = 2 [default = 1]; - - // (filler is ignored unless just one bottom is given and the bias is - // a learned parameter of the layer.) - // The initialization for the learned bias parameter. - // Default is the zero (0) initialization, resulting in the BiasLayer - // initially performing the identity operation. - optional FillerParameter filler = 3; -} - -message ContrastiveLossParameter { - // margin for dissimilar pair - optional float margin = 1 [default = 1.0]; - // The first implementation of this cost did not exactly match the cost of - // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. - // legacy_version = false (the default) uses (margin - d)^2 as proposed in the - // Hadsell paper. New models should probably use this version. - // legacy_version = true uses (margin - d^2). 
This is kept to support / - // reproduce existing models and results - optional bool legacy_version = 2 [default = false]; -} - -message ConvolutionParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in all spatial dimensions, or once per spatial dimension. - repeated uint32 pad = 3; // The padding size; defaults to 0 - repeated uint32 kernel_size = 4; // The kernel size - repeated uint32 stride = 6; // The stride; defaults to 1 - // Factor used to dilate the kernel, (implicitly) zero-filling the resulting - // holes. (Kernel dilation is sometimes referred to by its use in the - // algorithme à trous from Holschneider et al. 1987.) - repeated uint32 dilation = 18; // The dilation; defaults to 1 - - // For 2D convolution only, the *_h and *_w versions may also be used to - // specify both spatial dimensions. - optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only) - optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only) - optional uint32 kernel_h = 11; // The kernel height (2D only) - optional uint32 kernel_w = 12; // The kernel width (2D only) - optional uint32 stride_h = 13; // The stride height (2D only) - optional uint32 stride_w = 14; // The stride width (2D only) - - optional uint32 group = 5 [default = 1]; // The group size for group conv - - optional FillerParameter weight_filler = 7; // The filler for the weight - optional FillerParameter bias_filler = 8; // The filler for the bias - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 15 [default = DEFAULT]; - - // The axis to interpret as "channels" when performing convolution. - // Preceding dimensions are treated as independent inputs; - // succeeding dimensions are treated as "spatial". - // With (N, C, H, W) inputs, and axis == 1 (the default), we perform - // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for - // groups g>1) filters across the spatial axes (H, W) of the input. - // With (N, C, D, H, W) inputs, and axis == 1, we perform - // N independent 3D convolutions, sliding (C/g)-channels - // filters across the spatial axes (D, H, W) of the input. - optional int32 axis = 16 [default = 1]; - - // Whether to force use of the general ND convolution, even if a specific - // implementation for blobs of the appropriate number of spatial dimensions - // is available. (Currently, there is only a 2D-specific convolution - // implementation; for input blobs with num_axes != 2, this option is - // ignored and the ND implementation will be used.) - optional bool force_nd_im2col = 17 [default = false]; -} - -message CropParameter { - // To crop, elements of the first bottom are selected to fit the dimensions - // of the second, reference bottom. The crop is configured by - // - the crop `axis` to pick the dimensions for cropping - // - the crop `offset` to set the shift for all/each dimension - // to align the cropped bottom with the reference bottom. - // All dimensions up to but excluding `axis` are preserved, while - // the dimensions including and trailing `axis` are cropped. - // If only one `offset` is set, then all dimensions are offset by this amount. - // Otherwise, the number of offsets must equal the number of cropped axes to - // shift the crop in each dimension accordingly. 
- // Note: standard dimensions are N,C,H,W so the default is a spatial crop, - // and `axis` may be negative to index from the end (e.g., -1 for the last - // axis). - optional int32 axis = 1 [default = 2]; - repeated uint32 offset = 2; - repeated uint32 dimsize = 3; -} - -message CTCDecoderParameter { - // The index of the blank index in the labels. A negative (default) - // value will use the last index - optional int32 blank_index = 1 [default = -1]; - - // Collapse the repeated labels during the ctc calculation - // e.g. collapse [0bbb11bb11bb0b2] to [01102] instead of [0111102], - // where b means blank label. - // The default behaviour is to merge repeated labels. - // Note: blank labels will be removed in any case. - optional bool ctc_merge_repeated = 2 [default = true]; -} - -message CTCLossParameter { - // Adds delayed output to the CTC loss calculation (untested!) - optional int32 output_delay = 1 [default = 0]; - - // The index of the blank index in the labels. A negative (default) - // value will use the last index - optional int32 blank_index = 2 [default = -1]; - - // Collapse repeating labels of the target sequence before calculating - // the loss and the gradients (e.g. collapse [01102] to [0102]) - // The default behaviour is to keep repeated labels. Elsewise the - // network will not learn to predict repetitions. - optional bool preprocess_collapse_repeated = 3 [default = false]; - - // Collapse the repeated labels during the ctc calculation - // e.g collapse [0bbb11bb11bb0b2] to [01102] instead of [0111102], - // where b means blank label. - // The default behaviour is to merge repeated labels. - // Note: blank labels will be removed in any case. - optional bool ctc_merge_repeated = 4 [default = true]; - - /// This parameter is for test cases only! - /// The time for which to calculate the loss (see Graves Eq. (7.27) ) - /// Note that the result must be the same for each 0 <= t < T - /// Therefore you can chose an arbitrary value, default 0 - optional int32 loss_calculation_t = 5 [default = 0]; -} - -message DataParameter { - enum DB { - LEVELDB = 0; - LMDB = 1; - } - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - // DEPRECATED. Each solver accesses a different subset of the database. - optional uint32 rand_skip = 7 [default = 0]; - optional DB backend = 8 [default = LEVELDB]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. 
- optional bool mirror = 6 [default = false]; - // Force the encoded image to have 3 color channels - optional bool force_encoded_color = 9 [default = false]; - // Prefetch queue (Increase if data feeding bandwidth varies, within the - // limit of device memory for GPU training) - optional uint32 prefetch = 10 [default = 4]; -} - -message NonMaximumSuppressionParameter { - // Threshold to be used in nms. - optional float nms_threshold = 1 [default = 0.3]; - // Maximum number of results to be kept. - optional int32 top_k = 2; - // Parameter for adaptive nms. - optional float eta = 3 [default = 1.0]; -} - -// Message that stores parameters used by data transformer for resize policy -message ResizeParameter { - //Probability of using this resize policy - optional float prob = 1 [default = 1]; - - enum Resize_mode { - WARP = 1; - FIT_SMALL_SIZE = 2; - FIT_LARGE_SIZE_AND_PAD = 3; - } - optional Resize_mode resize_mode = 2 [default = WARP]; - optional uint32 height = 3 [default = 0]; - optional uint32 width = 4 [default = 0]; - // A parameter used to update bbox in FIT_SMALL_SIZE mode. - optional uint32 height_scale = 8 [default = 0]; - optional uint32 width_scale = 9 [default = 0]; - - enum Pad_mode { - CONSTANT = 1; - MIRRORED = 2; - REPEAT_NEAREST = 3; - } - // Padding mode for BE_SMALL_SIZE_AND_PAD mode and object centering - optional Pad_mode pad_mode = 5 [default = CONSTANT]; - // if specified can be repeated once (would fill all the channels) - // or can be repeated the same number of times as channels - // (would use it them to the corresponding channel) - repeated float pad_value = 6; - - enum Interp_mode { //Same as in OpenCV - LINEAR = 1; - AREA = 2; - NEAREST = 3; - CUBIC = 4; - LANCZOS4 = 5; - } - //interpolation for for resizing - repeated Interp_mode interp_mode = 7; -} - -message SaveOutputParameter { - // Output directory. If not empty, we will save the results. - optional string output_directory = 1; - // Output name prefix. - optional string output_name_prefix = 2; - // Output format. - // VOC - PASCAL VOC output format. - // COCO - MS COCO output format. - optional string output_format = 3; - // If you want to output results, must also provide the following two files. - // Otherwise, we will ignore saving results. - // label map file. - optional string label_map_file = 4; - // A file which contains a list of names and sizes with same order - // of the input DB. The file is in the following format: - // name height width - // ... - optional string name_size_file = 5; - // Number of test images. It can be less than the lines specified in - // name_size_file. For example, when we only want to evaluate on part - // of the test images. - optional uint32 num_test_image = 6; - // The resize parameter used in saving the data. - optional ResizeParameter resize_param = 7; -} - -// Message that store parameters used by DetectionOutputLayer -message DetectionOutputParameter { - // Number of classes to be predicted. Required! - optional uint32 num_classes = 1; - // If true, bounding box are shared among different classes. - optional bool share_location = 2 [default = true]; - // Background label id. If there is no background class, - // set it as -1. - optional int32 background_label_id = 3 [default = 0]; - // Parameters used for non maximum suppression. - optional NonMaximumSuppressionParameter nms_param = 4; - // Parameters used for saving detection results. - optional SaveOutputParameter save_output_param = 5; - // Type of coding method for bbox. 
- optional PriorBoxParameter.CodeType code_type = 6 [default = CORNER]; - // If true, variance is encoded in target; otherwise we need to adjust the - // predicted offset accordingly. - optional bool variance_encoded_in_target = 8 [default = false]; - // Number of total bboxes to be kept per image after nms step. - // -1 means keeping all bboxes after nms step. - optional int32 keep_top_k = 7 [default = -1]; - // Only consider detections whose confidences are larger than a threshold. - // If not provided, consider all boxes. - optional float confidence_threshold = 9; - // If true, visualize the detection results. - optional bool visualize = 10 [default = false]; - // The threshold used to visualize the detection results. - optional float visualize_threshold = 11; - // If provided, save outputs to video file. - optional string save_file = 12; - // Input width - optional int32 input_width = 13 [default = -1]; - // Input height - optional int32 input_height = 14 [default = -1]; - // If false, bboxes need to be normalized - optional bool normalized = 15 [default = true]; - //the objectness score is used for the anchor refinement module to filter easy negative anchor. - optional float objectness_score = 16 [default = 0.01]; -} - -message DropoutParameter { - optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio -} - -// DummyDataLayer fills any number of arbitrarily shaped blobs with random -// (or constant) data generated by "Fillers" (see "message FillerParameter"). -message DummyDataParameter { - // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N - // shape fields, and 0, 1 or N data_fillers. - // - // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. - // If 1 data_filler is specified, it is applied to all top blobs. If N are - // specified, the ith is applied to the ith top blob. - repeated FillerParameter data_filler = 1; - repeated BlobShape shape = 6; - - // 4D dimensions -- deprecated. Use "shape" instead. - repeated uint32 num = 2; - repeated uint32 channels = 3; - repeated uint32 height = 4; - repeated uint32 width = 5; -} - -message EltwiseParameter { - enum EltwiseOp { - PROD = 0; - SUM = 1; - MAX = 2; - } - optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation - repeated float coeff = 2; // blob-wise coefficient for SUM operation - - // Whether to use an asymptotically slower (for >2 inputs) but stabler method - // of computing the gradient for the PROD operation. (No effect for SUM op.) - optional bool stable_prod_grad = 3 [default = true]; -} - -// Message that stores parameters used by ELULayer -message ELUParameter { - // Described in: - // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate - // Deep Network Learning by Exponential Linear Units (ELUs). arXiv - optional float alpha = 1 [default = 1]; -} - -// Message that stores parameters used by EmbedLayer -message EmbedParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - // The input is given as integers to be interpreted as one-hot - // vector indices with dimension num_input. Hence num_input should be - // 1 greater than the maximum possible input value. 
- optional uint32 input_dim = 2; - - optional bool bias_term = 3 [default = true]; // Whether to use a bias term - optional FillerParameter weight_filler = 4; // The filler for the weight - optional FillerParameter bias_filler = 5; // The filler for the bias - -} - -// Message that stores parameters used by ExpLayer -message ExpParameter { - // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = exp(shift + scale * x). - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -/// Message that stores parameters used by FlattenLayer -message FlattenParameter { - // The first axis to flatten: all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 1 [default = 1]; - - // The last axis to flatten: all following axes are retained in the output. - // May be negative to index from the end (e.g., the default -1 for the last - // axis). - optional int32 end_axis = 2 [default = -1]; -} - -// Message that stores parameters used by HDF5DataLayer -message HDF5DataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 2; - - // Specify whether to shuffle the data. - // If shuffle == true, the ordering of the HDF5 files is shuffled, - // and the ordering of data within any given HDF5 file is shuffled, - // but data between different files are not interleaved; all of a file's - // data are output (in a random order) before moving onto another file. - optional bool shuffle = 3 [default = false]; -} - -message HDF5OutputParameter { - optional string file_name = 1; -} - -message HingeLossParameter { - enum Norm { - L1 = 1; - L2 = 2; - } - // Specify the Norm to use L1 or L2 - optional Norm norm = 1 [default = L1]; -} - -message ImageDataParameter { - // Specify the data source. - optional string source = 1; - // Specify the batch size. - optional uint32 batch_size = 4 [default = 1]; - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 7 [default = 0]; - // Whether or not ImageLayer should shuffle the list of files at every epoch. - optional bool shuffle = 8 [default = false]; - // It will also resize images if new_height or new_width are not zero. - optional uint32 new_height = 9 [default = 0]; - optional uint32 new_width = 10 [default = 0]; - // Specify if the images are color or gray - optional bool is_color = 11 [default = true]; - // DEPRECATED. See TransformationParameter. For data pre-processing, we can do - // simple scaling and subtracting the data mean, if provided. Note that the - // mean subtraction is always carried out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // DEPRECATED. See TransformationParameter. Specify if we would like to randomly - // crop an image. - optional uint32 crop_size = 5 [default = 0]; - // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror - // data. 
- optional bool mirror = 6 [default = false]; - optional string root_folder = 12 [default = ""]; -} - -message InfogainLossParameter { - // Specify the infogain matrix source. - optional string source = 1; -} - -message InnerProductParameter { - optional uint32 num_output = 1; // The number of outputs for the layer - optional bool bias_term = 2 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 3; // The filler for the weight - optional FillerParameter bias_filler = 4; // The filler for the bias - - // The first axis to be lumped into a single inner product computation; - // all preceding axes are retained in the output. - // May be negative to index from the end (e.g., -1 for the last axis). - optional int32 axis = 5 [default = 1]; - // Specify whether to transpose the weight matrix or not. - // If transpose == true, any operations will be performed on the transpose - // of the weight matrix. The weight matrix itself is not going to be transposed - // but rather the transfer flag of operations will be toggled accordingly. - optional bool transpose = 6 [default = false]; -} - -message InputParameter { - // This layer produces N >= 1 top blob(s) to be assigned manually. - // Define N shapes to set a shape for each top. - // Define 1 shape to set the same shape for every top. - // Define no shape to defer to reshaping manually. - repeated BlobShape shape = 1; -} - -// Message that stores parameters used by LogLayer -message LogParameter { - // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. - // Or if base is set to the default (-1), base is set to e, - // so y = ln(shift + scale * x) = log_e(shift + scale * x) - optional float base = 1 [default = -1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -// Message that stores parameters used by LRNLayer -message LRNParameter { - optional uint32 local_size = 1 [default = 5]; - optional float alpha = 2 [default = 1.]; - optional float beta = 3 [default = 0.75]; - enum NormRegion { - ACROSS_CHANNELS = 0; - WITHIN_CHANNEL = 1; - } - optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; - optional float k = 5 [default = 1.]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - -// Message that stores parameters used by GRNLayer (across channels only) -message GRNParameter { - optional float bias = 1 [default = 1.]; -} - -message MemoryDataParameter { - optional uint32 batch_size = 1; - optional uint32 channels = 2; - optional uint32 height = 3; - optional uint32 width = 4; -} - -message MVNParameter { - // This parameter can be set to false to normalize mean only - optional bool normalize_variance = 1 [default = true]; - - // This parameter can be set to true to perform DNN-like MVN - optional bool across_channels = 2 [default = false]; - - // Epsilon for not dividing by zero while normalizing variance - optional float eps = 3 [default = 1e-9]; -} - -message ParameterParameter { - optional BlobShape shape = 1; -} - -message PoolingParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 1 [default = MAX]; // The pooling method - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. 
- optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) - optional uint32 pad_h = 9 [default = 0]; // The padding height - optional uint32 pad_w = 10 [default = 0]; // The padding width - optional uint32 kernel_size = 2; // The kernel size (square) - optional uint32 kernel_h = 5; // The kernel height - optional uint32 kernel_w = 6; // The kernel width - optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) - optional uint32 stride_h = 7; // The stride height - optional uint32 stride_w = 8; // The stride width - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 11 [default = DEFAULT]; - // If global_pooling then it will pool over the size of the bottom by doing - // kernel_h = bottom->height and kernel_w = bottom->width - optional bool global_pooling = 12 [default = false]; - optional bool ceil_mode = 13 [default = true]; -} - -message PowerParameter { - // PowerLayer computes outputs y = (shift + scale * x) ^ power. - optional float power = 1 [default = 1.0]; - optional float scale = 2 [default = 1.0]; - optional float shift = 3 [default = 0.0]; -} - -// Message that store parameters used by PriorBoxLayer -message PriorBoxParameter { - // Encode/decode type. - enum CodeType { - CORNER = 1; - CENTER_SIZE = 2; - CORNER_SIZE = 3; - } - // Minimum box size (in pixels). Required! - repeated float min_size = 1; - // Maximum box size (in pixels). Required! - repeated float max_size = 2; - // Various of aspect ratios. Duplicate ratios will be ignored. - // If none is provided, we use default ratio 1. - repeated float aspect_ratio = 3; - // If true, will flip each aspect ratio. - // For example, if there is aspect ratio "r", - // we will generate aspect ratio "1.0/r" as well. - optional bool flip = 4 [default = true]; - // If true, will clip the prior so that it is within [0, 1] - optional bool clip = 5 [default = false]; - // Variance for adjusting the prior bboxes. - repeated float variance = 6; - // By default, we calculate img_height, img_width, step_x, step_y based on - // bottom[0] (feat) and bottom[1] (img). Unless these values are explicitely - // provided. - // Explicitly provide the img_size. - optional uint32 img_size = 7; - // Either img_size or img_h/img_w should be specified; not both. - optional uint32 img_h = 8; - optional uint32 img_w = 9; - - // Explicitly provide the step size. - optional float step = 10; - // Either step or step_h/step_w should be specified; not both. - optional float step_h = 11; - optional float step_w = 12; - - // Offset to the top left corner of each cell. - optional float offset = 13 [default = 0.5]; - - // width (in pixels). - repeated float width = 14; - // height (in pixels). - repeated float height = 15; - - // initial box size - repeated float fixed_size = 16; - // aspect ration for box - repeated float fixed_ratio = 17; - // density of some type of boxes on image - repeated float density = 18; -} - -message PSROIPoolingParameter { - required float spatial_scale = 1; - required int32 output_dim = 2; // output channel number - required int32 group_size = 3; // number of groups to encode position-sensitive score maps - } - -message PythonParameter { - optional string module = 1; - optional string layer = 2; - // This value is set to the attribute `param_str` of the `PythonLayer` object - // in Python before calling the `setup()` method. This could be a number, - // string, dictionary in Python dict format, JSON, etc. 
You may parse this - // string in `setup` method and use it in `forward` and `backward`. - optional string param_str = 3 [default = '']; - // Whether this PythonLayer is shared among worker solvers during data parallelism. - // If true, each worker solver sequentially run forward from this layer. - // This value should be set true if you are using it as a data layer. - optional bool share_in_parallel = 4 [default = false]; -} - -// Message that stores parameters used by RecurrentLayer -message RecurrentParameter { - // The dimension of the output (and usually hidden state) representation -- - // must be explicitly set to non-zero. - optional uint32 num_output = 1 [default = 0]; - - optional FillerParameter weight_filler = 2; // The filler for the weight - optional FillerParameter bias_filler = 3; // The filler for the bias - - // Whether to enable displaying debug_info in the unrolled recurrent net. - optional bool debug_info = 4 [default = false]; - - // Whether to add as additional inputs (bottoms) the initial hidden state - // blobs, and add as additional outputs (tops) the final timestep hidden state - // blobs. The number of additional bottom/top blobs required depends on the - // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs. - optional bool expose_hidden = 5 [default = false]; -} - -// Message that stores parameters used by ReductionLayer -message ReductionParameter { - enum ReductionOp { - SUM = 1; - ASUM = 2; - SUMSQ = 3; - MEAN = 4; - } - - optional ReductionOp operation = 1 [default = SUM]; // reduction operation - - // The first axis to reduce to a scalar -- may be negative to index from the - // end (e.g., -1 for the last axis). - // (Currently, only reduction along ALL "tail" axes is supported; reduction - // of axis M through N, where N < num_axes - 1, is unsupported.) - // Suppose we have an n-axis bottom Blob with shape: - // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). - // If axis == m, the output Blob will have shape - // (d0, d1, d2, ..., d(m-1)), - // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) - // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. - // If axis == 0 (the default), the output Blob always has the empty shape - // (count 1), performing reduction across the entire input -- - // often useful for creating new loss functions. - optional int32 axis = 2 [default = 0]; - - optional float coeff = 3 [default = 1.0]; // coefficient for output -} - -// Message that stores parameters used by ReLULayer -message ReLUParameter { - // Allow non-zero slope for negative inputs to speed up optimization - // Described in: - // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities - // improve neural network acoustic models. In ICML Workshop on Deep Learning - // for Audio, Speech, and Language Processing. - optional float negative_slope = 1 [default = 0]; - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 2 [default = DEFAULT]; -} - -message ReLU6Parameter { - // Allows to limit ReLU activation from the top and clip by specified value - // - optional float n = 1 [default = 6]; -} - -message ReshapeParameter { - // Specify the output dimensions. If some of the dimensions are set to 0, - // the corresponding dimension from the bottom layer is used (unchanged). - // Exactly one dimension may be set to -1, in which case its value is - // inferred from the count of the bottom blob and the remaining dimensions. 
- // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: - // - // layer { - // type: "Reshape" bottom: "input" top: "output" - // reshape_param { ... } - // } - // - // If "input" is 2D with shape 2 x 8, then the following reshape_param - // specifications are all equivalent, producing a 3D blob "output" with shape - // 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } - // reshape_param { shape { dim: 0 dim:-1 dim: 4 } } - // - optional BlobShape shape = 1; - - // axis and num_axes control the portion of the bottom blob's shape that are - // replaced by (included in) the reshape. By default (axis == 0 and - // num_axes == -1), the entire bottom blob shape is included in the reshape, - // and hence the shape field must specify the entire output shape. - // - // axis may be non-zero to retain some portion of the beginning of the input - // shape (and may be negative to index from the end; e.g., -1 to begin the - // reshape after the last axis, including nothing in the reshape, - // -2 to include only the last axis, etc.). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are all equivalent, - // producing a blob "output" with shape 2 x 2 x 4: - // - // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } - // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } - // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } - // - // num_axes specifies the extent of the reshape. - // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on - // input axes in the range [axis, axis+num_axes]. - // num_axes may also be -1, the default, to include all remaining axes - // (starting from axis). - // - // For example, suppose "input" is a 2D blob with shape 2 x 8. - // Then the following ReshapeLayer specifications are equivalent, - // producing a blob "output" with shape 1 x 2 x 8. - // - // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } - // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } - // reshape_param { shape { dim: 1 } num_axes: 0 } - // - // On the other hand, these would produce output blob shape 2 x 1 x 8: - // - // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } - // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } - // - optional int32 axis = 2 [default = 0]; - optional int32 num_axes = 3 [default = -1]; -} - -message ReverseParameter { - // axis controls the data axis which shall be inverted. - // The layout of the content will not be inverted - // - // The default axis is 0 that means: - // data_previous[n] == data_afterwards[N - n -1] - // where N is the shape of axis(n) - // - // Usually this layer will be used with recurrent layers to invert the - // time axis which is axis 0 - // This layer will therefore swap the order in time but not the - // order of the actual data. - optional int32 axis = 1 [default = 0]; -} - -// Message that stores parameters used by ROIPoolingLayer -message ROIPoolingParameter { - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. 
- optional uint32 pooled_h = 1 [default = 0]; // The pooled output height - optional uint32 pooled_w = 2 [default = 0]; // The pooled output width - // Multiplicative spatial scale factor to translate ROI coords from their - // input scale to the scale used when pooling - optional float spatial_scale = 3 [default = 1]; -} - -message ROIWarpingTestParameter { - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. - optional uint32 pooled_h = 1 [default = 0]; // The pooled output height - optional uint32 pooled_w = 2 [default = 0]; // The pooled output width - // Multiplicative spatial scale factor to translate ROI coords from their - // input scale to the scale used when pooling - optional float spatial_scale = 3 [default = 1]; -} -message ROIWarpingParameter { - // Pad, kernel size, and stride are all given as a single value for equal - // dimensions in height and width or as Y, X pairs. - optional uint32 pooled_h = 1 [default = 0]; // The pooled output height - optional uint32 pooled_w = 2 [default = 0]; // The pooled output width - // Multiplicative spatial scale factor to translate ROI coords from their - // input scale to the scale used when pooling - optional float spatial_scale = 3 [default = 1]; -} - -message ScaleParameter { - // The first axis of bottom[0] (the first input Blob) along which to apply - // bottom[1] (the second input Blob). May be negative to index from the end - // (e.g., -1 for the last axis). - // - // For example, if bottom[0] is 4D with shape 100x3x40x60, the output - // top[0] will have the same shape, and bottom[1] may have any of the - // following shapes (for the given value of axis): - // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 - // (axis == 1 == -3) 3; 3x40; 3x40x60 - // (axis == 2 == -2) 40; 40x60 - // (axis == 3 == -1) 60 - // Furthermore, bottom[1] may have the empty shape (regardless of the value of - // "axis") -- a scalar multiplier. - optional int32 axis = 1 [default = 1]; - - // (num_axes is ignored unless just one bottom is given and the scale is - // a learned parameter of the layer. Otherwise, num_axes is determined by the - // number of axes by the second bottom.) - // The number of axes of the input (bottom[0]) covered by the scale - // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. - // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar. - optional int32 num_axes = 2 [default = 1]; - - // (filler is ignored unless just one bottom is given and the scale is - // a learned parameter of the layer.) - // The initialization for the learned scale parameter. - // Default is the unit (1) initialization, resulting in the ScaleLayer - // initially performing the identity operation. - optional FillerParameter filler = 3; - - // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but - // may be more efficient). Initialized with bias_filler (defaults to 0). - optional bool bias_term = 4 [default = false]; - optional FillerParameter bias_filler = 5; -} - -message SigmoidParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -message SliceParameter { - // The axis along which to slice -- may be negative to index from the end - // (e.g., -1 for the last axis). - // By default, SliceLayer concatenates blobs along the "channels" axis (1). 
- optional int32 axis = 3 [default = 1]; - repeated uint32 slice_point = 2; - - // DEPRECATED: alias for "axis" -- does not support negative indexing. - optional uint32 slice_dim = 1 [default = 1]; -} - -message SmoothL1LossParameter { - // SmoothL1Loss(x) = - // 0.5 * (sigma * x) ** 2 -- if x < 1.0 / sigma / sigma - // |x| - 0.5 / sigma / sigma -- otherwise - optional float sigma = 1 [default = 1]; -} - -// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer -message SoftmaxParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; - - // The axis along which to perform the softmax -- may be negative to index - // from the end (e.g., -1 for the last axis). - // Any other axes will be evaluated as independent softmaxes. - optional int32 axis = 2 [default = 1]; -} - -message TanHParameter { - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 1 [default = DEFAULT]; -} - -// Message that stores parameters used by TileLayer -message TileParameter { - // The index of the axis to tile. - optional int32 axis = 1 [default = 1]; - - // The number of copies (tiles) of the blob to output. - optional int32 tiles = 2; -} - -// Message that stores parameters used by ThresholdLayer -message ThresholdParameter { - optional float threshold = 1 [default = 0]; // Strictly positive values -} - -message WindowDataParameter { - // Specify the data source. - optional string source = 1; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 2 [default = 1]; - optional string mean_file = 3; - // Specify the batch size. - optional uint32 batch_size = 4; - // Specify if we would like to randomly crop an image. - optional uint32 crop_size = 5 [default = 0]; - // Specify if we want to randomly mirror data. - optional bool mirror = 6 [default = false]; - // Foreground (object) overlap threshold - optional float fg_threshold = 7 [default = 0.5]; - // Background (non-object) overlap threshold - optional float bg_threshold = 8 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float fg_fraction = 9 [default = 0.25]; - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 context_pad = 10 [default = 0]; - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string crop_mode = 11 [default = "warp"]; - // cache_images: will load all images in memory for faster access - optional bool cache_images = 12 [default = false]; - // append root_folder to locate images - optional string root_folder = 13 [default = ""]; -} - -message SPPParameter { - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional uint32 pyramid_height = 1; - optional PoolMethod pool = 2 [default = MAX]; // The pooling method - enum Engine { - DEFAULT = 0; - CAFFE = 1; - CUDNN = 2; - } - optional Engine engine = 6 [default = DEFAULT]; -} - -// DEPRECATED: use LayerParameter. 
-message V1LayerParameter { - repeated string bottom = 2; - repeated string top = 3; - optional string name = 4; - repeated NetStateRule include = 32; - repeated NetStateRule exclude = 33; - enum LayerType { - NONE = 0; - ABSVAL = 35; - ACCURACY = 1; - ARGMAX = 30; - BNLL = 2; - CONCAT = 3; - CONTRASTIVE_LOSS = 37; - CONVOLUTION = 4; - DATA = 5; - DECONVOLUTION = 39; - DROPOUT = 6; - DUMMY_DATA = 32; - EUCLIDEAN_LOSS = 7; - ELTWISE = 25; - EXP = 38; - FLATTEN = 8; - HDF5_DATA = 9; - HDF5_OUTPUT = 10; - HINGE_LOSS = 28; - IM2COL = 11; - IMAGE_DATA = 12; - INFOGAIN_LOSS = 13; - INNER_PRODUCT = 14; - LRN = 15; - MEMORY_DATA = 29; - MULTINOMIAL_LOGISTIC_LOSS = 16; - MVN = 34; - POOLING = 17; - POWER = 26; - RELU = 18; - SIGMOID = 19; - SIGMOID_CROSS_ENTROPY_LOSS = 27; - SILENCE = 36; - SOFTMAX = 20; - SOFTMAX_LOSS = 21; - SPLIT = 22; - SLICE = 33; - TANH = 23; - WINDOW_DATA = 24; - THRESHOLD = 31; - } - optional LayerType type = 5; - repeated BlobProto blobs = 6; - repeated string param = 1001; - repeated DimCheckMode blob_share_mode = 1002; - enum DimCheckMode { - STRICT = 0; - PERMISSIVE = 1; - } - repeated float blobs_lr = 7; - repeated float weight_decay = 8; - repeated float loss_weight = 35; - optional AccuracyParameter accuracy_param = 27; - optional ArgMaxParameter argmax_param = 23; - optional ConcatParameter concat_param = 9; - optional ContrastiveLossParameter contrastive_loss_param = 40; - optional ConvolutionParameter convolution_param = 10; - optional DataParameter data_param = 11; - optional DropoutParameter dropout_param = 12; - optional DummyDataParameter dummy_data_param = 26; - optional EltwiseParameter eltwise_param = 24; - optional ExpParameter exp_param = 41; - optional HDF5DataParameter hdf5_data_param = 13; - optional HDF5OutputParameter hdf5_output_param = 14; - optional HingeLossParameter hinge_loss_param = 29; - optional ImageDataParameter image_data_param = 15; - optional InfogainLossParameter infogain_loss_param = 16; - optional InnerProductParameter inner_product_param = 17; - optional LRNParameter lrn_param = 18; - optional MemoryDataParameter memory_data_param = 22; - optional MVNParameter mvn_param = 34; - optional PoolingParameter pooling_param = 19; - optional PowerParameter power_param = 21; - optional ReLUParameter relu_param = 30; - optional SigmoidParameter sigmoid_param = 38; - optional SoftmaxParameter softmax_param = 39; - optional SliceParameter slice_param = 31; - optional TanHParameter tanh_param = 37; - optional ThresholdParameter threshold_param = 25; - optional WindowDataParameter window_data_param = 20; - optional TransformationParameter transform_param = 36; - optional LossParameter loss_param = 42; - optional V0LayerParameter layer = 1; -} - -// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters -// in Caffe. We keep this message type around for legacy support. -message V0LayerParameter { - optional string name = 1; // the layer name - optional string type = 2; // the string to specify the layer type - - // Parameters to specify layers with inner products. 
- optional uint32 num_output = 3; // The number of outputs for the layer - optional bool biasterm = 4 [default = true]; // whether to have bias terms - optional FillerParameter weight_filler = 5; // The filler for the weight - optional FillerParameter bias_filler = 6; // The filler for the bias - - optional uint32 pad = 7 [default = 0]; // The padding size - optional uint32 kernelsize = 8; // The kernel size - optional uint32 group = 9 [default = 1]; // The group size for group conv - optional uint32 stride = 10 [default = 1]; // The stride - enum PoolMethod { - MAX = 0; - AVE = 1; - STOCHASTIC = 2; - } - optional PoolMethod pool = 11 [default = MAX]; // The pooling method - optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio - - optional uint32 local_size = 13 [default = 5]; // for local response norm - optional float alpha = 14 [default = 1.]; // for local response norm - optional float beta = 15 [default = 0.75]; // for local response norm - optional float k = 22 [default = 1.]; - - // For data layers, specify the data source - optional string source = 16; - // For data pre-processing, we can do simple scaling and subtracting the - // data mean, if provided. Note that the mean subtraction is always carried - // out before scaling. - optional float scale = 17 [default = 1]; - optional string meanfile = 18; - // For data layers, specify the batch size. - optional uint32 batchsize = 19; - // For data layers, specify if we would like to randomly crop an image. - optional uint32 cropsize = 20 [default = 0]; - // For data layers, specify if we want to randomly mirror data. - optional bool mirror = 21 [default = false]; - - // The blobs containing the numeric parameters of the layer - repeated BlobProto blobs = 50; - // The ratio that is multiplied on the global learning rate. If you want to - // set the learning ratio for one blob, you need to set it for all blobs. - repeated float blobs_lr = 51; - // The weight decay that is multiplied on the global weight decay. - repeated float weight_decay = 52; - - // The rand_skip variable is for the data layer to skip a few data points - // to avoid all asynchronous sgd clients to start at the same point. The skip - // point would be set as rand_skip * rand(0,1). Note that rand_skip should not - // be larger than the number of keys in the database. - optional uint32 rand_skip = 53 [default = 0]; - - // Fields related to detection (det_*) - // foreground (object) overlap threshold - optional float det_fg_threshold = 54 [default = 0.5]; - // background (non-object) overlap threshold - optional float det_bg_threshold = 55 [default = 0.5]; - // Fraction of batch that should be foreground objects - optional float det_fg_fraction = 56 [default = 0.25]; - - // optional bool OBSOLETE_can_clobber = 57 [default = true]; - - // Amount of contextual padding to add around a window - // (used only by the window_data_layer) - optional uint32 det_context_pad = 58 [default = 0]; - - // Mode for cropping out a detection window - // warp: cropped window is warped to a fixed size and aspect ratio - // square: the tightest square around the window is cropped - optional string det_crop_mode = 59 [default = "warp"]; - - // For ReshapeLayer, one needs to specify the new dimensions. - optional int32 new_num = 60 [default = 0]; - optional int32 new_channels = 61 [default = 0]; - optional int32 new_height = 62 [default = 0]; - optional int32 new_width = 63 [default = 0]; - - // Whether or not ImageLayer should shuffle the list of files at every epoch. 
- // It will also resize images if new_height or new_width are not zero. - optional bool shuffle_images = 64 [default = false]; - - // For ConcatLayer, one needs to specify the dimension for concatenation, and - // the other dimensions must be the same for all the bottom blobs. - // By default it will concatenate blobs along the channels dimension. - optional uint32 concat_dim = 65 [default = 1]; - - optional HDF5OutputParameter hdf5_output_param = 1001; -} - -message PReLUParameter { - // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: - // Surpassing Human-Level Performance on ImageNet Classification, 2015. - - // Initial value of a_i. Default is a_i=0.25 for all i. - optional FillerParameter filler = 1; - // Whether or not slope parameters are shared across channels. - optional bool channel_shared = 2 [default = false]; -} - -message RegionYoloParameter { - optional int32 coords = 1 [default = 4]; - optional int32 classes = 2 [default = 20]; - optional int32 num = 3 [default = 1]; - optional bool do_softmax = 4 [default = true]; - repeated float anchors = 5; - repeated int32 mask = 6; -} - -message ReorgYoloParameter { - optional int32 stride = 1 [default = 1]; -} - -// Message used by AugmentationParameter for describing how to generate augmentation parameters -message RandomGeneratorParameter { - optional string rand_type = 1 [default = "uniform" ]; // can be uniform, gaussian, bernoulli - optional bool exp = 2 [default = false ]; // after generating the random number, exponentiate it or not - optional float mean = 4 [default = 0. ]; // mean of the random variable - optional float spread = 5 [default = 0. ]; // half of interval length for uniform; standard deviation for gaussian - optional float prob = 6 [default = 1.]; - optional bool apply_schedule = 7 [default = true]; - optional bool discretize = 8 [default = false]; //Discretize (Round) value from rng to INT - optional float multiplier = 9 [default = 1.]; //Final random value will be multiplied by this. 
(Useful for discrete distributions) -} - -message CoeffScheduleParameter { - optional float half_life = 1 [default = 1]; - optional float initial_coeff = 2 [default = 1]; - optional float final_coeff = 3 [default = 1]; -} - -// Message storing the actual coefficients of a transformation -// IMPORTANT: default values should be 0 or 1 -message AugmentationCoeff { - // Spatial - optional float mirror = 1 [default = 0]; - optional float dx = 2 [default = 0]; - optional float dy = 3 [default = 0]; - optional float angle = 4 [default = 0]; - optional float zoom_x = 5 [default = 1]; - optional float zoom_y = 6 [default = 1]; - - // Chromatic - optional float gamma = 100 [default = 1]; - optional float brightness = 101 [default = 0]; - optional float contrast = 102 [default = 1]; - optional float color1 = 103 [default = 1]; - optional float color2 = 104 [default = 1]; - optional float color3 = 105 [default = 1]; - - // Chromatic-Eigen - optional float pow_nomean0 = 10 [default = 1]; - optional float pow_nomean1 = 11 [default = 1]; - optional float pow_nomean2 = 12 [default = 1]; - optional float add_nomean0 = 13 [default = 0]; - optional float add_nomean1 = 14 [default = 0]; - optional float add_nomean2 = 15 [default = 0]; - optional float mult_nomean0 = 16 [default = 1]; - optional float mult_nomean1 = 17 [default = 1]; - optional float mult_nomean2 = 18 [default = 1]; - optional float pow_withmean0 = 19 [default = 1]; - optional float pow_withmean1 = 20 [default = 1]; - optional float pow_withmean2 = 21 [default = 1]; - optional float add_withmean0 = 22 [default = 0]; - optional float add_withmean1 = 23 [default = 0]; - optional float add_withmean2 = 24 [default = 0]; - optional float mult_withmean0 = 25 [default = 1]; - optional float mult_withmean1 = 26 [default = 1]; - optional float mult_withmean2 = 27 [default = 1]; - optional float lmult_pow = 28 [default = 1]; - optional float lmult_add = 29 [default = 0]; - optional float lmult_mult = 30 [default = 1]; - optional float col_angle = 31 [default = 0]; - - // Effect - optional float fog_amount = 38 [default = 0]; - optional float fog_size = 39 [default = 0]; - optional float motion_blur_angle = 40 [default = 0]; - optional float motion_blur_size = 41 [default = 0]; - optional float shadow_angle = 42 [default = 0]; - optional float shadow_distance = 43 [default = 0]; - optional float shadow_strength = 44 [default = 0]; - optional float noise = 45 [default = 0]; -} - -message AugmentationParameter { - optional uint32 crop_width = 33 [default = 0]; - optional uint32 crop_height = 34 [default = 0]; - optional string write_augmented = 2 [default = ""]; - optional float max_multiplier = 3 [default = 255.]; - optional bool augment_during_test = 4 [default = false]; - optional uint32 recompute_mean = 5 [default = 0]; // number of iterations to recompute mean (0 - do not recompute) - optional string write_mean = 6 [default = ""]; - optional bool mean_per_pixel = 7 [default = true]; // if the mean is computed for each pixel or for the whole channel - repeated float mean = 18; // Eddy: Per pixel RGB mean to subtract - optional string mode = 8 [default = "add"]; // can be "add" or "replace" or "regenerate" - optional uint32 bottomwidth = 80 [default = 0]; - optional uint32 bottomheight = 81 [default = 0]; - optional uint32 num = 82 [default = 0]; - - repeated float chromatic_eigvec = 83; - - // Spatial - optional RandomGeneratorParameter mirror = 10; - optional RandomGeneratorParameter translate = 11 ; - optional RandomGeneratorParameter rotate = 12 ; - 
optional RandomGeneratorParameter zoom = 13 ; - optional RandomGeneratorParameter squeeze = 14 ; - optional RandomGeneratorParameter translate_x = 15 ; - optional RandomGeneratorParameter translate_y = 16 ; - - - // Chromatic - optional RandomGeneratorParameter gamma = 35 ; - optional RandomGeneratorParameter brightness = 36 ; - optional RandomGeneratorParameter contrast = 37 ; - optional RandomGeneratorParameter color = 38 ; - - // Chromatic-Eigen - optional RandomGeneratorParameter lmult_pow = 20 ; - optional RandomGeneratorParameter lmult_mult = 21 ; - optional RandomGeneratorParameter lmult_add = 22 ; - optional RandomGeneratorParameter sat_pow = 23 ; - optional RandomGeneratorParameter sat_mult = 24 ; - optional RandomGeneratorParameter sat_add = 25 ; - optional RandomGeneratorParameter col_pow = 26 ; - optional RandomGeneratorParameter col_mult = 27 ; - optional RandomGeneratorParameter col_add = 28 ; - optional RandomGeneratorParameter ladd_pow = 29 ; - optional RandomGeneratorParameter ladd_mult = 30 ; - optional RandomGeneratorParameter ladd_add = 31 ; - optional RandomGeneratorParameter col_rotate = 32 ; - - // Effect - optional RandomGeneratorParameter fog_amount = 100 ; - optional RandomGeneratorParameter fog_size = 101 ; - optional RandomGeneratorParameter motion_blur_angle = 102 ; - optional RandomGeneratorParameter motion_blur_size = 103 ; - optional RandomGeneratorParameter shadow_angle = 104 ; - optional RandomGeneratorParameter shadow_distance = 105 ; - optional RandomGeneratorParameter shadow_strength = 106 ; - optional RandomGeneratorParameter noise = 107 ; -} - -message FlowWarpParameter { - enum FillParameter { - ZERO = 1; - NOT_A_NUMBER = 2; - } - - optional FillParameter fill_value = 1 [ default = ZERO ]; -} - -message CorrelationParameter { - optional uint32 pad = 2 [default = 0]; // The padding size (equal in Y, X) - optional uint32 kernel_size = 3; // The kernel size (square) - optional uint32 max_displacement = 4; // The maximum displacement (square) - optional uint32 stride_1 = 5 [default = 1]; // The stride in blob 1 (equal in Y, X) - optional uint32 stride_2 = 6 [default = 1]; // The stride in blob 2 (equal in Y, X) - - // For Correlation1D: - optional int32 single_direction = 8 [default = 0]; // Correlate only to the left (-1) or right (1) - - optional bool do_abs = 7 [default = false]; // Use absolute value of result - enum CorrelationType { - MULTIPLY = 0; - SUBTRACT = 1; - } - optional CorrelationType correlation_type = 15 [default = MULTIPLY]; // Multiplicative is normal correlation -} - -message ResampleParameter { - enum ResampleType { - NEAREST = 1; - LINEAR = 2; - CUBIC = 3; - AREA = 4; - }; - optional bool antialias = 4 [ default = true ]; - optional uint32 width = 1; - optional uint32 height = 2; - optional ResampleType type = 3 [ default = LINEAR ]; - optional float factor = 5 [ default = 1.0 ]; -} - -message AccumParameter { - optional uint32 top_height = 1 [default = 0]; // The output height - optional uint32 top_width = 2 [default = 0]; // The output width - optional uint32 size_divisible_by = 3 [default = 0]; // Upscales to the minimal size divisible by the given number - optional bool have_reference = 4 [ default = false ]; -} - -message ShuffleChannelParameter { - required uint32 group = 1; -} diff --git a/tools/mo/openvino/tools/mo/front/caffe/psroipooling_ext.py b/tools/mo/openvino/tools/mo/front/caffe/psroipooling_ext.py deleted file mode 100644 index ddbab3fe6b9b91..00000000000000 --- 
a/tools/mo/openvino/tools/mo/front/caffe/psroipooling_ext.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.psroipooling import PSROIPoolingOp -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class PSROIPoolingFrontExtractor(FrontExtractorOp): - op = 'PSROIPooling' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.psroi_pooling_param - - update_attrs = { - 'spatial_scale': param.spatial_scale, - 'output_dim': param.output_dim, - 'group_size': param.group_size, - } - - mapping_rule = merge_attrs(param, update_attrs) - - mapping_rule.update(layout_attrs()) - - # update the attributes of the node - PSROIPoolingOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/python_layer_extractor.py b/tools/mo/openvino/tools/mo/front/caffe/python_layer_extractor.py deleted file mode 100644 index a95001e2b5d5ad..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/python_layer_extractor.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp, CaffePythonFrontExtractorOp - - -class PythonFrontExtractorOp(FrontExtractorOp): - op = 'Python' - enabled = True - - @classmethod - def extract(cls, node): - module = node.pb.python_param.module - layer = node.pb.python_param.layer - layer_type = '{}.{}'.format(module, layer) - if layer_type and layer_type in CaffePythonFrontExtractorOp.registered_ops: - if hasattr(CaffePythonFrontExtractorOp.registered_ops[layer_type], 'extract'): - # CaffePythonFrontExtractorOp.registered_ops[layer_type] is object of FrontExtractorOp and has the - # function extract - return CaffePythonFrontExtractorOp.registered_ops[layer_type].extract(node) - else: - # User defined only Op for this layer and CaffePythonFrontExtractorOp.registered_ops[layer_type] is - # special extractor for Op - return CaffePythonFrontExtractorOp.registered_ops[layer_type](node) diff --git a/tools/mo/openvino/tools/mo/front/caffe/regionyolo_ext.py b/tools/mo/openvino/tools/mo/front/caffe/regionyolo_ext.py deleted file mode 100644 index ec81f78f40e58b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/regionyolo_ext.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.ops.regionyolo import RegionYoloOp -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class RegionYoloFrontExtractor(FrontExtractorOp): - op = 'RegionYolo' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.region_yolo_param - flatten_param = proto_layer.flatten_param - axis = flatten_param.axis - end_axis = flatten_param.end_axis - coords = param.coords - classes = param.classes - num = param.num - update_attrs = { - 'coords': coords, - 'classes': classes, - 'num': num, - 'do_softmax': int(param.do_softmax), - 'anchors': mo_array(param.anchors), - 
'mask': mo_array(param.mask) - } - - flatten_attrs = { - 'axis': axis, - 'end_axis': end_axis - } - - mapping_rule = merge_attrs(param, update_attrs) - - mapping_rule.update(flatten_attrs) - mapping_rule.update(layout_attrs()) - - # update the attributes of the node - RegionYoloOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/register_custom_ops.py b/tools/mo/openvino/tools/mo/front/caffe/register_custom_ops.py deleted file mode 100644 index 3c9d3437391c10..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/register_custom_ops.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementOp, FrontReplacementPattern, FrontReplacementSubgraph -from openvino.tools.mo.front.extractor import FrontExtractorOp, CaffePythonFrontExtractorOp - - -def get_front_classes(): - front_classes = [FrontExtractorOp, CaffePythonFrontExtractorOp, FrontReplacementOp, - FrontReplacementPattern, FrontReplacementSubgraph] - return front_classes diff --git a/tools/mo/openvino/tools/mo/front/caffe/relu6.py b/tools/mo/openvino/tools/mo/front/caffe/relu6.py deleted file mode 100644 index b07f0fd689bc8a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/relu6.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import ReLU6 -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ReLU6FrontExtractor(FrontExtractorOp): - op = 'ReLU6' - enabled = True - - @classmethod - def extract(cls, node): - ReLU6.update_node_stat(node) - return True diff --git a/tools/mo/openvino/tools/mo/front/caffe/relu_ext.py b/tools/mo/openvino/tools/mo/front/caffe/relu_ext.py deleted file mode 100644 index b0401c27057665..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/relu_ext.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import LeakyReLU, ReLU -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ReLUFrontExtractor(FrontExtractorOp): - op = 'relu' - enabled = True - - @classmethod - def extract(cls, node): - assert node.pb, 'Protobuf layer can not be empty' - param = node.pb.relu_param - negative_slope = param.negative_slope - if negative_slope == 0: - ReLU.update_node_stat(node) - else: - LeakyReLU.update_node_stat(node, {'negative_slope': negative_slope}) - return True diff --git a/tools/mo/openvino/tools/mo/front/caffe/reorgyolo_ext.py b/tools/mo/openvino/tools/mo/front/caffe/reorgyolo_ext.py deleted file mode 100644 index ef53de82dfc82f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/reorgyolo_ext.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.reorgyolo import ReorgYoloOp -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ReorgYoloFrontExtractor(FrontExtractorOp): - op = 'ReorgYolo' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.reorg_yolo_param - - stride = param.stride - update_attrs = { - 'stride': stride, - } - 
mapping_rule = merge_attrs(param, update_attrs) - - mapping_rule.update(layout_attrs()) - - # update the attributes of the node - ReorgYoloOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/resample_ext.py b/tools/mo/openvino/tools/mo/front/caffe/resample_ext.py deleted file mode 100644 index b07d0bb15d8c3b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/resample_ext.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.caffe.collect_attributes import merge_attrs -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ResampleFrontExtractor(FrontExtractorOp): - op = 'Resample' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.resample_param - types = [ - "", - 'nearest', - 'linear', - 'cubic', - 'area', - ] - resample_type = types[param.type] - - update_attrs = { - 'antialias': int(param.antialias), - 'height': param.height, - 'width': param.width, - 'type': resample_type, - 'factor': param.factor, - 'fw': 'caffe', - } - - mapping_rule = merge_attrs(param, update_attrs) - mapping_rule['mode'] = mapping_rule['type'] - mapping_rule['axes'] = int64_array([2, 3]) - mapping_rule.pop('type') - Interpolate.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/reshape.py b/tools/mo/openvino/tools/mo/front/caffe/reshape.py deleted file mode 100644 index cb8fb8ee07559c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/reshape.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.reshape import Reshape - - -class ReshapeFrontExtractor(FrontExtractorOp): - op = 'reshape' - enabled = True - - @classmethod - def extract(cls, node): - param = node.pb.reshape_param - - if param.axis != 0: - log.error('The operation "Reshape" has attribute "axis" with unsupported value "{}"'.format(param['axis'])) - return False - - if param.num_axes != -1: - log.error('The operation "Reshape" has attribute "num_axes" with unsupported value "{}"'.format( - param['num_axes'])) - return False - - Reshape.update_node_stat(node, { - 'dim': list(param.shape.dim), - }) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/roipooling_ext.py b/tools/mo/openvino/tools/mo/front/caffe/roipooling_ext.py deleted file mode 100644 index 3ab519a127248b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/roipooling_ext.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.roipooling import ROIPooling - - -class ROIPoolingFrontExtractor(FrontExtractorOp): - op = 'roipooling' - enabled = True - - @classmethod - def extract(cls, node): - param = node.pb.roi_pooling_param - attrs = { - 'pooled_h': param.pooled_h, - 'pooled_w': param.pooled_w, - 'spatial_scale': param.spatial_scale, - } - - ROIPooling.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/scale_ext.py 
b/tools/mo/openvino/tools/mo/front/caffe/scale_ext.py deleted file mode 100644 index 9aef9587d195a4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/scale_ext.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input, weights_biases -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp -from openvino.tools.mo.utils.utils import NamedAttrsClass - - -class ScaleFrontExtractor(FrontExtractorOp): - op = 'scale' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.pb - model = node.model_pb - param = pb.scale_param - attrs = { - 'axis': param.axis, - } - - if model is None and len(pb.bottom) == 1: - # default weights and biases for scale layer if the caffemodel file doesn't contain them - model = NamedAttrsClass({'blobs': mo_array([NamedAttrsClass({'data': mo_array([1])}), - NamedAttrsClass({'data': mo_array([0])})])}) - # scale with 1 input and 1 or 2 blobs - if model and len(model.blobs) != 0 and len(pb.bottom) == 1: - attrs.update(weights_biases(param.bias_term, model)) - # 2 inputs + bias - elif len(pb.bottom) == 2 and param.bias_term: - if model is None or len(model.blobs) == 0: - # default bias for scale layer with 2 inputs if the caffemodel file doesn't contain them - model = NamedAttrsClass({'blobs': mo_array([NamedAttrsClass({'data': mo_array([0])})])}) - - embed_input(attrs, 1, 'biases', model.blobs[0].data) - ScaleShiftOp.update_node_stat(node, attrs) - return cls.enabled - diff --git a/tools/mo/openvino/tools/mo/front/caffe/shufflechannel_ext.py b/tools/mo/openvino/tools/mo/front/caffe/shufflechannel_ext.py deleted file mode 100644 index 532ca653e618ef..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/shufflechannel_ext.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.shufflechannel import ShuffleChannels -from openvino.tools.mo.front.caffe.collect_attributes import collect_attributes -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ShuffleChannelFrontExtractor(FrontExtractorOp): - op = 'ShuffleChannel' - enabled = True - - @classmethod - def extract(cls, node): - mapping_rule = collect_attributes(node.pb.shuffle_channel_param) - mapping_rule.update(layout_attrs()) - - # update the attributes of the node - ShuffleChannels.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/sigmoid.py b/tools/mo/openvino/tools/mo/front/caffe/sigmoid.py deleted file mode 100644 index dd67350a5976ef..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/sigmoid.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import Sigmoid -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SigmoidFrontExtractor(FrontExtractorOp): - op = 'Sigmoid' - enabled = True - - @classmethod - def extract(cls, node): - Sigmoid.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/slice_ext.py b/tools/mo/openvino/tools/mo/front/caffe/slice_ext.py deleted file mode 100644 index 
0dd3d42bb1487a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/slice_ext.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.slice import CaffeSlice - - -class SliceFrontExtractor(FrontExtractorOp): - op = 'slice' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.slice_param - - # slice_dim is deprecated parameter and is used as alias for axis - # however if slice_dim is defined and axis is default, we use slice_dim - if param.slice_dim != 1 and param.axis == 1: - axis = param.slice_dim - else: - axis = param.axis - - update_attrs = { - 'axis': axis, - 'slice_point': int64_array(param.slice_point), - 'in_ports_count': 1, - 'out_ports_count': len(param.slice_point) + 1, - } - - CaffeSlice.update_node_stat(node, update_attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/slice_to_split.py b/tools/mo/openvino/tools/mo/front/caffe/slice_to_split.py deleted file mode 100644 index e5f2c41e53993b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/slice_to_split.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.split import VariadicSplit, Split -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph - - -class SliceToVariadicSplit(FrontReplacementOp): - op = "CaffeSlice" - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['op'] - name = node.soft_get('name', node.id) - - assert node.has_valid('axis'), 'Slice operation `{}` has no `axis` parameter'.format(name) - axis = int64_array(node.axis) - if axis.size != 1: - return - - assert node.has_valid('slice_point'), 'Slice operation `{}` has no `slice_point` parameter'.format(name) - slice_point = node.slice_point - - if slice_point.size == 0: - num_splits = len(node.out_ports()) - split_node = create_op_with_const_inputs(graph, op=Split, - port_value_dict={1: axis}, - op_attrs={'name': name, 'num_splits': num_splits}) - else: - size_splits = [] - curr_pos = 0 - for point in slice_point: - assert point > curr_pos - size_splits.append(point - curr_pos) - curr_pos = point - size_splits.append(-1) - - split_node = create_op_with_const_inputs(graph, op=VariadicSplit, - port_value_dict={1: axis, 2: int64_array(size_splits)}, - op_attrs={'name': name, 'out_ports_count': len(slice_point) + 1}) - - node.in_port(0).get_connection().set_destination(split_node.in_port(0)) - for i, port in node.out_ports().items(): - node.out_port(i).get_connection().set_source(split_node.out_port(i)) diff --git a/tools/mo/openvino/tools/mo/front/caffe/softmax_ext.py b/tools/mo/openvino/tools/mo/front/caffe/softmax_ext.py deleted file mode 100644 index e5f2eb669a7f3e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/softmax_ext.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.softmax import 
Softmax - - -class SoftmaxFrontExtractor(FrontExtractorOp): - op = 'Softmax' - enabled = True - - @classmethod - def extract(cls, node): - proto_layer = node.pb - param = proto_layer.softmax_param - - attrs = { - 'axis': param.axis - } - - # update the attributes of the node - Softmax.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/caffe/split_to_identity.py b/tools/mo/openvino/tools/mo/front/caffe/split_to_identity.py deleted file mode 100644 index 7f98f680ef59a5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/split_to_identity.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.identity import Identity -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Graph - - -class SplitToIdentity(FrontReplacementOp): - """ - The Split layer in Caffe copies input blob to a number of output layers. The Split layer in OpenVINO divides - the input blob into several peaces. The Caffe Split layer is redundant because OpenVINO takes care of - creation of the intermediate blobs if it is necessary. - - The replacer changes the 'op' attribute of the node to 'Identity' and set all 'out' edge attributes to be 0. So the - Identity operations are removed further in the pipeline. - """ - op = "Split" - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['op'] - - identity = Identity(graph, {'name': node.soft_get('name', node.id)}).create_node() - node.in_port(0).get_connection().set_destination(identity.in_port(0)) - - for idx, port in node.out_ports().items(): - port.get_connection().set_source(identity.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/caffe/tanh.py b/tools/mo/openvino/tools/mo/front/caffe/tanh.py deleted file mode 100644 index 05b301cce5201a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/caffe/tanh.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import Tanh -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class TanhFrontExtractor(FrontExtractorOp): - op = 'Tanh' - enabled = True - - @classmethod - def extract(cls, node): - Tanh.update_node_stat(node) - return cls.enabled - diff --git a/tools/mo/openvino/tools/mo/front/common/__init__.py b/tools/mo/openvino/tools/mo/front/common/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/common/custom_replacement_registry.py b/tools/mo/openvino/tools/mo/front/common/custom_replacement_registry.py deleted file mode 100644 index 299f7f12338010..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/custom_replacement_registry.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import os - -from openvino.tools.mo.utils.custom_replacement_config import parse_custom_replacement_config_file -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class CustomReplacementRegistry(object): - """ - Registry that contains registered custom calls descriptors. 
- """ - - class __CustomReplacementRegistry: - def __init__(self): - self.registry = {} - - def __str__(self): - return repr(self) + str(self.registry) - - def __init__(self): - if not CustomReplacementRegistry.instance: - CustomReplacementRegistry.instance = CustomReplacementRegistry.__CustomReplacementRegistry() - else: - pass - # CustomCallRegistry.instance.val = arg - - def __getattr__(self, name): - return getattr(self.instance, name) - - instance = None - - def add_custom_replacement_description_from_config(self, file_name: str): - if not os.path.exists(file_name): - raise Error("Custom replacement configuration file '{}' doesn't exist. ".format(file_name) + - refer_to_faq_msg(46)) - - descriptions = parse_custom_replacement_config_file(file_name) - for desc in descriptions: - self.registry.setdefault(desc.id, list()).append(desc) - log.info("Registered custom replacement with id '{}'".format(desc.id)) - - def get_custom_replacement_description(self, replacement_id: str): - if replacement_id in self.registry: - return self.registry[replacement_id] - else: - log.warning("Configuration file for custom replacement with id '{}' doesn't exist".format(replacement_id)) - return None - - def get_all_replacements_descriptions(self): - result = list() - for l in self.registry.values(): - result.extend(l) - return result diff --git a/tools/mo/openvino/tools/mo/front/common/extractors/__init__.py b/tools/mo/openvino/tools/mo/front/common/extractors/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/openvino/tools/mo/front/common/extractors/utils.py b/tools/mo/openvino/tools/mo/front/common/extractors/utils.py deleted file mode 100644 index bec3432639f85e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/extractors/utils.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array - - -def layout_attrs(): - return { - 'spatial_dims': int64_array([2, 3]), - 'channel_dims': int64_array([1]), - 'batch_dims': int64_array([0]), - 'layout': 'NCHW' - } diff --git a/tools/mo/openvino/tools/mo/front/common/find_unsupported_ops.py b/tools/mo/openvino/tools/mo/front/common/find_unsupported_ops.py deleted file mode 100644 index 013986c637bf30..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/find_unsupported_ops.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.graph.graph import Node, Graph - - -def find_unsupported_ops(graph: Graph): - """ - The function returns list of node name those are not supported. Currently nodes that product non FP32 data tensors - or has undefined 'type' attribute are considered unsupported. - :param graph: current graph with operations. Data nodes are not yet added. - :return: the list of node names which are not supported - """ - unsupported = list() - for node_name in graph.nodes(): - node = Node(graph, node_name) - # op node that produce non FP32 data or has no type are considered unsupported - if node.kind == 'op': - if node.has_valid('type') or (node.has_valid('op') and node.op == 'Result'): - for out_data_node in node.out_nodes().values(): - if out_data_node.has_valid('data_type') and out_data_node.data_type != np.float32: - log.info('Node "{}" produces output as non FP32. 
Consider it unsupported'.format(node_name)) - unsupported.append(node.id) - else: - log.info('Node "{}" does not have type. Consider it unsupported'.format(node_name)) - unsupported.append(node.id) - return unsupported - diff --git a/tools/mo/openvino/tools/mo/front/common/layout.py b/tools/mo/openvino/tools/mo/front/common/layout.py deleted file mode 100644 index 6603e6725459e8..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/layout.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value -from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error - -nchw_to_nhwc_permute = int64_array([0, 2, 3, 1]) -nhwc_to_nchw_permute = int64_array([0, 3, 1, 2]) -supported_layouts = ('NCHW', 'NHWC') -# the attribute 'layout' in the graph.graph can have two values only: "NCHW" or "NHWC". If the tensor has 5 dimensions -# then it is necessary to transform "NCHW" to "NCDHW" and "NHWC" to "NDHWC" respectively. The dictionary below id used -# for this purpose. -indices_mapping = {4: {'NCHW': 'NCHW', - 'NHWC': 'NHWC'}, - 5: {'NCHW': 'NCDHW', - 'NHWC': 'NDHWC'}} - - -def convert_shape(shape: np.array, permute: np.array): - result = [0, 0, 0, 0] - for ind, perm_ind in enumerate(permute): - result[ind] = shape[perm_ind] - return mo_array(result) - - -def get_depth_dim(layout: str, shape_len: int): - """ - Gets index of the dimension corresponding to depth. - :param layout: string representing layout: NCHW or NHWC usually. - :param shape_len: the shape length. - :return: index of the 'D' character - """ - assert layout in supported_layouts - assert shape_len == 5 - return indices_mapping[shape_len][layout].find('D') - - -def get_height_dim(layout: str, shape_len: int): - """ - Gets index of the dimension corresponding to height. - :param layout: string representing layout: NCHW or NHWC usually. - :param shape_len: the shape length. - :return: index of the 'H' character - """ - assert layout in supported_layouts - assert 4 <= shape_len <= 5 - return indices_mapping[shape_len][layout].find('H') - - -def get_width_dim(layout: str, shape_len: int): - """ - Gets index of the dimension corresponding to width. - :param layout: string representing layout: NCHW or NHWC usually. - :param shape_len: the shape length. - :return: index of the 'W' character - """ - assert layout in supported_layouts - assert 4 <= shape_len <= 5 - return indices_mapping[shape_len][layout].find('W') - - -def get_features_dim(layout: str, shape_len: int): - """ - Gets index of the dimension corresponding to features. - :param layout: string representing layout: NCHW or NHWC usually. - :param shape_len: the shape length. - :return: index of the 'C' character - """ - assert layout in supported_layouts - assert 4 <= shape_len <= 5 - return indices_mapping[shape_len][layout].find('C') - - -def get_batch_dim(layout: str, shape_len: int): - """ - Gets index of the dimension corresponding to batch. - :param layout: string representing layout: NCHW or NHWC usually. - :param shape_len: the shape length. 
- :return: index of the 'N' character - """ - assert layout in supported_layouts - assert 4 <= shape_len <= 5 - return indices_mapping[shape_len][layout].find('N') - - -def shape_for_layout(layout: str, **kwargs): - """ - Creates 4D or 5D tensor with the layout with specified dimension sizes. - :param layout: layout string. - :param kwargs: dictionary that contains the dimension sizes using the following keys: 'batch', 'features', 'depth', - 'height', 'width'. - :return: shape_array of type np.int64 with 4 or 5 elements. - """ - assert layout in supported_layouts - for required_key in ('batch', 'features', 'height', 'width'): - if required_key not in kwargs: - raise Error('Required parameter "{}" is missing.'.format(required_key)) - for key in kwargs.keys(): - if key not in ('batch', 'features', 'height', 'width', 'depth'): - raise Error('Parameter "{}" is not supported.'.format(key)) - - depth = kwargs.get('depth', None) - shape_len = 4 + (depth is not None) - output_shape = np.ma.ones(shape=[shape_len], dtype=np.int64, fill_value=dynamic_dimension_value) - output_shape[get_batch_dim(layout, shape_len)] = kwargs['batch'] - output_shape[get_height_dim(layout, shape_len)] = kwargs['height'] - output_shape[get_width_dim(layout, shape_len)] = kwargs['width'] - output_shape[get_features_dim(layout, shape_len)] = kwargs['features'] - if depth is not None: - output_shape[get_depth_dim(layout, shape_len)] = depth - return output_shape - - -def get_dim_from_layout(node: Node, dim: str): - """ - Gets index of dimension from layout specified for node. - :param node: node to get dim for. - :param dim: name of dimension to get index for. - :return: tuple with index of the dimension and bool flag if the node has layout specified or no. - """ - layout = None - graph = node.graph - if 'layout_values' in graph.graph['cmd_params'] and graph.graph['cmd_params'].layout_values: - layout_values = graph.graph['cmd_params'].layout_values.copy() - if '' in layout_values: - in_nodes = graph.get_op_nodes(op='Parameter') - if len(in_nodes) == 1: - in_node = in_nodes[0] - layout_values[in_node.soft_get('name', in_node.id)] = layout_values[''] - del layout_values[''] - name = node.soft_get('name', node.id) - if name in layout_values: - if layout_values[name]['source_layout']: - layout = layout_values[name]['source_layout'] - - if layout: - from openvino.runtime import Layout # pylint: disable=no-name-in-module,import-error - - layout_parsed = Layout(layout) - has_dim = layout_parsed.has_name(dim) - if has_dim: - idx = layout_parsed.get_index_by_name(dim) - if idx < 0: - idx = len(node.shape) + idx - return idx, True - else: - return None, True - else: - return None, False diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/__init__.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/batch_norm.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/batch_norm.py deleted file mode 100644 index 0591932217dbc4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/batch_norm.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from 
openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins -from openvino.tools.mo.graph.graph import Node - - -def batch_norm_4_infer(node: Node): - copy_shape_infer(node) - mark_input_bins(node, ['weights', 'biases', 'mean', 'variance']) - if node.has('fix_gamma') and node.fix_gamma: - # go to the 1-st input weights and set all elements to 1 - node.in_node(1).value = np.full_like(node.in_node(1).value, 1, dtype=np.float32) diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/caffe_fallback.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/caffe_fallback.py deleted file mode 100644 index 6116a206c4faed..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/caffe_fallback.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import os - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.find_inputs import find_inputs -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def get_node_top(graph: Graph, name: str): - node = Node(graph, name) - return node.out_edge()['name'] if node else None - - -def build_net(graph: Graph): - try: - if not hasattr(os.environ, 'GLOG_minloglevel'): - os.environ['GLOG_minloglevel'] = '2' - import caffe - log.info('Partial inference via the framework is available') - except ImportError: - log.warning('pyCaffe is not available. Partial inference via the framework is not ' + - 'possible') - return - - try: - net = caffe.Net(graph.proto_path, graph.caffemodel_path, caffe.TEST) - except Exception as err: - raise Error( - 'Error happened while constructing caffe.Net in the Caffe fallback function: {}. ' + - refer_to_faq_msg(12), - str(err) - ) from err - - inputs_node_name = find_inputs(graph) - - reshape_flag = False - for i in inputs_node_name: - new_input_shape = graph.node[i]['shape'].astype(int) - top_node = get_node_top(graph, i) - caffe_shape = list(net.blobs[top_node].shape) - if not np.all(caffe_shape == new_input_shape): - net.blobs[top_node].reshape(*[int(x) for x in new_input_shape]) - reshape_flag = True - - if reshape_flag: - net.reshape() - - try: - net.forward() - except KeyError as err: - log.error('Error happened in Caffe net.forward: {}.'.format(str(err))) - log.error('It may point to the known bug in pycaffe when top and name of the layer do not match.') - log.error('Please make sure that the latest pycaffe is used.') - raise Error('Cannot infer shapes due to exception in Caffe: {}. ' + - refer_to_faq_msg(13), str(err)) from err - except Exception as err: - raise Error('Cannot infer shapes in Caffe net.forward due to exception: {}.' + - refer_to_faq_msg(13), str(err)) from err - - graph.__setattr__('caffe_net', net) - - -def get_net(graph: Graph): - if not graph: - return None - - if graph and not hasattr(graph, 'caffe_net'): - build_net(graph) - return getattr(graph, 'caffe_net', None) - - -def caffe_native_node_infer(node: Node): - """ - Infers shape of the unknown operation via Caffe if it is available. - Requires graph to contain paths to both prototxt and caffemodel files. - When it is visited for the first time, net object is created and written to graph. - Next time, it just takes the built net from graph. 
- - Parameters - ---------- - node node to infer the shape for - - """ - log.error("Caffe fallback is deprecated. It will be removed in future releases. Please use extensions for unsupported layers.\n" + - "See more information in the \"Custom Layers in the Model Optimizer\" chapter of the Model Optimizer Developer Guide", - extra={'is_warning': True}) - log.info('Called "caffe_native_node_infer" for node "{}"'.format(node.id)) - - graph = node.graph - net = get_net(graph) - if not net: - raise Error( - 'Cannot infer shape for node "{}" because there is no Caffe available. ' + - 'Please register python infer function for op = {} or use Caffe for shape inference. ' + - refer_to_faq_msg(14), - node.soft_get('name'), - node.soft_get('op') - ) - - for iout in range(len(node.out_nodes())): - output_shape = int64_array(net.blobs[node.top].data.shape) - node.out_node(iout).shape = output_shape diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/concat.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/concat.py deleted file mode 100644 index ebabc3c8a5221f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/concat.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, is_fully_defined, dynamic_dimension -from openvino.tools.mo.ops.op import PermuteAttrs -from openvino.tools.mo.utils.error import Error - - -def concat_infer(node): - node_name = node.soft_get('name', node.id) - if not node.has('axis'): - N = node.N - axis_input = node.in_node(N) - if axis_input.has_valid('value') and axis_input.value.size == 1: - node['axis'] = axis_input.value.item() - node.graph.remove_edge(axis_input.node, node.node) # TODO add skip attribute instead of deleting - else: - raise Error('Input with value is not specified for node "{}"'.format(node_name)) - else: - N = len(node.in_nodes()) - - shapes = [node.in_node(i).shape for i in range(N)] - if any(s is None for s in shapes): - raise Error('One of the input shapes is not defined for node "{}"'.format(node_name)) - - shape = shape_array(shapes[0]) - - axis = get_canonical_axis_index(shape, node.axis) - node.axis = axis - - mask = np.zeros_like(shape, dtype=bool) - mask[axis] = True # pylint: disable=unsupported-assignment-operation - not_mask = np.logical_not(mask) # pylint: disable=assignment-from-no-return - for s in shapes[1:]: - s = shape_array(s) - if np.ma.allequal(shape[not_mask], s[not_mask]): - shape[mask] += s[mask] - else: - raise Error('Concat input shapes do not match for node "{}" with axis {}'.format(node_name, axis)) - - # dynamic dimensions in the output (except the concat axis) can be deduced from input shape - for pos in range(len(shape)): - if shape[pos] is dynamic_dimension and pos != axis: - for in_shape in shapes: - if in_shape[pos] is not dynamic_dimension: - shape[pos] = in_shape[pos] - - node.out_port(0).data.set_shape(shape) - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) - - values = [node.in_node(i).value for i in range(N)] - if any([v is None for v in values]): - return - - # if one of the input values are dynamic, the output tensor type is inferred from one of the fully defined inputs - output_dtype = np.int64 - for input in values: - if is_fully_defined(input): - output_dtype = input.dtype - - if any(not 
is_fully_defined(v) for v in values): - node.out_port(0).data.set_value(np.ma.concatenate(values, axis=node.axis).astype(output_dtype)) - else: # there is a serious performance benefit to use concatenation as it is implemented below - node.out_node(0).value = np.concatenate(values, axis=node.axis).astype(values[0].dtype, copy=False) - node.out_node(0).shape = shape_array(node.out_node(0).value.shape) diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/crop.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/crop.py deleted file mode 100644 index 5fddf71a3e3bfd..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/crop.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.front.common.partial_infer.utils import mo_array - - -def crop_infer(node): - """ - Crops the shape of the output blob according to input ones be specified params. - Node should have 2 input blobs - 1st blob is getting cropped by specified axis according - to the the 2nd (reference) blob. - The result blob is written to output node shape, and reference blob is removed from graph. - In order to save the reference dims, it is written to dims parameter. - - Parameters - ---------- - node - - - """ - N = len(node.in_nodes()) - if N < 2: - log.debug('Wrong number of bottom blobs in ' + node.node) - return - - shapes = [node.in_node(i).shape for i in range(N)] - if any(s is None for s in shapes): - return - - input_shape = mo_array(shapes[0]) - start_axis = get_canonical_axis_index(input_shape, node.axis) - node.axis = start_axis - - reference_shape = mo_array(shapes[1]) - input_dim = input_shape.size - - # set new shape to current shape - new_shape = input_shape.copy() - ir_axis = [] - ir_offset = [] - dim = [] - - for i in range(0, input_dim): - if i < start_axis: - new_shape[i] = input_shape[i] - continue - - crop_offset = 0 - if len(node.offset) == 1: - crop_offset = node.offset[0] - elif len(node.offset) > 1: - crop_offset = node.offset[i - start_axis] - - if input_shape[i] - crop_offset < reference_shape[i]: - log.error('The crop for dimension is out of bounds in ' + node.node) - return - - dim.append(reference_shape[i]) - ir_axis.append(i) - ir_offset.append(crop_offset) - new_shape[i] = reference_shape[i] - - node.axis = ir_axis - node.offset = ir_offset - node.dim = dim - node.out_node().shape = new_shape diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/elemental.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/elemental.py deleted file mode 100644 index 47816313c01497..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/elemental.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -def single_output_infer(node, shape_infer, value_infer=None): - node.out_node(0).shape = shape_infer(node) - - if value_infer is not None and \ - 'value' in node.in_node() and \ - node.in_node().value is not None: - node.out_node(0).value = value_infer(node) - - -def copy_shape_infer(node, value_infer=None): - """ - Sets output dimensions of node equal to input ones - Args: - node: graph node - """ - single_output_infer(node, lambda n: n.in_port(0).data.get_shape(), value_infer) - - -def copy_value(node): - return None if node.in_node().value is None else 
node.in_port(0).data.get_value() diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/eltwise.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/eltwise.py deleted file mode 100644 index 90a687eee2f173..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/eltwise.py +++ /dev/null @@ -1,167 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, dynamic_dimension_value, \ - undefined_shape_of_rank, compatible_shapes, compatible_dims -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error - - -def eltwise_infer(node: Node, op=None, **kwargs): - def broadcast_dims(dim1, dim2): - if dim1 is not dynamic_dimension and dim2 is not dynamic_dimension: - mind = min(dim1, dim2) - maxd = max(dim1, dim2) - if mind == 1: - return maxd - elif mind != maxd: - raise Error('Input shapes mismatch for node {}: {}'.format(node_name, shapes)) - return mind - elif dim1 is dynamic_dimension and dim2 is dynamic_dimension: - return dynamic_dimension_value - elif dim1 is dynamic_dimension and dim2 is not dynamic_dimension: - return broadcast_dims(dim2, dim1) - else: # dim1 is static, dim2 is dynamic - if dim1 != 1: - return dim1 - else: - return dim2 - - raw_inputs = [(inp, attr) for inp, attr in node.get_sorted_inputs() - if 'control_flow_edge' not in attr or not attr['control_flow_edge']] - shapes = [node.graph.node[inp]['shape'] for inp, attr in raw_inputs] - values = [node.graph.node[inp]['value'] for inp, attr in raw_inputs] - node_name = node.soft_get('name', node.id) - - if any([s is None for s in shapes]): - raise Error('One of the input shapes for node "{}" is None'.format(node_name)) - - max_dims = None - for id, s in enumerate(shapes): - if max_dims is None or len(s) > max_dims: - max_dims = len(s) - - # Make all input shapes of the same size by adding 1's - axis = node.axis if node.has_valid('axis') else None - for id, item in enumerate(zip(shapes, values)): - shape, value = item - if len(shape) != max_dims and len(shape) > 0 and axis is not None: - new_shape = shape - - # Extend shape with 1's - for cnt in range(axis + len(shape), max_dims): - new_shape = np.ma.append(new_shape, 1) - - shapes[id] = new_shape - - # Reshape value to correctly calculate output shape - if values[id] is not None: - values[id] = np.ma.reshape(values[id], new_shape) - - extended_shapes = [np.ma.concatenate((np.ma.ones(max_dims - len(s), dtype=np.int64), s)) for s in shapes] - output_shape = extended_shapes[0] - for si in range(1, len(extended_shapes)): - for ei in range(max_dims): - output_shape[ei] = broadcast_dims(output_shape[ei], extended_shapes[si][ei]) - - node.out_port(0).data.set_shape(output_shape) - - if node.has_and_set('stop_value_propagation'): - return - - if op is None or any([v is None for v in values]): - return - - if len(values) <= 2: - node.out_port(0).data.set_value(op(*values, **kwargs)) - else: - node.out_port(0).data.set_value(values[0]) - for i in range(len(values) - 1): - node.out_port(0).data.set_value(op(node.out_node().value, values[i + 1])) - - -def eltwise_reverse_infer(node: Node): - input_1_shape = node.in_port(0).data.get_shape() - input_2_shape = node.in_port(1).data.get_shape() - if input_1_shape is not None and input_2_shape is not None: - return - - output_shape = node.out_port(0).data.get_shape() - node_name = node.soft_get('name', node.id) - - if 
node['auto_broadcast'] is 'none': - # input_1, input_2 and output shapes must match - # therefore undefined partial shapes can be exactly defined from output shape - if output_shape is not None: - most_defined_shape = output_shape - - # if out_shape = [4, dyn] and input_1_shape = [dyn, 13] - # then missing shape must be [4, 13] - if input_1_shape is not None and not compatible_shapes(output_shape, input_1_shape): - raise Error("shapes are not compatible for node '{}'".format(node_name)) - elif input_1_shape is not None: - most_defined_shape = find_common_partial_shape(output_shape, input_1_shape) - - if input_2_shape is not None and not compatible_shapes(output_shape, input_2_shape): - raise Error("shapes are not compatible for node '{}'".format(node_name)) - elif input_2_shape is not None: - most_defined_shape = find_common_partial_shape(most_defined_shape, input_2_shape) - - if input_1_shape is None: - node.in_port(0).data.set_shape(most_defined_shape) - if input_2_shape is None: - node.in_port(1).data.set_shape(most_defined_shape) - elif node['auto_broadcast'] == 'numpy': - if output_shape is not None: - out_rank = len(output_shape) - deduced_in_shape = undefined_shape_of_rank(out_rank) - - if input_1_shape is not None and input_2_shape is None and out_rank > len(input_1_shape): - in_port_to_update = 1 - defined_in_shape = input_1_shape - elif input_2_shape is not None and input_1_shape is None and out_rank > len(input_2_shape): - in_port_to_update = 0 - defined_in_shape = input_2_shape - else: - return - defined_in_rank = len(defined_in_shape) - - for i in range(-1, -defined_in_rank - 1, -1): - assert defined_in_shape[i] == 1 or np.ma.is_masked(defined_in_shape[i]) \ - or compatible_dims(defined_in_shape[i], output_shape[i]), \ - "Shapes of Elementwise node '{}' are not compatible for reverse_infer.".format(node_name) - - # if defined_input_shape = [1] and output_shape = [N, 400, 400, 3] - # partial shape information about sizes should not be lost - if defined_in_shape[i] == 1 or output_shape[i] == 1: - deduced_in_shape[i] = output_shape[i] - deduced_in_shape[:-defined_in_rank] = output_shape[:-defined_in_rank] - - node.in_port(in_port_to_update).data.set_shape(deduced_in_shape) - - -def bias_add_infer(node, op): - if node.in_port(0).data.get_value() is not None and node.in_port(1).data.get_value() is not None and op is not None: - node.out_port(0).data.set_value(op(node.in_port(0).data.get_value(), node.in_port(1).data.get_value())) - else: - node.out_port(0).data.set_shape(node.in_port(0).data.get_shape()) - - -def find_common_partial_shape(shape1, shape2): - """ - Extracts maximum information from partially defined shapes. 
- - For example, if shape_1 = [4, dyn] and shape_2 = [dyn, 13] - then resulting shape will be [4, 13] - :param shape1: partially defined shape - :param shape2: partially defined shape - :return: - """ - assert compatible_shapes(shape1, shape2), 'shapes must be compatible' - res = shape1.copy() - for i, (d1, d2) in enumerate(zip(shape1, shape2)): - if np.ma.is_masked(res[i]): - res[i] = d2 - return res diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/multi_box_detection.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/multi_box_detection.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/multi_box_prior.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/multi_box_prior.py deleted file mode 100644 index 2c80a2ecaa5adb..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/multi_box_prior.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node - - -def multi_box_prior_infer_mxnet(node: Node): - v10 = node.has_and_set('V10_infer') - data_H, data_W = node.in_node(0).value if v10 else node.in_node(0).shape[2:] - - num_ratios = len(node.aspect_ratio) - num_priors = len(node.min_size) + num_ratios - 1 - if v10: - node.out_node(0).shape = shape_array([2, data_H * data_W * num_priors * 4]) - else: - node.out_node(0).shape = shape_array([1, 2, data_H * data_W * num_priors * 4]) diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/roipooling.py b/tools/mo/openvino/tools/mo/front/common/partial_infer/roipooling.py deleted file mode 100644 index a136dd177d9e13..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/roipooling.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.layout import get_batch_dim, get_features_dim, shape_for_layout -from openvino.tools.mo.graph.graph import Node - - -def roipooling_infer(node: Node): - """ - Sets shape of output node according specified parameters input blobs and node - Sets number from the first input blob, channels from the second one, height and width are specified - Parameters - ---------- - node - """ - shapes = [node.in_node(i).shape for i in range(len(node.in_nodes()))] - if any(s is None for s in shapes): - return - if len(node.in_nodes()) == 4: # TensorFlow case of CropAndResize operation - crop_size = node.in_node(3).value - if crop_size is None: - log.error('The ROIPooling size is not known for node {}'.format(node.soft_get('name'))) - return - if not isinstance(crop_size, np.ndarray) or len(crop_size) != 2: - log.error('The ROIPooling size is should have 2 elements for node {}'.format(node.soft_get('name'))) - node.pooled_h = crop_size[0] - node.pooled_w = crop_size[1] - node.graph.remove_edge(node.in_node(3).id, node.id) - node.graph.remove_edge(node.in_node(2).id, node.id) - - layout = node.graph.graph['layout'] - assert len(layout) == 4 - - node.out_port(0).data.set_shape(shape_for_layout(layout, - batch=shapes[1][get_batch_dim(layout, 4)], - features=shapes[0][get_features_dim(layout, 4)], - height=node.pooled_h, - width=node.pooled_w)) diff --git a/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py 
b/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py deleted file mode 100644 index ffa6c6295e69dc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/partial_infer/utils.py +++ /dev/null @@ -1,370 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from typing import Iterable, List, Union - -import numpy as np - -from openvino.tools.mo.utils.error import Error - -dynamic_dimension = np.ma.masked -# numpy masked array for integer values forces us to select one integer number to be considered as a missing/invalid -# value. Since the primary purpose of usage of masked arrays in the MO is to specify dynamic dimension, the big prime -# (by modulo) negative number is selected as such a value -dynamic_dimension_value = -1000000007 - - -def shape_array(value, dtype=np.int64): - # if the input tensor has masked values then they should be explicitly converted to dynamic_dimension_value and - # a masked array should be created from scratch, otherwise, method "masked_equal" will convert masked elements to - # "nan" values - if isinstance(value, Iterable) and (not isinstance(value, np.ndarray) or value.ndim != 0): - value = [item if item is not dynamic_dimension else dynamic_dimension_value for item in value] - return np.ma.masked_equal(value, dynamic_dimension_value).astype(dtype=dtype) - - -def undefined_shape_of_rank(rank: int): - """ - Create a shape of specified rank with all dynamic dimensions. - - :param rank: requested rank of the output shape - :return: shape array - """ - return shape_array([dynamic_dimension_value] * rank) - - -def compatible_dims(dim1, dim2): - """ - Compare if dim1 is equal to dim2 or any of them is dynamic - - :param dim1: dimension to compare - :param dim2: dimension to compare - :return: boolean result of the comparison - """ - return dim1 is dynamic_dimension or dim2 is dynamic_dimension or dim1 == dim2 - - -def compatible_shapes(shape1, shape2): - """ - Compares two shape tensors. The shapes are considered equal if they have the same rank and the corresponding - dimensions are either equal or at least one of them is dynamic. - - :param shape1: the first shape to compare - :param shape2: the second shape to compare - :return: boolean result of the comparison - """ - if shape1.ndim != shape2.ndim: - return False - if shape1.size != shape2.size: - return False - for d1, d2 in zip(shape1, shape2): - if not compatible_dims(d1, d2): - return False - return True - - -def strict_compare_tensors(tensor1, tensor2): - """ - Strict comparison of two tensors. The tensors are equal iff their corresponding elements are equal or both are - dynamic. 
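Illustrative aside (standalone sketch, not taken from the removed sources): the helpers above encode dynamic dimensions as masked entries of a numpy masked array and treat two dimensions as compatible when they are equal or at least one is dynamic. The sentinel below is an assumption; any value that cannot occur as a real dimension would do:

import numpy as np

DYN = -1000000007  # sentinel playing the role of dynamic_dimension_value above

def to_shape(values):
    # Masked entries represent dynamic dimensions, as in shape_array() above.
    return np.ma.masked_equal(values, DYN).astype(np.int64)

a = to_shape([1, DYN, 224])
b = to_shape([1, 3, 224])

# Dims are compatible when equal or when at least one of them is dynamic.
compatible = all(bool(np.ma.is_masked(x) or np.ma.is_masked(y) or x == y)
                 for x, y in zip(a, b))
print(compatible)  # -> True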
- - :param tensor1: the first tensor to compare - :param tensor2: the second tensor to compare - :return: boolean result of the comparison - """ - if tensor1 is None and tensor2 is None: - return True - if tensor1 is None or tensor2 is None: - return False - - if not isinstance(tensor1, np.ma.masked_array): - tensor1 = shape_array(tensor1) - if not isinstance(tensor2, np.ma.masked_array): - tensor2 = shape_array(tensor2) - - if tensor1.ndim != tensor2.ndim: - return False - if tensor1.size != tensor2.size: - return False - if tensor1.ndim == 0: - return tensor1.item() == tensor2.item() - if not np.array_equal(tensor1.shape, tensor2.shape): - return False - for d1, d2 in zip(tensor1.flatten(), tensor2.flatten()): - if (d1 is not dynamic_dimension) ^ (d2 is not dynamic_dimension): - return False - elif d1 is not dynamic_dimension and d1 != d2: - return False - return True - - -def shape_delete(shape: np.ma.masked_array, obj: [int, list]): - """ - Removes element in the input tensor shape (presumably the numpy masked array) specified by index/indices obj. - The function is implemented to avoid usage of np.delete which corrupts information about the masked elements. - - :param shape: the shape object to remove elements from - :param obj: the list or a single integer defining index(es) of elements to remove - :return: shape with removed selected elements - """ - if isinstance(obj, (int, np.int64, np.int32)): - return shape_delete(shape, [obj]) - elif isinstance(obj, np.ndarray): - return shape_delete(shape, obj.tolist()) - elif isinstance(obj, list): - result = shape.copy() - obj = [item if item >= 0 else len(shape) + item for item in obj] - for index in sorted(obj, reverse=True): - assert 0 <= index < len(result), 'Incorrect element index {} to remove from {}'.format(index, result) - result = np.ma.concatenate((result[:index], result[index + 1:])) - return result - else: - raise Error('Incorrect parameter type of "obj": {}'.format(type(obj))) - - -def shape_insert(shape: [np.ndarray, list], pos: int, obj: [int, list, np.ndarray, dynamic_dimension]): - """ - Insert element(s) in the input tensor shape (presumably the numpy masked array) specified by position pos. - The function is implemented to avoid usage of np.insert which corrupts information about the masked elements. - - :param shape: the shape object to insert element(s) to - :param pos: the position to insert the elements into - :param obj: the list or a single integer or the dynamic_dimension_value or numpy array to insert - :return: shape with inserted elements - """ - if isinstance(obj, (int, np.int64, np.int32)) or obj is dynamic_dimension_value: - return shape_insert(shape, pos, [obj]) - elif isinstance(obj, (np.ndarray, list)): - return np.ma.concatenate((shape_array(shape[:pos]), shape_array(obj), shape_array(shape[pos:]))) - else: - raise Error('Incorrect parameter type of "obj": {}'.format(type(obj))) - - -def unmask_shape(value: [np.ma.masked_array, np.array]): - """ - Converts all dynamic_dimension values from the input tensor to -1. Used to generate shapes for the IR. - - :param value: the value to be unmasked. - :return: the value where dynamic_dimension elements are converted to -1. - """ - if not isinstance(value, np.ma.masked_array): - return value - else: - return value.tolist(-1) - - -def is_fully_defined(value): - """ - Checks that provided input tensor is fully defined. The input value can be of different types: scalar, list, array, - masked array. 
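Illustrative aside (not MO code): shape_delete and shape_insert above rebuild the shape with np.ma.concatenate rather than np.delete/np.insert so that the mask marking dynamic dimensions survives. The same trick in isolation, assuming only numpy:

import numpy as np

shape = np.ma.masked_equal([1, -1, 224, 224], -1)  # [1, dyn, 224, 224]

# Rebuild via np.ma.concatenate so the masked (dynamic) entry is preserved.
without_batch = np.ma.concatenate((shape[:0], shape[1:]))                    # drop index 0
with_channels = np.ma.concatenate((shape[:1], np.ma.array([3]), shape[1:]))  # insert 3 at position 1
print(without_batch)   # -> [-- 224 224]
print(with_channels)   # -> [1 3 -- 224 224]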
- - :param value: the value to check - :return: the result of the check - """ - if value is None: - return False - elif isinstance(value, np.ma.masked_array): - return not np.ma.is_masked(value) - elif isinstance(value, np.ndarray): # numpy array cannot contain dynamic values - return True - elif isinstance(value, list) or isinstance(value, tuple): - return dynamic_dimension not in value - elif value is dynamic_dimension: - return False - return True - - -def int64_array(value: Union[Iterable[Union[float, int]], float, int]) -> np.ndarray: - return np.array(value, dtype=np.int64) - - -def float32_array(value: Union[Iterable[Union[float, int]], float, int]) -> np.ndarray: - return np.array(value, dtype=np.float32) - - -def int8_array(value: Union[Iterable[Union[float, int]], float, int]) -> np.ndarray: - return np.array(value, dtype=np.int8) - - -def float_array(value: Union[Iterable[Union[float, int]], float, int]) -> np.ndarray: - return float32_array(value) - - -def mo_array(value: Union[Iterable[Union[float, int]], float, int], dtype=None) -> np.ndarray: - """ - This function acts in a same way as np.array except for the case when dtype is not provided - and np.array return fp64 array this function returns fp32 array - """ - x = np.array(value, dtype=dtype) - if not isinstance(value, np.ndarray) and x.dtype == np.float64 and dtype != np.float64: - x = x.astype(np.float32) - return x - - -def mark_input_bins(node, names=('weights', 'biases'), start_port: int = 1): - """ - Preparing necessary attributes for edges at input ports starting from start_port. - It is applicable for convolution and other operations that have constant inputs which - are intended to be dumped as OV IR bin file. - """ - for port, name in enumerate(names, start=start_port): - if port in node.in_nodes() and node.in_node(port).has_valid('value'): - node.in_edge(port)['bin'] = name - - -def assign_dims_to_weights(node, spatial, input_channel, output_channel=None, dims_number=None): - if spatial is not None: - node['spatial_dims'] = int64_array(spatial) - node['input_channel_dim'] = int64_array(input_channel) - node['output_channel_dim'] = int64_array(output_channel) - if 'dim_attrs' in node and 'input_channel_dim' not in node['dim_attrs']: - node['dim_attrs'].append('input_channel_dim') - node['dims_number'] = dims_number - - -def copy_or_none(x): - return x.copy() if x is not None else None - - -def convert_tf_padding_to_str(padding): - mapping = {'SAME': 'same_upper', 'VALID': 'valid'} - return mapping[padding] - - -def convert_deconv_tf_padding_to_str(padding): - # according to the formulas for calculating "auto_pad" values of the - # ConvBackpropData layer in the Operation Specification, - # the "same_lower" value matches to the "same" value for conv_transpose layer in TensorFlow - mapping = {'SAME': 'same_lower', 'VALID': 'valid'} - return mapping[padding] - - -# TODO eliminate this dependency and pass necessary function as an argument -def tf_window_op_pad_infer(input, window, stride, auto_pad, is_deconv=False, dilation=None): - if input is None or window is None or stride is None or auto_pad is None: - return None, None - - if dilation is None: - dilation = np.ones(len(input), dtype=np.int64) - - normalized_stride = stride - if is_deconv: - normalized_stride = 1 / stride - - if auto_pad in ['same_lower', 'same_upper']: - output = np.ma.ceil(input / normalized_stride) - residual = input % stride - mask = residual == 0 - full_pad = window.copy() - full_pad[mask] -= stride[mask] - mask = np.logical_not(mask) # 
pylint: disable=assignment-from-no-return - full_pad[mask] -= input[mask] % stride[mask] - full_pad = np.ma.maximum(full_pad, 0) # pylint: disable=assignment-from-no-return - low_pad = np.int64(full_pad / 2) - high_pad = full_pad - low_pad - pad = shape_array([low_pad, high_pad]).transpose() - elif auto_pad == 'valid': - output = np.int64(np.ceil((input - ((window - 1) * dilation + 1) + 1) / normalized_stride)) - pad = np.zeros((len(output), 2), dtype=np.int64) - else: - log.error("Unsupported padding scheme: {}".format(auto_pad)) - pad = None - output = None - return pad, output - - -def get_shape_from_slice(input_shape: np.ndarray, slices: List) -> np.ndarray: - """ - Calculate shape of a tensor after slicing without actually creating the resulting tensor. - Is introduced to prevent potentially large memory consumption. - """ - output_shape = [] - num_new_axes = np.count_nonzero(list(map(lambda x: x is np.newaxis, slices))) - num_ellipsis_inserts = len(input_shape) - len(slices) + num_new_axes + 1 - - in_idx = 0 - for i, s in enumerate(slices): - if s is dynamic_dimension or s == dynamic_dimension_value: - output_shape.append(dynamic_dimension_value) - in_idx += 1 - elif isinstance(s, slice): - if input_shape[in_idx] is not dynamic_dimension and not is_dynamic_slice(s): - output_shape.append(len(range(*s.indices(input_shape[in_idx])))) - else: - output_shape.append(dynamic_dimension_value) - in_idx += 1 - elif s is np.newaxis: - output_shape.append(1) - elif type(s) in [int, np.int32, np.int64]: # shrink_axis - in_idx += 1 - elif s is Ellipsis: - for idx in range(num_ellipsis_inserts): - output_shape.append(input_shape[in_idx]) - in_idx += 1 - else: - raise Exception('Element type of a slice list is unacceptable: "{}"'.format(type(s))) - for i in range(in_idx, len(input_shape)): - output_shape.append(input_shape[i]) - return shape_array(output_shape) - - -def is_dynamic_slice(s: [slice, int, None]): - """ - The function checks that the specified slice produces dynamic value. - :param s: slice object - :return: the result of the check - """ - return isinstance(s, slice) and (s.start is dynamic_dimension or - s.stop is dynamic_dimension or - s.step is dynamic_dimension) - - -def reverse_bypass_infer(node, in_ports: List[int]): - """ - Copies shapes from the out_port 0 into ports specified in the in_ports - - - :param node: - :param in_ports: input ports for which shape will be updated - :return: - """ - # WA: for cases when terminal Identity node has only output control dependency edges - # For this case the graph is not correctly build because the Identity node goes - # without Result node - if node.out_port(0).disconnected(): - return - - output_shape = node.out_port(0).data.get_shape() - if output_shape is not None: - for port in in_ports: - assert node.is_in_port_connected(port) - if node.in_port(port).data.get_shape() is None: - node.in_port(port).data.set_shape(output_shape) - - -def clarify_partial_shape(shapes: List): - """ - returns more precise partial shape from a set of partial shapes, - e.g. 
pshape_1 = [dyn, 2, dyn], pshape_2 = [10, dyn, dyn] => out_shape = [10, 2, dyn] - :param shapes: - :return: - """ - assert len(shapes) > 0 - out_shape = shapes[0] - for shape in shapes: - assert compatible_shapes(shape, out_shape), "shapes {} and {} are not compatible".format( - unmask_shape(shape), unmask_shape(out_shape)) - shape_unmasked = shape.data.copy() - for i, dim in enumerate(shape_unmasked): - if dim != dynamic_dimension_value: - out_shape[i] = dim - return out_shape - - -def set_input_shapes(node, *shapes: List): - assert len(shapes) <= len(node.in_ports()) - - for i, shape in enumerate(shapes): - if node.is_in_port_connected(i) and node.in_port(i).data.get_shape() is None: - node.in_port(i).data.set_shape(shape) diff --git a/tools/mo/openvino/tools/mo/front/common/register_custom_ops.py b/tools/mo/openvino/tools/mo/front/common/register_custom_ops.py deleted file mode 100644 index 166c5d5e575d01..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/register_custom_ops.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from collections import defaultdict - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def extension_extractor(node, ex_cls, disable_omitting_optional: bool = False, - enable_flattening_optional_params: bool = False): - ex = ex_cls() - supported = ex.extract(node) - return node.graph.node[node.id] if supported else None - - -def extension_op_extractor(node, op_cls): - op_cls.update_node_stat(node) - # TODO Need to differentiate truly supported ops extractors and ops extractors generated here - return node.graph.node[node.id] - - -def find_case_insensitive_duplicates(extractors_collection: dict): - """ - Searches for case-insensitive duplicates among extractors_collection keys. - Returns a list of groups, where each group is a list of case-insensitive duplicates. - Also returns a dictionary with lowered keys. - """ - keys = defaultdict(list) - for k in extractors_collection.keys(): - keys[k.lower()].append(k) - return [duplicates for duplicates in keys.values() if len(duplicates) > 1], keys - - -def check_for_duplicates(extractors_collection: dict): - """ - Check if extractors_collection has case-insensitive duplicates, if it does, - raise exception with information about duplicates - """ - # Check if extractors_collection is a normal form, that is it doesn't have case-insensitive duplicates - duplicates, keys = find_case_insensitive_duplicates(extractors_collection) - if len(duplicates) > 0: - raise Error('Extractors collection have case insensitive duplicates {}. 
' + - refer_to_faq_msg(47), duplicates) - return {k: v[0] for k, v in keys.items()} - - -def add_or_override_extractor(extractors: dict, keys: dict, name, extractor, extractor_desc): - name_lower = name.lower() - if name_lower in keys: - old_name = keys[name_lower] - assert old_name in extractors - del extractors[old_name] - log.debug('Overridden extractor entry {} by {}.'.format(old_name, extractor_desc)) - if old_name != name: - log.debug('Extractor entry {} was changed to {}.'.format(old_name, name)) - else: - log.debug('Added a new entry {} to extractors with {}.'.format(name, extractor_desc)) - # keep extractor name in case-sensitive form for better diagnostics for the user - # but we will continue processing of extractor keys in case-insensitive way - extractors[name] = extractor - keys[name_lower] = name - - -def update_extractors_with_extensions(extractors_collection: dict = None, - disable_omitting_optional: bool = False, - enable_flattening_optional_params: bool = False): - """ - Update tf_op_extractors based on mnemonics registered in Op and FrontExtractorOp. - FrontExtractorOp extends and overrides default extractors. - Op extends but doesn't override extractors. - """ - keys = check_for_duplicates(extractors_collection) - for op, ex_cls in FrontExtractorOp.registered_ops.items(): - add_or_override_extractor( - extractors_collection, - keys, - op, - lambda node, cls=ex_cls: extension_extractor( - node, cls, disable_omitting_optional, enable_flattening_optional_params), - 'custom extractor class {}'.format(ex_cls) - ) - - for op, op_cls in Op.registered_ops.items(): - op_lower = op.lower() - if op_lower not in keys: - extractors_collection[op] = (lambda c: lambda node: extension_op_extractor(node, c))(op_cls) - log.debug('Added a new entry {} to extractors with custom op class {}.'.format(op, op_cls)) - keys[op_lower] = op - check_for_duplicates(extractors_collection) diff --git a/tools/mo/openvino/tools/mo/front/common/replacement.py b/tools/mo/openvino/tools/mo/front/common/replacement.py deleted file mode 100644 index 5dd814b442d977..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/replacement.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.graph.graph import Node, merge_edge_props, Graph -from openvino.tools.mo.middle.pattern_match import apply_pattern -from openvino.tools.mo.utils import class_registration -from openvino.tools.mo.utils.replacement_pattern import ReplacementPattern - - -class FrontReplacementPattern(ReplacementPattern): - registered_ops = {} - registered_cls = [] - - def run_after(self): - from openvino.tools.mo.front.pass_separator import FrontStart - return [FrontStart] - - def run_before(self): - from openvino.tools.mo.front.pass_separator import FrontFinish - return [FrontFinish] - - def pattern(self): - raise Exception('Function "pattern" must be overridden in the sub-class') - - @classmethod - def class_type(cls): - return class_registration.ClassType.FRONT_REPLACER - - -ReplacementPattern.excluded_replacers.append(FrontReplacementPattern) - - -class FrontReplacementSubgraph(FrontReplacementPattern): - """ - Replace pattern defined set of nodes with a sub-graph. 
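Illustrative aside (standalone sketch with made-up sample keys): the duplicate check above groups extractor names case-insensitively and flags any group with more than one original spelling. The same grouping in isolation:

from collections import defaultdict

extractors = {'Conv2D': 'ext_a', 'conv2d': 'ext_b', 'Relu': 'ext_c'}

groups = defaultdict(list)
for name in extractors:
    groups[name.lower()].append(name)

duplicates = [names for names in groups.values() if len(names) > 1]
print(duplicates)  # -> [['Conv2D', 'conv2d']]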
- """ - replacement_id = 'None' - - def run_after(self): - from openvino.tools.mo.front.pass_separator import FrontStart - return [FrontStart] - - def run_before(self): - from openvino.tools.mo.front.pass_separator import FrontFinish - return [FrontFinish] - - def __init__(self): - pass - - @staticmethod - def extract_port(node_port): - return node_port if isinstance(node_port, tuple) else (node_port, 0) - - @staticmethod - def replace_input_edges(graph: Graph, input_edges_match: dict): - """ - Replacing existing input/output edges with a new ones to a new sub-graph. - :param graph: networkX graph to operate on. - :param input_edges_match: match of input edges between old and new sub-graph. - :return: None - """ - for old_name_port, new_name_port in input_edges_match.items(): - old_node_name, old_in_port = __class__.extract_port(old_name_port) - new_node_name, new_in_port = __class__.extract_port(new_name_port) - old_node = Node(graph, old_node_name) - src_node_name = old_node.get_sorted_inputs()[old_in_port][0] - edge_attrs = graph[src_node_name][old_node_name][0].copy() - edge_attrs['in'] = new_in_port - graph.add_edge(src_node_name, new_node_name, **edge_attrs) - log.debug("Created edge from {} to {} with attrs: {}".format(src_node_name, new_node_name, edge_attrs)) - - @staticmethod - def replace_output_edges(graph: Graph, output_edges_match: dict): - """ - Replacing existing input/output edges with a new ones to a new sub-graph. - :param graph: networkX graph to operate on. - :param output_edges_match: match of output edges between old and new sub-graph. - :return: None - """ - for old_name_port, new_name_port in output_edges_match.items(): - old_node_name, old_out_port = __class__.extract_port(old_name_port) - new_node_name, new_out_port = __class__.extract_port(new_name_port) - for src, dst, edge_attrs in graph.out_edges(old_node_name, data=True): - if edge_attrs['out'] == old_out_port: - new_edge_attrs = edge_attrs.copy() - new_edge_attrs['out'] = new_out_port - # Add control_flow ports, as we do not copy control flow ports to new node - if 'control_flow_edge' in new_edge_attrs and new_edge_attrs['control_flow_edge'] is True: - in_port_id = 'control_flow_{}'.format(new_edge_attrs['in']) - out_port_id = 'control_flow_{}'.format(new_edge_attrs['out']) - in_node, out_node = Node(graph, dst), Node(graph, new_node_name) - # if not out_node.has_port('out', out_port_id, control_flow=True): - out_node.add_output_port(out_port_id, control_flow=True, skip_if_exist=True) - # if not in_node.has_port('in', in_port_id, control_flow=True): - in_node.add_input_port(in_port_id, control_flow=True, skip_if_exist=True) - graph.add_edge(new_node_name, dst, **new_edge_attrs) - log.debug("Created edge from {} to {} with attrs: {}".format(new_node_name, dst, new_edge_attrs)) - - def input_edges_match(self, graph: Graph, match: object, new_sub_graph: dict): - """ - Default implementation doesn't add new input edges automatically. - """ - return {} - - def output_edges_match(self, graph: Graph, match: object, new_sub_graph: dict): - """ - Default implementation doesn't add new output edges automatically. - """ - return {} - - def generate_sub_graph(self, graph: Graph, match: object): - raise Exception("The function 'generate_sub_graph' must be implemented in the sub-class.") - - def nodes_to_remove(self, graph: Graph, match: dict): - """ - Default implementation generates list of all matched nodes. So all matched nodes will be removed. 
- """ - return [node.id for node in match.values()] - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - log.debug('replace_sub_graph: "{}" matched nodes: {}'.format(self.replacement_id, - '\n'.join(sorted(match.matched_nodes_names())))) - new_sub_graph = self.generate_sub_graph(graph, match) # pylint: disable=assignment-from-no-return - self.replace_input_edges(graph, self.input_edges_match(graph, match, new_sub_graph)) - self.replace_output_edges(graph, self.output_edges_match(graph, match, new_sub_graph)) - - remove_nodes = self.nodes_to_remove(graph, match) - log.debug( - 'replace_sub_graph: "{}" removing nodes: {}'.format(self.replacement_id, '\n'.join(sorted(remove_nodes)))) - graph.remove_nodes_from(remove_nodes) - - def find_and_replace_pattern(self, graph: Graph): - apply_pattern(graph, action=self.replace_sub_graph, **self.pattern()) - - registered_ops = {} - registered_cls = [] - - @classmethod - def class_type(cls): - return class_registration.ClassType.FRONT_REPLACER - - -ReplacementPattern.excluded_replacers.append(FrontReplacementSubgraph) - - -class FrontReplacementOp(FrontReplacementSubgraph): - """ - A super class for an operation replacement. - Replaces a single operation (identified by 'op' attribute) by a sub-graph of operations. - It is a convenient specialization of FrontReplacementPattern. - """ - op = 'UnknownOp' - - def run_after(self): - from openvino.tools.mo.front.pass_separator import FrontStart - return [FrontStart] - - def run_before(self): - from openvino.tools.mo.front.pass_separator import FrontFinish - return [FrontFinish] - - def pattern(self): - return dict( - nodes=[ - ('op', dict(op=self.__class__.op))], - edges=[] - ) - - def replace_op(self, graph: Graph, node: Node): - raise Exception("The function 'replace_op' must be implemented in the sub-class.") - - @staticmethod - def gen_output_edges_match(node: Node, out_node_replace: list): - out_edges_match_dict = dict() - for old_out_port, new_node_desc in enumerate(out_node_replace): - new_out_port = 0 - if new_node_desc is tuple: - new_node_name = new_node_desc[0] - new_out_port = new_node_desc[1] - else: - new_node_name = new_node_desc - out_edges_match_dict[(node.id, old_out_port)] = (new_node_name, new_out_port) - return out_edges_match_dict - - @staticmethod - def update_input_edges_attrs(graph: Graph, node: Node, added_nodes: list): - """ - Copy edge attributes from 'old' input edges of node 'node' to new input sub-graph edges. - :param graph: graph to operate on - :param node: Node object that was replaced. - :param added_nodes: list of nodes names added. 
- :return: None - """ - for old_u, old_v, old_edge_attrs in graph.in_edges(node.id, data=True): - for new_u, new_v, new_edge_attrs in graph.in_edges(added_nodes, data=True): - if new_u not in added_nodes: # external input to the sub-graph - if old_u == new_u and old_edge_attrs['out'] == new_edge_attrs['out']: - merge_edge_props(new_edge_attrs, old_edge_attrs) # copy old edge attributes - - def replace_sub_graph(self, graph: Graph, match: dict): - assert 'op' in match - assert len(match) == 1 - node = match['op'] - nodes_before_replacement = graph.nodes() - self.replace_output_edges(graph, self.gen_output_edges_match(node, self.replace_op(graph, node))) - - # nodes added by the 'replace_op' function call - added_nodes = list(set(graph.nodes()) - set(nodes_before_replacement)) - self.update_input_edges_attrs(graph, node, added_nodes) - - # TODO Need to check if there are other users for these nodes - remove_nodes = self.nodes_to_remove(graph, match) - log.debug("Removing nodes: {}".format(remove_nodes)) - graph.remove_nodes_from(remove_nodes) - - registered_ops = {} - registered_cls = [] - - @classmethod - def class_type(cls): - return class_registration.ClassType.FRONT_REPLACER - - -ReplacementPattern.excluded_replacers.append(FrontReplacementOp) diff --git a/tools/mo/openvino/tools/mo/front/common/weights.py b/tools/mo/openvino/tools/mo/front/common/weights.py deleted file mode 100644 index 1851a4848d75a7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/common/weights.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Graph - - -def swap_weights_xy(graph: Graph, nodes: list): - from openvino.tools.mo.front.tf.ObjectDetectionAPI import swap_weights_xy as new_swap_weights_xy - new_swap_weights_xy(graph, nodes) diff --git a/tools/mo/openvino/tools/mo/front/create_tensor_nodes.py b/tools/mo/openvino/tools/mo/front/create_tensor_nodes.py deleted file mode 100644 index c43edb8d710e2f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/create_tensor_nodes.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.extractor import create_tensor_nodes -from openvino.tools.mo.graph.graph import Graph - - -class CreateTensorNodes(FrontReplacementPattern): - enabled = True - force_clean_up = True - - def run_before(self): - return [] - - def run_after(self): - from openvino.tools.mo.front.pass_separator import FrontFinish - return [FrontFinish] - - def find_and_replace_pattern(self, graph: Graph): - graph.stage = 'middle' - graph.strict_mode = False - create_tensor_nodes(graph) - graph.strict_mode = True diff --git a/tools/mo/openvino/tools/mo/front/disable_weights_quantize_value_propagation.py b/tools/mo/openvino/tools/mo/front/disable_weights_quantize_value_propagation.py deleted file mode 100644 index 7158643e15f877..00000000000000 --- a/tools/mo/openvino/tools/mo/front/disable_weights_quantize_value_propagation.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.FakeQuantWithMinMaxVars import FakeQuantWithMinMaxVarsToQuantize -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class 
DisableQuantizeValuePropagation(FrontReplacementPattern): - enabled = True - - def run_after(self): - return [FakeQuantWithMinMaxVarsToQuantize] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('quantize', dict(op='FakeQuantize', levels=lambda levels: levels != 2)), - ], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - match['quantize']['stop_value_propagation'] = True diff --git a/tools/mo/openvino/tools/mo/front/div.py b/tools/mo/openvino/tools/mo/front/div.py deleted file mode 100644 index 34ed1031563ee3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/div.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Mul, Pow -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node, rename_node - - -class Div(FrontReplacementPattern): - # This transformation is called directly from the 'openvino/tools/mo/middle/fusings.py' transformation - enabled = False - - @staticmethod - def div_to_mul_replacement(div: Node): - # we execute this transformation for V10 IR later on middle phase despite graph_condition - # so we prevent Div replacement on shape-calculating sub-graphs - if div.in_port(0).data.get_value() is not None and div.in_port(1).data.get_value() is not None: - return - - # cannot replace Div with Mul when the divisor is integer because the reciprocal number will be 0 - value = div.in_port(1).data.get_value() - if value is not None and type(value.item(0)) == int: - return - - graph = div.graph - name = div.soft_get('name', div.id) - - # keep Mul name the same as Div -- because of mathematical equality of output tensors - rename_node(node=div, name=name + '/to_be_removed') - - # reconnect Div in(out)puts to Mul - mul = Mul(graph, {'name': name}).create_node() - rename_node(mul, name) - - div.in_port(0).get_connection().set_destination(mul.in_port(0)) - div.in_port(1).get_connection().set_destination(mul.in_port(1)) - div.out_port(0).get_connection().set_source(mul.out_port(0)) - - # restore mathematical equivalence to Div operation: Div(A, B) = Mul(A, Pow(B, -1)) - reciprocal = create_op_with_const_inputs(graph, Pow, {1: np.float64(-1)}, {'name': name + '/reciprocal_'}) - mul.in_port(1).get_connection().insert_node(reciprocal) - - def find_and_replace_pattern(self, graph: Graph): - for div in graph.get_op_nodes(op='Div'): - self.div_to_mul_replacement(div) diff --git a/tools/mo/openvino/tools/mo/front/eltwise_n.py b/tools/mo/openvino/tools/mo/front/eltwise_n.py deleted file mode 100644 index 328070f1fcf3db..00000000000000 --- a/tools/mo/openvino/tools/mo/front/eltwise_n.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Add, Maximum, Minimum, Mul -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Node, Graph - - -class EltwiseNReplacement(FrontReplacementOp): - """ - This replacer substitutes elementwise operation with more than 2 inputs with a number of simple elementwise - operations with 2 inputs. The replacer supports operations supported by the Eltwise layer. 
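Illustrative aside (not MO code; numpy functions stand in for the Add/Mul/Minimum/Maximum graph ops): numerically, the decomposition described above is just a left fold of the matching binary operation over the N inputs:

import numpy as np
from functools import reduce

op_to_fn = {'sum': np.add, 'mul': np.multiply, 'min': np.minimum, 'max': np.maximum}

def eltwise_n(operation, *tensors):
    # Chain of 2-input ops, mirroring the nodes the replacer above creates.
    return reduce(op_to_fn[operation], tensors)

print(eltwise_n('sum', np.array([1, 2]), np.array([3, 4]), np.array([5, 6])))  # -> [ 9 12]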
- """ - op = 'EltwiseN' - enabled = True - - op_to_class_map = { - 'sum': Add, - 'min': Minimum, - 'max': Maximum, - 'mul': Mul, - } - - def replace_op(self, graph: Graph, node: Node): - last_node = node - operation = node.operation - assert operation in EltwiseNReplacement.op_to_class_map - op_class = EltwiseNReplacement.op_to_class_map[operation] - left_connect = node.in_port(0).get_connection() - - for ind in list(node.in_ports())[1:]: - attrs = {'name': node.name + '/' + operation + '_' + str(ind)} - attrs.update({'axis': node.axis} if node.has_valid('axis') else {}) - # Create node - eltwise_op = op_class(graph, attrs).create_node() - # Connect nodes - left_connect.set_destination(eltwise_op.in_port(0)) - left_connect = eltwise_op.out_port(0).get_connection() - node.in_port(ind).get_connection().set_destination(eltwise_op.in_port(1)) - last_node = eltwise_op - return [last_node.id] diff --git a/tools/mo/openvino/tools/mo/front/extractor.py b/tools/mo/openvino/tools/mo/front/extractor.py deleted file mode 100644 index 35f5a591496352..00000000000000 --- a/tools/mo/openvino/tools/mo/front/extractor.py +++ /dev/null @@ -1,1225 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ast -import logging as log -import re -from collections import defaultdict -from copy import copy - -import numpy as np -from openvino.runtime import PartialShape, Dimension - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array -from openvino.tools.mo.front.onnx.extractors.utils import get_backend_pad -from openvino.tools.mo.graph.graph import Node, Graph, add_opoutput -from openvino.tools.mo.middle.passes.eliminate import reverse_dfs -from openvino.tools.mo.utils import class_registration -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.unsupported_ops import UnsupportedOps -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def restore_edges(graph: Graph, get_edges: callable): - """ - Take a graph without edges and extract dependencies between nodes with the help of get_edges function. - For a given node n the get_edges function returns a list of tuples (n1, n2, attrs), that is used to create - n1 --> n2 edge with attributes attrs. - It is possible that two nodes n1 and n2 have more than one n1 --> n2 edges, so the resulting graph is Graph. - """ - used_tensors = set() - for node in list(graph.nodes()): - edges = get_edges(Node(graph, node)) - for u, v, d in edges: - undefined = ['"' + x + '"' for x in [u, v] if not graph.has_node(x)] - if len(undefined): - raise Error( - 'While creating an edge from "{}" to "{}": node name {} is undefined in the graph. ' + - 'Check correctness of the input model. ', - u, v, - ' and '.join(undefined) + - refer_to_faq_msg(25) - ) - used_tensors.add(u) - - graph.add_edges_from(edges) - return used_tensors - - -def remove_control_dependency_inputs(graph: Graph): - """ - Delete control dependency inputs from pb all over the graph - :param graph: graph to operate on - """ - for _, attrs in list(graph.nodes(data=True)): - if 'pb' not in attrs: - continue - pb = attrs['pb'] - ind = 0 - while ind < len(pb.input): - if pb.input[ind].startswith('^'): - del pb.input[ind] - else: - ind += 1 - - -def update_attrs(attrs: [dict, Node], attr: str, new: [str, list]): - """ Updates attrs[attr], which should be a list, by a new items from 'new' list. - If attrs[attr] doesn't exist, create it. 
- """ - if attr not in attrs: - attrs[attr] = [] - if isinstance(new, str): - new = [new] - attrs[attr] = list(set(attrs[attr]).union(set(new))) - - -def add_attrs_props(attrs: dict): - update_attrs(attrs, 'dim_attrs', ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']) - update_attrs(attrs, 'shape_attrs', ['shape', 'pad', 'window', 'stride', 'output_shape']) - return attrs - - -def spatial_attr_getter(node: Node, field: str = None, dim: int = None, post: callable = None): - """ - - Parameters - ---------- - node: node of graph - field: name of the field in original layer - dim: dimension of the field - post: function for getting values of the field - - Returns: - value of field - ------- - - """ - if node.has(field) and type(node[field]) is np.ndarray and node.has('spatial_dims'): - return post(node[field][node.spatial_dims[dim]]) - return None - - -def spatial_getter(name: str, field: str, dim: int, post: callable = lambda x: x): - """ - - Parameters - ---------- - name: name of the filed in IR - field: name of field in original layer - dim: dimension of field - post: function for getting values of field - - Returns: - of the filed in IR and function for getting values of the field - ------- - - """ - return name, lambda node: spatial_attr_getter(node, field=field, dim=dim, post=post) - - -def attr_getter(node: Node, name: str): - if node.has(name): - if type(node[name]) is list or type(node[name]) is np.ndarray: - return ','.join(map(str, node[name])) - elif type(node[name]) is not np.ndarray: - return str(node[name]) - return None - - -def bool_to_str(node: Node, attr: str): - # Function converts 0/1 or bool False/True or '0'/'1' values to str 'false'/'true' which need to appear in IR - attribute_name = node.soft_get(attr, None) - if attribute_name is None: - return None - if isinstance(attribute_name, bool): - return str(attribute_name).lower() - elif attribute_name in [0, 1]: - return str(bool(attribute_name)).lower() - elif attribute_name in ['0', '1']: - return str(bool(int(attribute_name))).lower() - else: - raise Error('Wrong value {} for boolean attribute {} in node {}'.format( - attribute_name, attr, node.soft_get('name'))) - - -def kernel_getter(node: Node, dim: int): - if node.kind == 'op' and node.op in ['Conv2D', 'DepthwiseConv2dNative', 'Deconv2D']: - if node.has('kernel_spatial'): - return node.kernel_spatial[dim] # TODO check if order of dimension matches expectations - weights = node.in_node(1) # WARNING: 1 is hardcoded input with a kernel - return weights.shape[weights.spatial_dims[dim]] - else: - return None - - -def node_defs_to_str(node: Node): - node_name_to_pb_mapping = {node_name: node_def for node_name, node_def in node['pbs'].items()} - result = '' - for node_name in node['nodes_order']: - result += 'node {\n' + str(node_name_to_pb_mapping[node_name]) + '}\n' - return result - - -def update_ie_fields(attrs: dict, ir_version = None): - ir_v10_attrs = { - 'IE': [( - 'layer', - [('id', lambda node: node.node), 'name', 'type', 'version'], - [ - ( - 'data', - [ - 'auto_pad', - 'epsilon', - 'min', - 'max', - ('axis', lambda node: attr_getter(node, 'axis')), - 'tiles', - ('dim', lambda node: attr_getter(node, 'dim')), - 'num_axes', - ('pool-method', 'pool_method'), - 'group', - ('rounding-type', 'rounding_type'), - ('exclude-pad', 'exclude_pad'), - 'operation', - 'out-size', - 'power', - 'shift', - 'alpha', - 'beta', - 'coords', - 'classes', - 'num', - ('local-size', 'local_size'), - 'region', - 'knorm', - 'bias', - - 'num_classes', - 'keep_top_k', - 
'variance_encoded_in_target', - 'code_type', - 'share_location', - 'nms_threshold', - 'confidence_threshold', - 'background_label_id', - 'top_k', - 'eta', - 'visualize', - 'visualize_threshold', - 'save_file', - 'output_directory', - 'output_name_prefix', - 'output_format', - 'label_map_file', - 'name_size_file', - 'num_test_image', - 'prob', - 'resize_mode', - 'height', - 'width', - 'height_scale', - 'width_scale', - 'pad_mode', - 'pad_value', - 'interp_mode', - - 'img_size', - 'img_h', - 'img_w', - 'step', - 'step_h', - 'step_w', - ('offset', lambda node: attr_getter(node, 'offset')), - 'variance', - 'flip', - 'clip', - ('min_size', lambda node: attr_getter(node, 'min_size')), - ('max_size', lambda node: attr_getter(node, 'max_size')), - ('aspect_ratio', lambda node: attr_getter(node, 'aspect_ratio')), - 'decrease_label_id', - 'normalized', - 'scale_all_sizes', - - ('type', 'norm_type'), - 'eps', - 'eps_mode', - 'across_spatial', - 'channel_shared', - - 'negative_slope', - 'engine', - - 'num_filter', - ('type', 'sample_type'), - ('order', lambda node: attr_getter(node, 'order')), - - 'pooled_h', - 'pooled_w', - 'spatial_scale', - - 'cls_threshold', - 'max_num_proposals', - 'iou_threshold', - 'min_bbox_size', - 'feat_stride', - 'pre_nms_topn', - 'post_nms_topn', - ('type', lambda node: node['filler_type'] if node.has('filler_type') else None), - ('value', lambda node: node['filler_value'] if node.has('filler_value') else None), - ('output', - lambda node: node.output_shape[node.channel_dims][0] if node.has('output_shape') and node.has( - 'channel_dims') else None), - ('input_nodes_names', lambda node: ' '.join(node['input_nodes_names']) if node.has( - 'input_nodes_names') else None), - ('output_tensors_names', lambda node: ' '.join(node['output_tensors_names']) if node.has( - 'output_tensors_names') else None), - ('real_input_dims', lambda node: ';'.join([' '.join(map(str, shape)) for shape in - node['real_input_dims']]) - if node.has('real_input_dims') else None), - ('protobuf', lambda node: node_defs_to_str(node) if node.has('pbs') else None), - {'custom_attributes': None}, - ('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims])) if node.has_valid('stride') else None), - ('kernel', lambda node: ','.join(map(str, node['kernel_spatial'])) if node.has_valid( - 'kernel_spatial') else None), - ('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims])) if node.has_valid('dilation') else None), - - ('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0))) if node.has_valid('pad') else None), - ('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1))) if node.has_valid('pad') else None), - - ('scale', lambda node: attr_getter(node, 'scale')), - 'crop_width', - 'crop_height', - 'write_augmented', - 'max_multiplier', - 'augment_during_test', - 'recompute_mean', - 'write_mean', - 'mean_per_pixel', - 'mode', - 'bottomwidth', - 'bottomheight', - 'chromatic_eigvec', - 'kernel_size', - 'max_displacement', - 'stride_1', - 'stride_2', - 'single_direction', - 'do_abs', - 'correlation_type', - 'antialias', - 'resample_type', - 'factor', - 'coeff', - ('ratio', lambda node: attr_getter(node, 'ratio')), - 'size', - ], - []), - '@ports', - '@consts'])] - } - - ir_version_mapping = { - # Default behaviour is IR V10 attributes - None: ir_v10_attrs, - 10: ir_v10_attrs, - 11: ir_v10_attrs, - } - - if ir_version not in ir_version_mapping.keys(): - raise Error("Unrecognized IR version was 
specified: {}".format(ir_version)) - - attrs.update(ir_version_mapping[ir_version]) - - -def create_tensor_nodes(graph: Graph): - """ - Creates nodes between ops to represent intermediate data that flows from one op to another. - For each edge with unique out attribute that goes from a given node, - a new node is created with attribute kind='data' - - Old: op1 ---(out, in)---> op2 - New: op1 ---(out)---> tensor ---(in)---> op2 - - Edge also can contain in_attrs, out_attrs and data_attrs attributes. Each of them is a list - of names of other attributes in an edge. The lists control how original edge attributes are distributed - among newly created in/out edges and tensor node. Having the name X in in_attrs means that an edge attribute - with name X should be moved to the input edge to the tensor (together with 'out' attribute). - Having Y in out_attrs means that the attribute Y should be moved to the output edge from the tensor. - And if Z is in data_attrs, Z attribute of the edge should be moved to the tensor node itself. - For example: - - Old: op1 ---(out, in, X, Y, Z)---> op2 - New: op1 ---(out, X)---> tensor(Z) ---(in, Y)---> op2 - - All old nodes are marked as kind='op' - """ - for node in list(graph.nodes()): - node_attr = graph.node[node] - # threat all existing nodes in the graph as operation nodes (in opposite to data nodes created in this function - # below) - graph.node[node]['kind'] = 'op' - - # the Result nodes are just marker operations so we don't need to create output tensors for them - if node_attr['op'] == 'Result': - continue - # out_edges is a list of (u, v, d), where d is a dict of edge attributes - out_edges = list(graph.out_edges(node, data=True)) - - # Make a list of unique output ports for a node, unique means an edge has unique 'out' attribute. - # Multiple edges coming from node may have duplicated 'out' ports because a single output port - # can be reused multiple times by several consumers. 
- out_ports = list(set([d['out'] for u, v, d in out_edges])) - - smart_node = Node(graph, node) - out_nodes = smart_node.out_nodes() - node_name = str(smart_node.name) if smart_node.has_valid('name') else str(smart_node.id) - - # assign to each output port a tensor unique id in the graph - out_tensor_dict = {port: graph.unique_id('{}/Output_{}/Data_'.format(node_name, port)) for port in out_ports} - - # add a new node with kind='data' per each tensor - graph.add_nodes_from([(uid, - add_attrs_props( - dict(name=uid, kind='data', shape=None, value=None, data_type=None, infer=None))) for - port, uid in out_tensor_dict.items()]) - - # add all edges from the node to each output port tensor - added_out_ports = set() - - for src_node, _, attrs in out_edges: - port = attrs['out'] - if port not in added_out_ports: - graph.add_edges_from([(node, out_tensor_dict[port], get_specific_edge_attrs(attrs, 'out_attrs'))]) - # merge additional data node attributes from original edge - graph.node[out_tensor_dict[port]].update(get_specific_edge_attrs(attrs, 'data_attrs')) - added_out_ports.add(port) - # graph.add_edges_from([(node, out_tensor_dict[port], {'out' : port}) for port in out_ports]) - - # connect newly created tensor nodes to their consumers - for u, v, d in out_edges: - graph.add_edges_from([(out_tensor_dict[d['out']], v, get_specific_edge_attrs(d, 'in_attrs'))]) - # graph.add_edges_from([(out_tensor_dict[d['out']], v, {'in' : d['in']}) for u, v, d in out_edges]) - # remove old edges op1 ---> op2; due to bug in nx, need to repack out_edges to have (u,v) as an element - graph.remove_edges_from([x[:2] for x in out_edges]) - return graph - - -# 'attrs_type' is either "in_attrs" or "out_attrs" -# update result values with the values from dictionary additional_attrs -def get_specific_edge_attrs(attrs: dict, attrs_type: str, additional_attrs=None): - new_attrs = dict() - if attrs_type in attrs: - for key in attrs[attrs_type]: - if key in attrs.keys(): - new_attrs[key] = attrs[key] - if additional_attrs is not None: - new_attrs.update(additional_attrs) - return new_attrs - - -def extract_node_attrs(graph: Graph, extractor: callable): - """ - For each node produce new entries in a node attributes dictionary by existing attributes. - Old attributes are not removed but merged with new ones. - """ - unsupported = UnsupportedOps(graph) - for node, attrs in list(graph.nodes(data=True)): - # the 'Result' operation is a virtual operation that is added after the output nodes - if 'op' in attrs and attrs['op'] == 'Result': - supported, new_attrs = True, {'in_attrs': list(), 'out_attrs': list()} - else: - try: - supported, new_attrs = extractor(Node(graph, node)) - except Exception as e: - log.warning('Node attributes: {}'.format(graph.node[node])) - raise Error( - 'Unexpected exception happened during extracting attributes for node {}.' + - '\nOriginal exception message: {}', - node, - str(e) - ) from e - if supported: - if 'IE' not in new_attrs: - update_ie_fields(new_attrs) - add_attrs_props(new_attrs) - for key, val in new_attrs.items(): - graph.node[node][key] = val - if not supported: - unsupported.add(Node(graph, node)) - - unsupported.report(log.warning, 'Instructions/layers that do not have attribute extractors:') - - return graph - - -def raise_no_node(node_name: str): - raise Error('No node with name {}'.format(node_name)) - - -def raise_node_name_collision(node_name: str, found_nodes: list): - raise Error('Name collision was found, there are several nodes for mask "{}": {}. 
' - 'If your intention was to specify port for node, please instead specify node names connected to ' - 'this port. If your intention was to specify the node name, please add port to the node ' - 'name'.format(node_name, found_nodes)) - - -def get_node_id_with_ports(graph: Graph, node_name: str, skip_if_no_port=True): - """ - Extracts port and node ID out of user provided name - :param graph: graph to operate on - :param node_name: user provided node name - :return: node ID, direction of port ('in', 'out', 'port') and port number or None - """ - node_names = [n.soft_get('name', n.id) for n in graph.get_op_nodes()] - found_names = [] - for name in node_names: - regexp = r'(\d*:)?(' + name + r')(:\d*)?' - match = re.search(regexp, node_name) - if match and match.group() == node_name: - in_port = None - out_port = None - if match.group(1) and match.group(3): - log.warning('Skipping the case with both in and out port specified, only one port can be specified') - continue - node = Node(graph, graph.get_node_id_by_name(name)) - if match.group(1): - in_port = int(match.group(1).replace(':', '')) - if skip_if_no_port and in_port not in [e['in'] for e in node.in_edges().values()]: - # skip found node if it doesn't have such port number - continue - if match.group(3): - out_port = int(match.group(3).replace(':', '')) - if skip_if_no_port and out_port not in [e['out'] for e in node.out_edges().values()]: - # skip found node if it doesn't have such port number - continue - - found_names.append((in_port, out_port, name)) - if len(found_names) == 0: - raise_no_node(node_name) - if len(found_names) > 1: - raise_node_name_collision(node_name, [name for _, _, name in found_names]) - in_port, out_port, name = found_names[0] - node_id = graph.get_node_id_by_name(name) - if in_port is not None: - direction = 'in' - port = in_port - elif out_port is not None: - direction = 'out' - port = out_port - else: - direction = 'port' - port = None - return node_id, direction, port - - -def get_new_placeholder_name(node_id: str, is_out_port: bool = False, port: int = 0): - """ - Forms a name of new placeholder created by cutting a graph - :param node_id: a node name that is cut - :param is_out_port: it is True iff output port is cut - :param port: a port number - :return: a name of new placeholder created by cutting a graph - """ - port_type = '_out' if is_out_port else '' - return '{}/placeholder{}_port_{}'.format(node_id, port_type, port) - - -def create_params_with_custom_types(packed_user_shapes: [None, dict]): - """ - Compute a list of placeholder names for which an user specifies custom type - :param packed_user_shapes: packed data that contains input node names, - their port numbers, shapes and data types - :return: a list of placeholder names for which an user specifies custom type - Example of packed_user_shapes dictionary: - packed_user_shapes = - { - 'node_ID': - [ - {'shape': None, 'in': 0}, - {'shape': None, 'in': 1}, - ], - 'node_1_ID': - [ - {'shape': [1, 227, 227, 3], 'port': None, 'data_type': np.int32} - ], - 'node_2_ID': - [ - {'shape': None, 'out': 3} - ] - } - For which the function returns a list ['node_1_ID'] because this node only has custom data type - """ - if packed_user_shapes is None: - return [] - - params_with_custom_types = [] - for input_name in packed_user_shapes: - for desc in packed_user_shapes[input_name]: - p_name = input_name - if 'port' in desc and desc['port'] is None: # neither input nor output port specified - user_defined_type = desc.get('data_type', None) - else: # need to 
check the particular port the Parameter was created for - p_name = get_new_placeholder_name(input_name, 'out' in desc, - desc['out'] if 'out' in desc else desc['in']) - user_defined_type = desc.get('data_type', None) - if user_defined_type is not None: - params_with_custom_types.append(p_name) - return params_with_custom_types - - -def input_user_data_repack(graph: Graph, input_user_shapes: [None, list, dict, np.ndarray], - freeze_placeholder: dict, input_user_data_types = dict()): - """ - Restructures user input cutting request. Splits ports out of node names. Transforms node names to node ids. - :param graph: graph to operate on - :param input_user_shapes: data structure representing user input cutting request. It may be: - # None value if user did not provide neither --input nor --input_shape keys - # list instance witch contains input layer names with or without ports if user provided only --input key - # dict instance witch contains input layer names with or without ports as keys and shapes as values if user - provided both --input and --input_shape - # np.ndarray if user provided only --input_shape key - :param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values - :param input_user_data_types: dictionary with input nodes and its data types - :return: restructured input shapes and freeze placeholder shapes information - Example of input dictionary: - _input_shapes = - { - 'node_ID': - [ - {'shape': None, 'in': 0}, - {'shape': None, 'in': 1}, - ], - 'node_1_ID': - [ - {'shape': [1, 227, 227, 3], 'port': None, 'data_type': np.int32} - ], - 'node_2_ID': - [ - {'shape': None, 'out': 3} - ] - } - Example of freeze placeholder dictionary: - _freeze_placeholder = - { - 'phase_train' : False - } - """ - _input_shapes = defaultdict(list) - _freeze_placeholder = dict() - _freeze_new_placeholder = defaultdict(list) - - # freeze placeholder restructure - # Replaces placeholder name with placeholder id. Raises if there is no placeholder with such ID - placeholders_ids = graph.get_nodes_with_attributes(op='Parameter') - if freeze_placeholder is None: - _freeze_placeholder = None - else: - if isinstance(freeze_placeholder, list): - raise Error('Unnamed inputs with values are not supported for legacy frontend. Please provide input names.') - for placeholder_name, value in freeze_placeholder.items(): - placeholder_id, direction, port = get_node_id_with_ports(graph, placeholder_name) - if port is None and placeholder_id in placeholders_ids: - _freeze_placeholder[placeholder_id] = value - else: - # collect possible new placeholders that will be frozen with values - is_out_port = (direction == 'out') - new_placeholder_id = get_new_placeholder_name(placeholder_id, is_out_port, port) - _freeze_new_placeholder[placeholder_id].append( - {'direction': direction, 'port': port, 'name': placeholder_name, 'id': new_placeholder_id, - 'value': value}) - - if isinstance(input_user_shapes, list): - if len(input_user_shapes) == 1 and isinstance(input_user_shapes[0], PartialShape): - input_user_shapes = input_user_shapes[0] - - # input user shapes restructure - if input_user_shapes is None: - # None User did not provide neither --input nor --input_shape keys - _input_shapes = None - elif isinstance(input_user_shapes, list) and len(input_user_shapes) > 1 and isinstance(input_user_shapes[0], PartialShape): - raise Error('Please provide input layer names for input layer shapes. 
' + refer_to_faq_msg(58)) - elif isinstance(input_user_shapes, list) or isinstance(input_user_shapes, dict): - # list [layer names w or w/o ports]. User provided only --input key - # dict {layer names w or w/o ports as keys: shapes as values}. User provided both --input and --input_shape - for input_name in input_user_shapes: - node_id, direction, port = get_node_id_with_ports(graph, input_name) - shape = None if isinstance(input_user_shapes, list) else input_user_shapes[input_name] - if input_name in input_user_data_types and input_user_data_types[input_name] is not None: - data_type = input_user_data_types[input_name] - _input_shapes[node_id].append({'shape': shape, direction: port, 'data_type': data_type}) - else: - _input_shapes[node_id].append({'shape': shape, direction: port}) - if _freeze_placeholder is not None: - # here we give user an opportunity not to provide node names from --freeze_placeholder_with_value in --input - [_input_shapes[ph_id].append({'shape': None, 'port': None}) for ph_id in _freeze_placeholder - if ph_id not in _input_shapes] - else: - # User provided only --input_shape key - assert isinstance(input_user_shapes, PartialShape) - if len(placeholders_ids) == 1: - # There is only one placeholder in the original network - _input_shapes[placeholders_ids[0]].append({'shape': input_user_shapes, 'port': None}) - elif _freeze_placeholder is not None: - # There are multiple placeholders and some of them are frozen - original_phs = copy(placeholders_ids) - [placeholders_ids.remove(ph_id) for ph_id in _freeze_placeholder] - if len(placeholders_ids) != 1: - raise Error('Original placeholders: \'{}\'. Freezing was requested for \'{}\'. --input_shape was ' - 'provided without --input. Can not deduce which node shape to override' - ''.format(', '.join(original_phs), ', '.join(_freeze_placeholder.keys()))) - _input_shapes[placeholders_ids[0]].append({'shape': input_user_shapes, 'port': None}) - [_input_shapes[node_id].append({'shape': None, 'port': None}) for node_id in _freeze_placeholder] - else: - # There are multiple placeholders in the original network and none of them are frozen - # Can not deduce which placeholder shape to override - raise Error('No or multiple placeholders in the model, but only one shape is provided, cannot set it. ' + - refer_to_faq_msg(32)) - - # check that shape is specified for every new placeholder in _input_shapes - # and update _freeze_placeholder with new possible placeholders created by cutting a graph - for node_id in _freeze_new_placeholder: - new_phs = _freeze_new_placeholder[node_id] - if node_id not in _input_shapes: - raise Error('Shape is not specified for the placeholder with name {} through --input_shape option.' - ''.format(new_phs[0]['name'])) - _ins = _input_shapes[node_id] # list - for new_ph in new_phs: - name = new_ph['name'] - direction = new_ph['direction'] - port = new_ph['port'] - placeholder_id = new_ph['id'] - value = new_ph['value'] - if any([_in['shape'] is not None and direction in _in and _in[direction] == port for _in in _ins]): - _freeze_placeholder[placeholder_id] = value - else: - raise Error('Shape is not specified for the placeholder with name {} through --input_shape option.' 
- ''.format(name)) - - return _input_shapes, _freeze_placeholder - - -def output_user_data_repack(graph: Graph, outputs: list): - """ - - :param graph: graph to operate on - :param outputs: list of node names provided by user - :return: dictionary with node IDs as keys and list of port dictionaries as values - Example of outputs dictionary: - _outputs = - { - 'node_ID': - [ - {'out': 0}, - {'out': 1}, - ], - 'node_1_ID': - [ - {'port': None} - ], - 'node_2_ID': - [ - {'in': 3} - ] - } - """ - _outputs = defaultdict(list) - if outputs is None: - _outputs = None - else: - for output in outputs: - node_id, direction, port = get_node_id_with_ports(graph, output) - _outputs[node_id].append({direction: port}) - return _outputs - - -def user_data_repack(graph: Graph, input_user_shapes: [None, list, dict, np.array], - input_user_data_types: dict, outputs: list, freeze_placeholder: dict): - """ - :param graph: graph to operate on - :param input_user_shapes: data structure representing user input cutting request - :param outputs: list of node names to treat as outputs - :param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values - :return: restructured input, output and freeze placeholder dictionaries or None values - """ - _input_shapes, _freeze_placeholder = input_user_data_repack(graph, input_user_shapes, freeze_placeholder, - input_user_data_types=input_user_data_types) - _outputs = output_user_data_repack(graph, outputs) - return _input_shapes, _outputs, _freeze_placeholder - - -def add_output_ops(graph: Graph, user_defined_outputs: dict, inputs: dict = None): - sinks = [] - # func sets all layers as outputs in case of empty user_defined_outputs list (it's impossible to reach by cli) - assert not (isinstance(user_defined_outputs, list) and not len(user_defined_outputs)) - - # remove previously generated Result if any - graph.remove_nodes_from([node_name for node_name in graph.nodes() if - 'op' in graph.node[node_name] and graph.node[node_name]['op'] == 'Result']) - - if user_defined_outputs is None: - inputs = graph.get_nodes_with_attributes(op='Parameter') if inputs is None else list(inputs.keys()) - input_reachable, dead_outputs, undead_outputs = set(), [], [] - for input in inputs: - graph.dfs(node_name=input, visited=input_reachable) - for node_name in list(graph.nodes()): - if len(list(graph.out_edges(node_name))) == 0: - if node_name in input_reachable: - out_ports_count = Node(graph, node_name).out_ports_count if Node(graph, node_name).has_valid('out_ports_count') else 1 - for i in range(out_ports_count): - sinks.append(add_opoutput(graph, node_name, i, False)) - undead_outputs.append(node_name) - else: - dead_outputs.append(node_name) - if len(dead_outputs): - log.info('Possible outputs: \'{!s}\' are not input reachable. True outputs are {!s}' - ''.format(', '.join([str(d_o) for d_o in dead_outputs]), - ', '.join([str(u_o) for u_o in undead_outputs]))) - else: # cutting the net by outputs - for node, values in user_defined_outputs.items(): - if node not in graph.nodes(): - raise Error('Node "{}" does not exist in the graph. ' + - refer_to_faq_msg(26), node) - for value in values: - if 'in' in value: - in_edges = list(graph.in_edges(node, data=True)) - if len(in_edges) - 1 < value['in']: - raise Error('Port index {} is out of number of available input ports for node "{}". 
' + - refer_to_faq_msg(29), value['in'], node) - for u, v, attrs in in_edges: - if 'in' in attrs and attrs['in'] == value['in']: - sinks.append(add_opoutput(graph, u, attrs['out'], user_defined_name=node)) - elif 'out' in value: - out_edges = list(graph.out_edges(node, data=True)) - if len(out_edges) - 1 < value['out']: - raise Error('Port index {} is out of number of available output ports for node "{}". ' + - refer_to_faq_msg(29), value['out'], node) - for u, v, attrs in out_edges: - if 'out' in attrs and attrs['out'] == value['out']: - sinks.append(add_opoutput(graph, node, attrs['out'], user_defined_name=node)) - else: - sinks.append(add_opoutput(graph, node, 0, user_defined_name=node)) - return sinks - - -def add_outputs_identity(graph: Graph, outputs: list, add_edge: callable, params: dict = {}): - """ - Adds identity nodes marked with needs_removal=True attribute after each output of the graph. - These nodes are used for storing tensor names information at the incoming edge - and are removed with the OutputCut transformation. - :param graph: graph to operate on. - :param outputs: list of output node ids. - :param add_edge: method which adds an edge to the graph with the following signature: - f(src_node_id: str, dst_node_id: str, in_port: int). - :param params: extra parameters for add_edge method. - """ - for output in outputs: - fake_node_name = graph.unique_id(output) - graph.add_node(fake_node_name, name=fake_node_name, identity=True, kind='op', op='Identity', - infer=None, needs_removal=True, symbol_dict={'op': 'Identity'}) - add_edge(graph, output, fake_node_name, **params) - - -def set_is_input(graph: Graph, placeholders: list, is_input: bool): - for placeholder in placeholders: - graph.node[placeholder]['is_input'] = is_input - - -def check_input(graph: Graph, node_name: str): - node = Node(graph, node_name) - if node['kind'] == 'op' and node['op'] == 'Parameter' and not len(graph.in_edges(node_name)) and \ - not node['is_input']: - raise Error("--input parameter was provided. Other inputs are needed for output computation. " - "Provide more inputs or choose another place to cut the net. " + refer_to_faq_msg(27)) - - -def split_node_in_port(node_id: str): - """Split node_id in form port:node to separate node and port, where port is converted to int""" - if isinstance(node_id, str): - separator = ':' - parts = node_id.split(separator) - if len(parts) > 1: - if parts[0].isdigit(): - node_name = separator.join(parts[1:]) - try: - port = int(parts[0]) - return node_name, port - except ValueError as err: - log.warning('Didn\'t recognize port:node format for "{}" because port is not an integer.'.format( - node_id)) - else: - node_name = separator.join(parts[:-1]) - try: - port = int(parts[-1]) - return node_name, port - except ValueError as err: - log.warning('Didn\'t recognize node:port format for "{}" because port is not an integer.'.format( - node_id)) - - return node_id, None - - -def add_input_op_input_port_without_data(graph: Graph, node_id: str, input_op, edge_attrs: dict): - input_node = input_op.create_node() - graph.add_edge(input_node.id, node_id, **edge_attrs) - log.debug('Input: {} for node {}'.format(input_node.id, node_id)) - log.debug("Add edge from {} to {}".format(input_node.id, node_id)) - return input_node.id - - -def add_input_op_input_port_with_data(graph: Graph, node_id: str, input_op, edge_attrs: dict): - assert graph.stage == 'middle', 'add_input_op_input_port_with_data() function can be used only for graph after ' \ - 'shape inference!' 
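# --- Editor's note (illustrative addition, not part of the removed file) ---
# A minimal standalone sketch of the "port:node" / "node:port" naming convention
# that split_node_in_port() above handles, assuming the same convention used by
# get_node_id_with_ports(): an integer before the colon refers to an input port,
# an integer after it to an output port.
def split_name_and_port(name: str):
    parts = name.split(':')
    if len(parts) > 1 and parts[0].isdigit():
        return ':'.join(parts[1:]), int(parts[0])    # "0:conv1" -> ("conv1", 0)
    if len(parts) > 1 and parts[-1].isdigit():
        return ':'.join(parts[:-1]), int(parts[-1])  # "conv1:1" -> ("conv1", 1)
    return name, None                                # "conv1"   -> ("conv1", None)

assert split_name_and_port('0:conv1') == ('conv1', 0)
assert split_name_and_port('conv1:1') == ('conv1', 1)
assert split_name_and_port('conv1') == ('conv1', None)
# --- end of editor's note ---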
- input_node = input_op.create_node(edge_attrs=edge_attrs) - node = Node(graph, node_id) - - out_port = input_node.out_port(edge_attrs['out']) - out_port.connect(node.in_port(edge_attrs['in'])) - out_port.data.set_shape(input_node.soft_get('shape', None)) - input_data_node = input_node.out_node(0) - - if 'fw_tensor_debug_info' in edge_attrs: - input_data_node['fw_tensor_debug_info'] = edge_attrs['fw_tensor_debug_info'] - - log.debug('Input: {} for node {}'.format(input_node.id, node_id)) - log.debug("Add edge from {} to {}".format(input_node.id, input_data_node.id)) - log.debug("Add edge from {} to {}".format(input_data_node.id, node_id)) - return input_node.id - - -def add_input_op_output_port_without_data(graph: Graph, node_id: str, input_op, port: int, fw_info: list): - input_node = input_op.create_node() - # In this case it can be more than one out edge from one port and we should iterate over all output edges - for _, out_node, attrs in graph.out_edges(node_id, data=True): - if attrs['out'] == port: - # new out port = 0 - attrs = attrs.copy() - attrs['out'] = 0 - attrs['fw_tensor_debug_info'] = fw_info - attrs['data_attrs'] = ['fw_tensor_debug_info'] - graph.add_edge(input_node.id, out_node, **attrs) - log.debug('Input: {} for node {} output port {}'.format(input_node.id, node_id, port)) - log.debug("Add edge from {} to {}".format(input_node.id, out_node)) - return input_node.id - - -def add_input_op_output_port_with_data(graph: Graph, node_id: str, input_op, port: int, fw_info: list): - # we assume that after op always data node - assert graph.stage == 'middle', 'add_input_op_input_port_with_data() function can be used only for graph after ' \ - 'shape inference!' - data_node = Node(graph, node_id).out_node(port) - data_node['fw_tensor_debug_info'] = fw_info - assert data_node.has_valid('kind') and data_node.kind == 'data' - input_node = input_op.create_node() - Node(graph, node_id).out_port(port).get_connection().set_source(input_node.out_port(0)) - log.debug('Input: {} for node {}'.format(input_node.id, node_id)) - log.debug("Add edge from {} to {}".format(input_node.id, node_id)) - return input_node.id - - -def add_input_op(graph: Graph, node_id: str, port: int = 0, data: bool = False, - shape=None, user_shape=None, data_type=None, is_out_port: bool = False): - """ - This function adds Input node to node with id==node_id to specified port (in or out defined with is_out_port). - :param graph: graph to operate on. - :param node_id: node_id for node to which we should add new input. - :param port: number of port of node_id node for adding input node. - :param data: flag that define whether data nodes is needed or not. - :param shape: shape for new input node. - :param user_shape: shape provided by user which may contain boundaries of dynamic dimension. - :param data_type: data type of input node. - :param is_out_port: flag that define whether port is output port or not. 
- :return: id of new Input operation - """ - # We import it here because Op imports add_attrs_props and update_ie_fields from this file - from openvino.tools.mo.ops.parameter import Parameter - if data_type is None: - data_type = np.float32 - input_op = Parameter(graph, dict(shape=shape, user_shape=user_shape, data_type=data_type, initial_node_name=node_id, - name=get_new_placeholder_name(node_id, is_out_port, port))) - - if is_out_port: - tensor_name = Node(graph, node_id).soft_get('name') + ":" + str(port) - else: - tensor_name = str(port) + ":" + Node(graph, node_id).soft_get('name') - fw_info = [(Node(graph, node_id).soft_get('name'), tensor_name)] - - if not is_out_port and port == 0: - tensor_name_no_port = Node(graph, node_id).soft_get('name') - if graph.has_tensor_name(tensor_name_no_port): - log.warning('Could not add user defined input name {} to tensor names list of as ' - 'graph contains tensor name with same name.'.format(tensor_name_no_port)) - else: - # Add alias with operation name, as this format is used in some config files - fw_info.append((Node(graph, node_id).soft_get('name'), tensor_name_no_port)) - - edge_attrs = {'in': port, 'out': 0, 'in_attrs': ['in'], 'out_attrs': ['out'], - 'fw_tensor_debug_info': fw_info, - 'data_attrs': ['fw_tensor_debug_info']} - - if not data: - if is_out_port: - new_input_id = add_input_op_output_port_without_data(graph=graph, node_id=node_id, input_op=input_op, - port=port, fw_info=edge_attrs['fw_tensor_debug_info']) - else: - new_input_id = add_input_op_input_port_without_data(graph=graph, node_id=node_id, input_op=input_op, - edge_attrs=edge_attrs) - else: - if is_out_port: - new_input_id = add_input_op_output_port_with_data(graph=graph, node_id=node_id, input_op=input_op, - port=port, fw_info=edge_attrs['fw_tensor_debug_info']) - else: - new_input_id = add_input_op_input_port_with_data(graph=graph, node_id=node_id, input_op=input_op, - edge_attrs=edge_attrs) - return new_input_id - - -def add_input_ops_helper_before_infer_input_port(graph: Graph, smart_node: Node, port: int, node_id: str, - shape: np.array, user_shape: tuple, data_type, - inputs: list, edges_to_remove: list): - n_inputs = len(smart_node.in_nodes()) - if n_inputs > 1 and port is None: - raise Error( - 'Node {} has more than 1 input and input shapes were provided. Try not to provide input' - ' shapes or specify input port with port:node notation, where port is an integer. ' - '{}'.format(smart_node.soft_get('name'), refer_to_faq_msg(30))) - port = port if port is not None else 0 - edges_to_remove.append((smart_node.in_node(port).id, smart_node.id)) - inputs.append(add_input_op(graph=graph, node_id=node_id, port=port, data=False, - shape=shape, user_shape=user_shape, data_type=data_type)) - - -def add_input_ops_helper_after_infer_input_port(graph: Graph, smart_node: Node, port:int, node_id: str, - inputs: list, edges_to_remove: list): - port = port if port is not None else 0 - in_node = smart_node.in_node(port) - shape = in_node['shape'] if 'shape' in in_node else None - if shape is None: - raise Error('Shape for tensor "{}" is not defined. Can not proceed.' 
+ refer_to_faq_msg(41), - in_node.soft_get('name')) - inputs.append(add_input_op(graph=graph, node_id=node_id, port=port, data=True, - shape=shape.copy(), data_type=in_node.soft_get('data_type', None))) - edges_to_remove.append((in_node.id, node_id)) - - -def add_input_ops_helper_before_infer_output_port(graph: Graph, port: int, node_id: str, - shape: np.array, user_shape: tuple, data_type: tuple, - inputs: list, edges_to_remove: list): - for u, v, edge_attrs in graph.out_edges(node_id, data=True): - if edge_attrs['out'] == port: - edges_to_remove.append((u, v)) # we need to remove all edges from this port - inputs.append(add_input_op(graph=graph, node_id=node_id, port=port, data=False, - shape=shape, user_shape=user_shape, data_type=data_type, is_out_port=True)) - - -def add_input_ops_helper_after_infer_output_port(graph: Graph, smart_node: Node, port:int, node_id: str, - inputs: list, edges_to_remove: list): - out_node = smart_node.out_node(port) - shape = out_node['shape'] if 'shape' in out_node else None - if shape is None: - raise Error('Shape for tensor "{}" is not defined. Can not proceed.' + refer_to_faq_msg(41), - out_node.soft_get('name')) - inputs.append(add_input_op(graph=graph, node_id=node_id, port=port, data=True, - shape=shape.copy(), data_type=out_node.soft_get('data_type', None), is_out_port=True)) - edges_to_remove.append((node_id, out_node.id)) - - -def add_input_ops(graph: Graph, user_defined_inputs: dict, before_infer: bool): - """ - This function add user defined input operations. - For cutting without port: - Op_1 -> Op_2 -> output, user_defined_inputs = {'Op_2': {'shape': PartialShape([1, 2])}} => - Op_1, New_input (op=Parameter, shape=[1, 2]) -> Op_2 -> output - - For cutting with input port: - Op_1 -> Op_2 -> output, user_defined_inputs = {'Op_2': {'shape':PartialShape([1, 2]), 'in': 0}} => - Op_1, New_input (op=Parameter, shape=[1, 2]) -> Op_2 -> output - - For cutting with output port: - Op_1 -> Op_2 -> output, user_defined_inputs = {'Op_2': {'shape':PartialShape([1, 2]), 'out': 0}} => - Op_1 -> Op_2, New_input (op=Parameter, shape=[1, 2]) -> output - - For case with before_infer=False data nodes are added to this schemes. 
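# --- Editor's note (illustrative addition, not part of the removed file) ---
# For readers following the cutting logic: the user_defined_inputs argument that
# add_input_ops() iterates over has one entry per node id, each holding a list of
# port/shape descriptors, mirroring the _input_shapes structure documented for
# input_user_data_repack() earlier in this file. Node names, shapes and data types
# here are made up.
import numpy as np

user_defined_inputs = {
    'conv1':  [{'shape': np.array([1, 3, 224, 224]), 'in': 0, 'data_type': np.float32}],
    'relu_5': [{'shape': None, 'out': 0}],                   # cut after output port 0
    'data':   [{'shape': np.array([1, 10]), 'port': None}],  # node given without a port
}

for node_id, descriptors in user_defined_inputs.items():
    for desc in descriptors:
        is_out_port = 'out' in desc                  # same check add_input_ops() performs
        port = desc.get('out', desc.get('in'))
        print(node_id, 'out' if is_out_port else 'in/port', port, desc.get('shape'))
# --- end of editor's note ---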
- """ - inputs = [] - set_is_input(graph, graph.get_nodes_with_attributes(op='Parameter'), False) - if user_defined_inputs is None: - inputs = graph.get_nodes_with_attributes(op='Parameter') - else: - # cutting the net by inputs - assert isinstance(user_defined_inputs, dict) - edges_to_remove = [] - for node_id in user_defined_inputs: - for port_and_shape_info in user_defined_inputs[node_id]: - if 'added' in port_and_shape_info and port_and_shape_info['added']: - continue - - is_out_port = 'out' in port_and_shape_info # by default we assume input port or input node without port - shape = port_and_shape_info['shape'] if 'shape' in port_and_shape_info else None - user_shape = None - if shape is not None: - user_shape = shape - shape_list = [] - for dim in shape: - if isinstance(dim, Dimension): - if dim.is_static: - shape_list.append(dim.get_min_length()) - else: - shape_list.append(dynamic_dimension_value) - continue - if dim >= 0: - shape_list.append(dim) - else: - shape_list.append(dynamic_dimension_value) - - shape = shape_array(shape_list) - data_type = port_and_shape_info['data_type'] if 'data_type' in port_and_shape_info else None - smart_node = Node(graph, node_id) - - # Common port index check - if is_out_port: - port = port_and_shape_info['out'] # we check that 'out' in port_and_shape_info earlier - if port is None: - raise Error('Output port for input node {} should be specified, it cannot be None!'.format( - node_id - )) - if port is not None and port not in smart_node.out_nodes(): - raise Error('Output port index {} is out of number of available output ports for node "{}". ' + - refer_to_faq_msg(29), port, node_id) - else: - port = port_and_shape_info['in'] if 'in' in port_and_shape_info else None - if port is not None and port not in smart_node.in_nodes(): - raise Error('Input port index {} is out of number of available input ports for node "{}". ' + - refer_to_faq_msg(29), port, node_id) - - # specific Parameter case - if smart_node.op == 'Parameter': - if port is not None: - raise Error( - 'Parameter node "{}" doesn\'t have input port, but input port {} was provided. 
' + - refer_to_faq_msg(28), node_id, port) - if shape is not None: - smart_node['shape'] = shape - smart_node['user_shape'] = user_shape - if data_type is not None: - smart_node['data_type'] = data_type - inputs.append(node_id) - port_and_shape_info['added'] = True - - if smart_node.out_edges(): - # User specified input is Parameter, so input cut is not needed, but - # Op name needs to be added to tensor names - op_name = smart_node.soft_get('name') - if graph.has_tensor_name(op_name): - continue - out_edges = list(graph.out_edges(op_name, data=True)) - for _, _, attrs in out_edges: - fw_info = [] - if 'fw_tensor_debug_info' in attrs: - fw_info += attrs['fw_tensor_debug_info'] - attrs['fw_tensor_debug_info'] = fw_info + [(op_name, op_name)] - - continue - - if before_infer: - if shape is None: - continue - # We cut with shapes provided by user and there is no need to wait till infer - if is_out_port: - add_input_ops_helper_before_infer_output_port(graph, port, node_id, shape, user_shape, - data_type, inputs, edges_to_remove) - else: - add_input_ops_helper_before_infer_input_port(graph, smart_node, port, node_id, shape, - user_shape, data_type, inputs, - edges_to_remove) - else: - # We cut after infer and we need inferred shapes in nodes - if is_out_port: - add_input_ops_helper_after_infer_output_port(graph, smart_node, port, node_id, inputs, - edges_to_remove) - else: - add_input_ops_helper_after_infer_input_port(graph, smart_node, port, node_id, inputs, - edges_to_remove) - port_and_shape_info['added'] = True - graph.remove_edges_from(edges_to_remove) - - # if len(inputs) == 0, shapes were not provided for all nodes in input-cut request, - # we didn't cut inputs before infer, so this check is useless and invalid - if len(inputs): - set_is_input(graph, inputs, True) - # Check if there are inputs that are not listed in user_defined_inputs and are needed to calculate outputs - outputs = graph.get_nodes_with_attributes(op='Result') - visited = set() - for output_name in outputs: - reverse_dfs(graph, output_name, check_input, visited) - - return inputs - - -class FrontExtractorOp(object): - """ - A super class for an operation extractor. - Do additional extraction of operation attributes without modifying of graph topology. - Useful for custom layers that maps to a single FW operation to re-use of FW shape inference. - In contrast to FrontReplacement* classes, this class doesn't modify graph topology and - doesn't completely override node attributes. So it is safe to preserve the original - MO inference function (which can use FW fallback mechanism). - - A sub-class should implement one of extract methods: - def extract(self, node): - return (, { }) - """ - - registered_ops = {} - registered_cls = [] - - @classmethod - def class_type(cls): - return class_registration.ClassType.EXTRACTOR - - -class CaffePythonFrontExtractorOp: - """ - A super class for custom caffe operation extractor. - Do additional extraction of Python Caffe operation attributes without modifying the graph topology. - Useful for Python layers that maps to a single FW operation to re-use of FW shape inference. - In contrast to FrontReplacement* classes, this class doesn't modify graph topology and - doesn't completely override node attributes. So it is safe to preserve the original - MO inference function (which can use FW fallback mechanism). - - It is needed to keep the list of extractors for particularly Python layers. 
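# --- Editor's note (illustrative addition, not part of the removed file) ---
# The parse_param_str() helper defined a little further below wraps the Caffe
# python_param string in braces when needed and parses it with ast.literal_eval.
# A quick standalone illustration; the parameter names and values are made up.
import ast

param_str = "'num_classes': 21, 'share_location': True"  # hypothetical python_param.param_str contents
if param_str[0] != '{' and param_str[-1] != '}':
    param_str = '{' + param_str + '}'
attrs = ast.literal_eval(param_str)
print(attrs)  # {'num_classes': 21, 'share_location': True}
# --- end of editor's note ---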
- - When actual extraction happens, Model Optimizer first finds the match by type, which is PythonFrontExtractorOp. - It in turns looks up the CaffePythonFrontExtractorOp for the needed layer extractor not by type, but by - the compilation of the layer name and the module name. - - A sub-class should implement one of extract methods: - def extract(self, node): - return (, { }) - """ - registered_ops = {} - registered_cls = [] - - @staticmethod - def get_attrs(pb) -> dict: - params = pb.python_param - attrs = CaffePythonFrontExtractorOp.parse_param_str(params.param_str) - return attrs - - @staticmethod - def parse_param_str(param_str: str) -> dict: - if param_str[0] != '{' and param_str[-1] != '}': - param_str = '{' + param_str + '}' - return ast.literal_eval(param_str) - - @staticmethod - def check_param(op_cls, attrs): - for a in attrs: - if a not in op_cls.supported_attrs(op_cls): - log.error('Parameter {} is not recognised, please check correctness.\n List of supported parameters ' - 'is: {}'.format(a, op_cls.supported_attrs(op_cls)), extra={'is_warning': True}) - - @classmethod - def class_type(cls): - return class_registration.ClassType.EXTRACTOR diff --git a/tools/mo/openvino/tools/mo/front/flatten_to_reshape.py b/tools/mo/openvino/tools/mo/front/flatten_to_reshape.py deleted file mode 100644 index 84f8772fdef8cf..00000000000000 --- a/tools/mo/openvino/tools/mo/front/flatten_to_reshape.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.rank_decomposer import RankDecomposer -from openvino.tools.mo.ops.ReduceOps import ReduceProd -from openvino.tools.mo.ops.rank import Rank -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.utils.shape import new_shape_node_from_shape_nodes, get_shape_values_by_range_idxs - - -class FlattenToReshape(FrontReplacementSubgraph): - """ - Flatten operation flattens the input tensor according to given `axis` and `end_axis` parameters: - - Input of shape [d_0, d_1, ... d_n] - Output of shape [d_0, d_1, ... , d_(axis-1), d_axis X ... X d_(end_axis), d_(end_axis + 1), ... 
, dn] - """ - enabled = True - - def run_before(self): - return [RankDecomposer] - - def pattern(self): - return dict(nodes=[ - ('flatten', dict(op='Flatten')) - ], - edges=[]) - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['flatten'] - name = node.soft_get('name', node.id) - - assert node.has_valid('axis'), 'Flatten {} has no mandatory `axis` attribute'.format(name) - assert node.has_valid('end_axis'), 'Flatten {} has no mandatory `end_axis` attribute'.format(name) - - axis = node.axis - end_axis = node.end_axis - - if end_axis == -1 and axis >= 0: - begin_dims = Const(graph, {'value': int64_array([0] * axis)}).create_node() - middle_dim = Const(graph, {'value': int64_array([-1])}).create_node() - end_dims = Const(graph, {'value': int64_array([])}).create_node() - else: - rank = Rank(graph, {'name': name + '/input_rank'}).create_node() - node.in_port(0).get_source().connect(rank.in_port(0)) - - shape = Shape(graph, {'name': name + '/input_shape'}).create_node() - node.in_port(0).get_source().connect(shape.in_port(0)) - - begin_dims = get_shape_values_by_range_idxs( - shape=shape, rank=rank, begin=0, end=axis) - middle_dims = get_shape_values_by_range_idxs( - shape=shape, rank=rank, begin=axis, end=end_axis, include_end=True) - end_dims = get_shape_values_by_range_idxs( - shape=shape, rank=rank, begin=end_axis, end=-1, include_begin=False, include_end=True) - - middle_dim = create_op_node_with_second_input(graph, ReduceProd, int64_array([0]), {'keep_dims': True}) - middle_dims.out_port(0).connect(middle_dim.in_port(0)) - - dim = new_shape_node_from_shape_nodes([begin_dims, middle_dim, end_dims]) - - original_name = node.soft_get('name') - abandoned_name = original_name + '/ShouldBeDeleted' - reshape_node = Reshape(graph, {}).create_node() - # Keep node with the same name to avoid confuse with renaming - rename_nodes([(node, abandoned_name), (reshape_node, original_name)]) - reshape_node.in_port(1).connect(dim.out_port(0)) - - node.out_port(0).get_connection().set_source(reshape_node.out_port(0)) - node.in_port(0).get_connection().set_destination(reshape_node.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/freeze_placeholder_value.py b/tools/mo/openvino/tools/mo/front/freeze_placeholder_value.py deleted file mode 100644 index 2ff62b703d56c9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/freeze_placeholder_value.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.convert_data_type import SUPPORTED_DATA_TYPES -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.utils.error import Error - - -class FreezePlaceholderValue(FrontReplacementSubgraph): - """ - Replaces existing placeholder to Constant node with provided value. 
It takes value from freeze_placeholder as - a string and casts it to actual node data type - """ - enabled = True - run_not_recursively = True - graph_condition = [lambda graph: graph.graph['freeze_placeholder'] is not None] - - def run_after(self): - from openvino.tools.mo.front.restore_ports import RestorePorts - return [RestorePorts] - - def run_before(self): - return [] - - @staticmethod - def pattern(): - return dict( - nodes=[('placeholder', dict(kind='op', op='Parameter'))], - edges=[] - ) - - def replace_sub_graph(self, graph: Graph, match: dict): - ph = match['placeholder'] - if ph.name in graph.graph['freeze_placeholder']: - name = ph.name - if ph.has_and_set('data_type'): - data_type = ph.data_type - else: - data_type = SUPPORTED_DATA_TYPES[graph.graph['cmd_params'].data_type][0] - string_value = graph.graph['freeze_placeholder'][name] - try: - if data_type != bool: - value = mo_array(string_value, dtype=data_type) - # TODO: investigate why boolean type is allowed only for TensorFlow - elif data_type == bool and graph.graph['fw'] == 'tf': - from openvino.tools.mo.front.tf.common import tf_data_type_cast - if isinstance(string_value, list): - casted_list = list() - for v in mo_array(string_value): - casted_list.append(tf_data_type_cast[ph.data_type](v)) - value = mo_array(string_value, dtype=data_type) - else: - value = tf_data_type_cast[ph.data_type](string_value) - else: - raise Error("Cannot cast value {} to {} data_type".format(string_value, data_type)) - except: - raise Error("Cannot cast value {} to {} data_type".format(string_value, data_type)) - try: - value = np.reshape(a=value, newshape=ph.shape) - except: - raise Error("Can not reshape value {} to shape {}".format(value, ph.shape)) - out_edges = list(graph.out_edges(ph.id, data=True)) - new_node = Const(graph).create_node( - attrs={'value': value, 'data_type': type(value), 'name': name + '/const_placeholder', - 'shape': ph.shape}) - graph.erase_node(ph) - graph.add_edges_from([(new_node.id, v, attrs) for u, v, attrs in out_edges]) - log.info("Placeholder node \"{}\" was replaced with Const node \"{}\" with value \"{}\"".format( - name, new_node.name, value)) diff --git a/tools/mo/openvino/tools/mo/front/global_pooling_to_reduce.py b/tools/mo/openvino/tools/mo/front/global_pooling_to_reduce.py deleted file mode 100644 index 2a1420bb6c80c7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/global_pooling_to_reduce.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.rank_decomposer import RankDecomposer -from openvino.tools.mo.ops.ReduceOps import ReduceMax, ReduceMean -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.ops.rank import Rank -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class GlobalPoolingToReduce(FrontReplacementPattern): - op = "Pooling" - enabled = True - - pool_method_to_reduce_type = { - 'max': ReduceMax, - 'avg': ReduceMean, - } - - def run_before(self): - return [RankDecomposer] - - def find_and_replace_pattern(self, graph: Graph): - global_poolings = graph.get_op_nodes(type='Pooling', global_pool=True) - if len(global_poolings) == 0: - return - - layout = graph.graph['layout'] - assert layout != 'NHWC', 'Global pooling transformation depends on 
layout (NHWC not enabled)' - - for pooling in global_poolings: - name = pooling.soft_get('name', pooling.id) - assert pooling.has_valid('pool_method'), 'Global Pooling {} has no `pool_method` attribute'.format(name) - method = pooling['pool_method'] - assert method in self.pool_method_to_reduce_type, \ - 'Unexpected Global Pooling method `{}` for node `{}`'.format(method, name) - reduce_op_class = self.pool_method_to_reduce_type[method] - - reduce = reduce_op_class(graph, {'name': name + '/reduce', 'keep_dims': True}).create_node() - - pooling.out_port(0).get_connection().set_source(reduce.out_port(0)) - src = pooling.in_port(0).get_connection().get_source() - - reduce.in_port(0).get_connection().set_source(src) - - start = Const(graph, {'value': int64_array(2)}).create_node() - end = Rank(graph, {'name': name + '/input_rank'}).create_node() - delta = Const(graph, {'value': int64_array(1)}).create_node() - - axis = Range(graph, {'name': name + '/global_pooling_reduce_axis'}).create_node() - - axis.in_port(0).connect(start.out_port(0)) - src.connect(end.in_port(0)) - axis.in_port(1).connect(end.out_port(0)) - axis.in_port(2).connect(delta.out_port(0)) - - axis.out_port(0).connect(reduce.in_port(1)) - - log.debug('Global {} pooling was converted to reduce: `{}`'.format(method, name)) diff --git a/tools/mo/openvino/tools/mo/front/image_scaler.py b/tools/mo/openvino/tools/mo/front/image_scaler.py deleted file mode 100644 index 10f5b2bf876d8c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/image_scaler.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Mul, Add -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class ImageScaler(FrontReplacementOp): - op = "ImageScaler" - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - # This replacer replace ImageScalar operation to Mul->Add sequence - # Also it check that weights and biases are good - op = match['op'] - - # Check that weights and biases are not useless - has_bias, has_weights = True, True - if all([x == 1 for x in np.nditer(op.scale)]): - has_weights = False - if all([x == 0 for x in np.nditer(op.bias)]): - has_bias = False - - assert len(op.in_ports()) == 1 - - last_port = op.in_port(0).get_source() - - # Create Mul & Add nodes - if has_weights: - mul_weights = Const(graph, dict(value=op.scale, shape=op.scale.shape)).create_node() - mul_op = Mul(graph, dict(name=op.id + '/mul_')).create_node() - op.in_port(0).get_connection().set_destination(mul_op.in_port(0)) - mul_weights.out_port(0).connect(mul_op.in_port(1)) - last_port = mul_op.out_port(0) - - if has_bias: - add_bias = Const(graph, dict(value=op.bias, shape=op.bias.shape)).create_node() - add_op = Add(graph, dict(name=op.id + '/add_')).create_node() - last_port.get_connection().set_destination(add_op.in_port(0)) - add_bias.out_port(0).connect(add_op.in_port(1)) - last_port = add_op.out_port(0) - - op.in_port(0).disconnect() - op.out_port(0).get_connection().set_source(last_port) diff --git a/tools/mo/openvino/tools/mo/front/input_cut.py b/tools/mo/openvino/tools/mo/front/input_cut.py deleted file mode 100644 index 6e0f3ba007252e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/input_cut.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# 
SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.extractor import add_input_ops -from openvino.tools.mo.graph.graph import Graph - - -class InputCut(FrontReplacementPattern): - enabled = True - force_clean_up = True - run_not_recursively = True - - def run_after(self): - from openvino.tools.mo.front.output_cut import OutputCut - return [OutputCut] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - add_input_ops(graph, graph.graph['user_shapes'], True) diff --git a/tools/mo/openvino/tools/mo/front/instance_normalization.py b/tools/mo/openvino/tools/mo/front/instance_normalization.py deleted file mode 100644 index 32d60610e9a5ed..00000000000000 --- a/tools/mo/openvino/tools/mo/front/instance_normalization.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Add, Mul -from openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.ops.rank import Rank -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Node, Graph, rename_nodes - -import numpy as np - - -class InstanceNormalization(FrontReplacementOp): - ''' Decompose InstanceNormalization to scale*MVN(x) + B - - There are should be also reshapes added for each scale and B. - ''' - op = "InstanceNormalization" - enabled = True - - def replace_op(self, graph: Graph, node: Node): - name = node.soft_get('name', node.id) - - # create range of axes for MVN based on `start_axis` and rank of input - rank = Rank(graph, {'name': name + '/Rank'}).create_node() - rng = create_op_with_const_inputs(graph, Range, {0: int64_array(2), 2: int64_array(1)}, - {'name': name + '/Range', 'output_type': np.int64}) - mvn = MVN(graph, {'eps': node.epsilon, 'eps_mode': 'inside_sqrt', 'normalize_variance': 1, - 'name': name + '/Ins_Norm/MVN_', }).create_node() - node.in_port(0).get_connection().set_destination(mvn.in_port(0)) - rng.out_port(0).connect(mvn.in_port(1)) - mul = Mul(graph, {'axis': 1, 'name': name + '/Ins_Norm/mul_'}).create_node() - mvn.out_port(0).connect(mul.in_port(0)) - node.in_port(1).get_connection().set_destination(mul.in_port(1)) - add = Add(graph, {'axis': 1, 'name': name + '/Ins_Norm/add_'}).create_node() - mul.out_port(0).connect(add.in_port(0)) - node.in_port(2).get_connection().set_destination(add.in_port(1)) - - mvn.in_port(0).get_connection().add_destination(rank.in_port(0)) - rng.in_port(1).connect(rank.out_port(0)) - - rename_nodes([(node, name + '/TBD'), (add, name)]) - - return [add.id] diff --git a/tools/mo/openvino/tools/mo/front/interpolate_reshape.py b/tools/mo/openvino/tools/mo/front/interpolate_reshape.py deleted file mode 100644 index ed6db96104f57a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/interpolate_reshape.py +++ /dev/null @@ -1,173 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import 
mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.shape import Shape - - -class InterpolateWithConcat(FrontReplacementPattern): - r""" - Replaces hard-coded 1-port input of Interpolate with reshape-able sub-graph using the following Concat inputs - - BEFORE: - input Const - shape=[1, 3, 30, 40] value=[60, 160] - \ / - Interpolate(axes=(2, 3)) input_1 - shape=[1, 3, 60, 160] shape=[1, 4, 60, 160] - \ / - Concat(axis=1) - shape=[1, 7, 60, 160] - AFTER: - input - shape=[1, 3, 30, 40] input_1 - | shape=[1, 4, 60, 160] - | / | - | ShapeOf | - | | | - | Gather | - | indices=(2, 3); axis=0 | - \ | | - Interpolate(axes=(2, 3)) | - shape=[1, 3, 60, 160] | - \ / - Concat(axis=1) - shape=[1, 7, 60, 160] - - 1. Searches for Interpolate operation which output is connected to Concat (through identity operation or directly). - Interpolate -- [identity] --> Concat - 2. Checks that Interpolate has positive axes parameter - 3. Checks that Concat has positive axis (from attribute and N-input) - 4. Checks that interpolation takes place over different dimensions than concatenation - 5. Searches for Concat sources that are not connected to Interpolate operations - and checks that we have at least one such source (we could create a loop if we won't check) - 6. If any of this checks are failed -- transformation doesn't do anything - 7. Otherwise, we take the first Concat source from the (5) item. - Taking ShapeOf of this source and Gather'ing dimensions by the Interpolate::axes indices - we connect them to the second Interpolate input - - This is how we get updated Interpolate second input that will fit the following Concat operation restrictions. - - - We perform this transformation of the FRONT phase for MO to be able to reshape this Interpolate layer too. - There is a similar transformation with less restrictions on the BACK phase. 
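# --- Editor's note (illustrative addition, not part of the removed file) ---
# A NumPy-only sketch of the idea described above: instead of a hard-coded sizes
# constant, the Interpolate sizes are gathered from the shape of the other
# (non-interpolated) Concat input. Shapes and axes follow the BEFORE/AFTER diagram;
# this is not the transformation code itself.
import numpy as np

concat_source_shape = np.array([1, 4, 60, 160])  # shape of input_1 in the diagram
interp_axes = np.array([2, 3])                   # Interpolate axes

new_sizes = concat_source_shape[interp_axes]     # what the inserted ShapeOf -> Gather yields
print(new_sizes)                                 # [ 60 160]
# --- end of editor's note ---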
- """ - enabled = True - - def run_after(self): - from openvino.tools.mo.front.InterpolateNormalizer import InterpolateNormalizer - return [InterpolateNormalizer] - - @staticmethod - def get_concat_axis(concat: Node): - # Concat axis may be stored as an attribute and as an input (TF) and this is not resolved yet - # TODO: should be removed after Concat operation normalization - assert concat.soft_get('type') == 'Concat' - if concat.has_valid('axis'): - return concat.axis - if concat.has_valid('N'): - axis_node = concat.in_port(concat.N).get_source().node - if axis_node.has_valid('value'): - return axis_node.value.item(0) - return None - - @staticmethod - def get_single_output_destination_safely(node: Node, idx: int = 0): - """ - Checks if node has exactly one used output port and this output port is only used by one consumer - If the checks passed, function returns consumer_node, otherwise None - """ - connected_out_ports = [port for port in node.out_ports().values() if not port.disconnected()] - if len(connected_out_ports) == 1 and connected_out_ports[0].idx == idx: - dsts = node.out_port(idx).get_destinations() - if len(dsts) == 1: - return dsts[0].node - return None - - @staticmethod - def get_single_input_source_safely(node: Node, idx: int = 0): - """ - Checks if node has exactly one used input port - If the check passed, function returns input_node otherwise None - """ - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - if len(connected_in_ports) == 1 and connected_in_ports[0].idx == idx: - return node.in_port(idx).get_source().node - return None - - def get_non_interpolate_concat_sources(self, concat: Node): - """ - Traverses Concat input ports up to find which of them are not connected to Interpolate operations directly - or through identity operation sequence. Returns the list of Concat sources that satisfy the condition. 
- """ - assert concat.soft_get('type') == 'Concat' - sources, ports_to_omit = [], [] - if concat.has_valid('N'): - # TODO: should be removed after Concat operation normalization - ports_to_omit.append(concat.N) - - for in_port in concat.in_ports().values(): - if in_port.disconnected() or in_port.idx in ports_to_omit: - continue - next_node = in_port.get_source().node - while next_node.soft_get('type') != 'Interpolate' and next_node.has_and_set('identity'): - node = self.get_single_input_source_safely(next_node) - if node is not None: - next_node = node - else: - break - if next_node.soft_get('type') != 'Interpolate': - sources.append(in_port.get_connection().get_source()) - return sources - - def make_interpolate_reshape_able(self, interpolate: Node, concat: Node): - assert interpolate.soft_get('type') == 'Interpolate' - assert concat.soft_get('type') == 'Concat' - interp_axes = Interpolate.get_axes(interpolate) - concat_axis = self.get_concat_axis(concat) - - if concat_axis is None or interp_axes is None \ - or np.any(interp_axes < 0) or concat_axis < 0 \ - or concat_axis in interp_axes: - # checks that interpolate axes and concat axis are valid and do not intersect - return - - non_interp_concat_srcs = self.get_non_interpolate_concat_sources(concat) - if not len(non_interp_concat_srcs): - # there is no Concat input to take input from - return - - graph = interpolate.graph - src = non_interp_concat_srcs[0] - - shape = Shape(graph, {'name': src.node.soft_get('name', src.node.id) + '/Shape'}).create_node() - shape.in_port(0).connect(src) - gather = create_op_with_const_inputs(graph, Gather, - {1: mo_array(interp_axes, dtype=np.int32), 2: int64_array(0)}, - {'name': shape.name + '/Gathered'}, input_node=shape) - interpolate.in_port(1).get_connection().set_source(gather.out_port(0)) - - def find_and_replace_pattern(self, graph: Graph): - for interpolate in graph.get_op_nodes(type='Interpolate', version='opset1'): - if interpolate.in_port(1).get_source().node.soft_get('type') != 'Const': - continue - - # Interpolate could be connected to Concat through identity operations, skipping them - next_node = self.get_single_output_destination_safely(interpolate) - if next_node is not None: - while next_node.soft_get('type') != 'Concat' and next_node.has_and_set('identity'): - node = self.get_single_output_destination_safely(next_node) - if node is not None: - next_node = node - else: - break - if next_node.soft_get('type') == 'Concat': - self.make_interpolate_reshape_able(interpolate, next_node) diff --git a/tools/mo/openvino/tools/mo/front/kaldi/__init__.py b/tools/mo/openvino/tools/mo/front/kaldi/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/kaldi/add_reshape_transpose_around_conv_pool.py b/tools/mo/openvino/tools/mo/front/kaldi/add_reshape_transpose_around_conv_pool.py deleted file mode 100644 index 21b8660ba64f56..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/add_reshape_transpose_around_conv_pool.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import 
create_op_with_const_inputs, create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.elementwise import Div -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.shape import node_to_get_shape_value_of_indices - - -def find_max_frame_time(node: Node): - """ - Find maximum time_dim among all inputs of given node - time_dim can be > 0 or < 0, we will find min value (<=0) and max value (>=0) of inputs, - time_dim for node with such inputs will max-min - If one of inputs has undefined time_dim, it raises Error because we assume that all parent nodes already have - set time_dim. - """ - in_frame_time_max = 0 - in_frame_time_min = 0 - - for inp in node.in_ports(): - if node.in_port(inp).disconnected(): - continue - in_node = node.in_port(inp).get_source().node - if in_node.time_dim is None: - raise Error("Parent node {} does not have set time_dim".format(in_node.id)) - if in_node.time_dim > in_frame_time_max: - in_frame_time_max = in_node.time_dim - if in_node.time_dim < in_frame_time_min: - in_frame_time_min = in_node.time_dim - - return in_frame_time_max - in_frame_time_min - - -def set_time_dim(graph): - """ - Set value of dimension where we gather frames with different time labels. - If in some node we don't use any context, then time_dim = 0 - """ - graph.set_node_attributes('time_dim', None) - - # set correct time dim for start Convolutions - update_time_dim_for_start_convolution(graph) - - sorted_nodes = graph.topological_sort() - - for node in sorted_nodes: - if node.time_dim is not None: - continue - - if node.op == "MemoryOffset": - # MemoryOffset can be splitted already and can be without input, time_dim = t in this case - node.time_dim = node.in_port(0).get_source().node.time_dim + node.t if not node.in_port(0).disconnected() else node.t - elif node.op == "Splice": - node.time_dim = node.in_port(0).get_source().node.time_dim + len(node.context) - 1 - elif node.op in ['Convolution', 'Pooling']: - node.time_dim = 0 - elif len([port for port in node.in_ports().values() if not port.disconnected()]) > 1: - node.time_dim = find_max_frame_time(node) - elif len([port for port in node.in_ports().values() if not port.disconnected()]) == 1: - node.time_dim = node.in_port(0).get_source().node.time_dim - else: - node.time_dim = 0 - - -def update_time_dim_for_start_convolution(graph): - """ - If we have pattern like Parameter->Convolution->... then input already spliced outside network. So from set_time_dim - time_dim will be set as 1 and it will be wrong. 
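# --- Editor's note (illustrative addition, not part of the removed file) ---
# A small numeric illustration of find_max_frame_time() above: a node's temporal
# context is the spread between the largest (>= 0) and the smallest (<= 0) time_dim
# among its inputs. The input values are made up.
input_time_dims = [2, 0, -1]   # e.g. a Splice output, a plain input, a MemoryOffset with t = -1
frame_time = max(max(input_time_dims), 0) - min(min(input_time_dims), 0)
print(frame_time)              # 3
# --- end of editor's note ---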
For such pattern time_dim should be set as kernel[1] - (convolution run through the whole splice) - """ - params = graph.get_op_nodes(op="Parameter") - for param_node in params: - for dest in param_node.out_port(0).get_destinations(): - if dest.node.op == 'Convolution': - conv_node = dest.node - assert param_node.time_dim is None or \ - param_node.time_dim == conv_node.soft_get('kernel')[1] - 1, \ - "Kaldi model have 2 Convolutions after Parameter with different time dimensions" - # time_dim starts from 0, kernel from 1 - param_node.time_dim = conv_node.soft_get('kernel')[1] - 1 - - -class AddReshapeTransposeAroundConvPool(FrontReplacementPattern): - """ - This pass adds Reshapes and Transposes around a Convolution/Pooling layer for reshaping from NH to NCHW - For example: - Let's suppose we have next graph: - - Prev_Layer [N, H] -> Convolution [N, C, H, W] -> Next_Layer [N, H] - - In this case Convolution takes only [N, H] from input tensor in 3rd dim - So this pass will convert this graph to the next one: - - Prev_Layer [N, H] -> Reshape(N, H, W, C) -> Transpose(0, 3, 1, 2) -> Convolution [N, C, H, W] -> - Transpose(0, 2, 3, 1) -> Reshape(0, -1) -> Next_Layer [N, H] - """ - enabled = True - graph_condition = [lambda graph: graph.graph['fw'] == "kaldi"] - - def run_after(self): - # remove cycles before this transformation because topological_sort call - from openvino.tools.mo.front.kaldi.split_recurrent_memoryoffset import SplitRecurrentMemoryOffset - return [SplitRecurrentMemoryOffset] - - @staticmethod - def find_and_replace_pattern(graph: Graph): - conv_pool_nodes = graph.get_op_nodes(op='Convolution') - conv_pool_nodes.extend(graph.get_op_nodes(op='Pooling')) - - if len(conv_pool_nodes) == 0: - return - - set_time_dim(graph) - - for node in conv_pool_nodes: - node_name = node.soft_get('name', node.id) - - # create Reshape before convolution - # shape = [in_shape[0], t, patch_stride, C = in_shape[1]/(patch_stride*t)], - # where patch_stride is attribute in Convolution taken from Kaldi - # or before pooling - # shape = [in_shape[0], t, in_shape[1]/(pool_stride*t), pool_stride] - # where pool_stride is attribute in Pooling taken from Kaldi - # adapt time_dim to use in kernel as dimension - time_dim = node.in_port(0).get_source().node.time_dim + 1 - if node.op == 'Convolution': - frame_height = node.patch_stride if node.has_valid('patch_stride') else node.height_in - # set time t instead of 1 in kernel as H and update C to have kernel shape - if node.kernel[2] != time_dim: - assert node.kernel[2] == 1 - node.kernel[2] = time_dim - assert node.kernel[1] % time_dim == 0 - node.kernel[1] = node.kernel[1] // time_dim - node.kernel_spatial = node.kernel[2:] - index_const = 2 - index_div = 3 - else: - frame_height = node.pool_stride - if node.pool_step is None: - node.stride = int64_array([1, 1, node.window[-1], node.window[-1]]) - index_const = 3 - index_div = 2 - - i_shape = Shape(graph, {'name': node_name + '/Shape'}).create_node() - i_shape.in_port(0).connect(node.in_port(0).get_source()) - - N, H = node_to_get_shape_value_of_indices(i_shape, [0]), node_to_get_shape_value_of_indices(i_shape, [1]) - - # H / (patch_stride * t) - H_div_stride_t = create_op_with_const_inputs( - graph, Div, {1: int64_array([frame_height * time_dim])}, {'name': node_name + '/div_stride_h'}) - H_div_stride_t.in_port(0).connect(H.out_port(0)) - - # gather all dims - concat_dims = create_op_with_const_inputs(graph, Concat, {index_const: int64_array([frame_height]), - 1: int64_array([time_dim])}, - {'name': node_name 
+ '/concat_all_dims', 'in_ports_count': 4, - 'axis': 0}) - concat_dims.in_port(0).connect(N.out_port(0)) - concat_dims.in_port(index_div).connect(H_div_stride_t.out_port(0)) - - reshape_in = Reshape(graph, {'name': node_name + '/reshape_in', - 'time_dim': time_dim - 1}).create_node() - reshape_in.in_port(1).connect(concat_dims.out_port(0)) - - # change layout from NHWC to NCHW - # should be replaced by common Permute logic in future - direct_transpose = create_op_node_with_second_input(graph, Transpose, int64_array([0, 3, 1, 2]), - {'name': node_name + '/Transpose', - 'time_dime': time_dim - 1}, reshape_in) - # after convolution/pooling time_dim becomes 0 - inverse_transpose = create_op_node_with_second_input(graph, Transpose, int64_array([0, 2, 3, 1]), - {'name': node_name + '/Transpose_back', - 'time_dim': 0}) - - # create Reshape after Convolution - reshape_out = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1]), - {'name': node_name + '/reshape_out', - 'special_zero': True, 'time_dim': 0}) - - # connect input_reshape_node - source = node.in_port(0).get_source() - node.in_port(0).get_connection().set_source(direct_transpose.out_port(0)) - reshape_in.in_port(0).connect(source) - # connect output_reshape_node - node.out_port(0).get_connection().set_source(reshape_out.out_port(0)) - node.out_port(0).connect(inverse_transpose.in_port(0)) - reshape_out.in_port(0).connect(inverse_transpose.out_port(0)) - rename_nodes([(node, node_name + '/' + node.op), (reshape_out, node_name)]) - - for node in graph.get_op_nodes(): - if 'time_dim' in node: - del node['time_dim'] diff --git a/tools/mo/openvino/tools/mo/front/kaldi/apply_counts.py b/tools/mo/openvino/tools/mo/front/kaldi/apply_counts.py deleted file mode 100644 index 952627450cf56f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/apply_counts.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Add -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.find_inputs import find_outputs -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def apply_biases_to_last_layer(graph, counts): - r""" - When user provides counts file, it is a file that contains log-apriory probabilities, - technically it should be subtracted from the bias of the last layer unless it is a SoftMax. 
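# --- Editor's note (illustrative addition, not part of the removed file) ---
# A NumPy sketch of the adjustment described in this docstring and in the Case
# diagrams just below: the counts are turned into log a-priori probabilities and
# subtracted from the biases of the last AffineTransform. The numbers are made up,
# and the real pass implements the subtraction by inserting an Add node with -counts
# rather than rewriting the biases in place.
import numpy as np

counts = np.array([100., 300., 600.])        # per-class frame counts from the counts file
log_priors = np.log(counts / counts.sum())   # log a-priori probabilities (cf. counts_to_priors below)
biases = np.array([0.1, -0.2, 0.05])         # biases of the final AffineTransform
adjusted_biases = biases - log_priors
print(adjusted_biases)
# --- end of editor's note ---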
- - Case 1: - weights ---\ - biases ---\ - some layer ---> AffineTransform ---> SoftMax - - Then, counts are applied to biases of Affine Transform: - - weights ---\ - (biases - counts) ---\ - some layer ---> AffineTransform ---> SoftMax - - Case 2: - weights ---\ - biases ---\ - some layer ---> AffineTransform - - Just takes the last layer and updates biases: - - weights ---\ - (biases - counts) ---\ - some layer ---> AffineTransform - - Parameters - ---------- - graph - counts - - Returns - ------- - - """"" - outputs_ids = find_outputs(graph) - for output in outputs_ids.copy(): - node = Node(graph, output) - if node.op != 'Assign' and node.op != "MemoryOffset": - continue - outputs_ids.remove(output) - - if len(outputs_ids) > 1: - raise Error('Ambiguity in applying counts to several outputs.') - elif len(outputs_ids) == 0: - raise Error('No outputs were found') - - target_node = Node(graph, outputs_ids[0]) - if target_node.op == 'SoftMax': - target_node = target_node.in_port(0).get_source().node - - sub_node = create_op_node_with_second_input(graph, Add, -counts, {'name': 'sub_counts'}) - target_node.out_port(0).get_connection().set_source(sub_node.out_port(0)) - sub_node.in_port(0).connect(target_node.out_port(0)) - - -def read_counts_file(file_path): - with open(file_path, 'r') as f: - file_content = f.readlines() - if len(file_content) > 1: - raise Error('Expect counts file to be one-line file. ' + - refer_to_faq_msg(90)) - - counts_line = file_content[0].strip().replace('[', '').replace(']', '') - try: - counts = np.fromstring(counts_line, dtype=float, sep=' ') - except TypeError: - raise Error('Expect counts file to contain list of floats.' + - refer_to_faq_msg(90)) - - return counts - - -def counts_to_priors(counts): - cutoff = 1.00000001e-10 - cutoff_idxs = np.where(counts < cutoff) - counts[cutoff_idxs] = cutoff - scale = 1.0 / np.sum(counts) - counts = np.log(counts * scale) # pylint: disable=assignment-from-no-return - counts[cutoff_idxs] += np.finfo(np.float32).max / 2 - return counts - - -class ApplyCountsFilePattern(FrontReplacementSubgraph): - """ - Pass applies counts file as biases to last layer - """ - enabled = True - graph_condition = [lambda graph: graph.graph['cmd_params'].counts is not None] - - def run_after(self): - from openvino.tools.mo.front.output_cut import OutputCut - from openvino.tools.mo.front.MoveEmbeddedInputsToInputs import MoveEmbeddedInputsToInputs - return [MoveEmbeddedInputsToInputs, - OutputCut, - ] - - def run_before(self): - from openvino.tools.mo.front.MatMul_normalizer import FullyConnectedDecomposer - return [FullyConnectedDecomposer, - ] - - def find_and_replace_pattern(self, graph: Graph): - # if empty string is in counts, read priors from model itself (on loader stage) - if graph.graph['cmd_params'].counts == "": - assert isinstance(graph.graph['priors'], (list, np.ndarray)) and len(graph.graph['priors']) != 0, \ - "Model file does not contain Priors tag with counts values, use separate file instead" - counts = graph.graph['priors'].copy() - else: - # read counts from given file - try: - counts = read_counts_file(graph.graph['cmd_params'].counts) - except Exception as e: - raise Error('Model Optimizer is not able to read counts file {}'.format(graph.graph['cmd_params'].counts) + - refer_to_faq_msg(92)) from e - - # calculate normalized counts as follows: - # c_i=log(c_i/sum(c_j)) - # set max_float/2 for almost zero c_i (< 1.e-10) - counts = counts_to_priors(counts) - apply_biases_to_last_layer(graph, counts) diff --git 
a/tools/mo/openvino/tools/mo/front/kaldi/extractor.py b/tools/mo/openvino/tools/mo/front/kaldi/extractor.py deleted file mode 100644 index a501da5d2253e1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractor.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node - - -def node_pb_arg(pb_extractor): - return lambda node: pb_extractor(node.parameters) - - -kaldi_type_extractors = {} - - -def common_kaldi_fields(node: Node) -> dict: - layer_type = node.op - return { - 'kind': 'op', - 'name': node.id, - 'op': layer_type, - # generic code relies on op; it should be overridden by specific op extractor - 'infer': None, - } - - -def kaldi_extractor(node: Node) -> (bool, dict): - result = common_kaldi_fields(node) - layer_type = result['op'] - if layer_type not in kaldi_type_extractors: - supported = False - return supported, result - - result.update(kaldi_type_extractors[layer_type](node)) - supported = True - - return supported, result diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/__init__.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/add_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/add_ext.py deleted file mode 100644 index 0566bf7373aebd..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/add_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Add -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class AddFrontExtractor(FrontExtractorOp): - op = 'Add' - enabled = True - - @classmethod - def extract(cls, node): - Add.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/add_shift_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/add_shift_ext.py deleted file mode 100644 index b033564349f088..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/add_shift_ext.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.utils import read_binary_vector, read_learning_info -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp - - -class AddShiftFrontExtractor(FrontExtractorOp): - op = 'addshift' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - read_learning_info(pb) - biases = read_binary_vector(pb) - bias_term = True - mapping_rule = {'bias_term': bias_term} - embed_input(mapping_rule, 1, 'weights', np.ones(biases.shape, dtype=np.float32)) - embed_input(mapping_rule, 2, 'biases', biases) - ScaleShiftOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/affine_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/affine_component_ext.py deleted file mode 100644 index e455e0a3d65b59..00000000000000 --- 
a/tools/mo/openvino/tools/mo/front/kaldi/extractors/affine_component_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor -from openvino.tools.mo.front.kaldi.utils import read_learning_info -from openvino.tools.mo.graph.graph import Node - - -class AffineComponentFrontExtractor(FrontExtractorOp): - op = 'affinecomponent' - enabled = True - - @classmethod - def extract(cls, node: Node): - read_learning_info(node.parameters) - return FixedAffineComponentFrontExtractor.extract(node) diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/affine_component_preconditioned_online_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/affine_component_preconditioned_online_ext.py deleted file mode 100644 index 919d2465458be9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/affine_component_preconditioned_online_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor -from openvino.tools.mo.front.kaldi.utils import read_learning_info -from openvino.tools.mo.graph.graph import Node - - -class AffineComponentFrontExtractor(FrontExtractorOp): - op = 'affinecomponentpreconditionedonline' - enabled = True - - @classmethod - def extract(cls, node: Node): - read_learning_info(node.parameters) - return FixedAffineComponentFrontExtractor.extract(node) diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/affine_transform_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/affine_transform_ext.py deleted file mode 100644 index 58cbc92eba40e4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/affine_transform_ext.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.MatMul import FullyConnected -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.utils import read_binary_matrix, read_binary_vector, read_learning_info - - -class AffineTransformFrontExtractor(FrontExtractorOp): - op = 'affinetransform' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - read_learning_info(pb) - weights, weights_shape = read_binary_matrix(pb) - biases = read_binary_vector(pb) - - mapping_rule = { - 'out-size': weights_shape[0], - 'transpose_weights': True, - } - embed_input(mapping_rule, 1, 'weights', weights) - embed_input(mapping_rule, 2, 'biases', biases) - - FullyConnected.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/backproptruncation_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/backproptruncation_ext.py deleted file mode 100644 index e3aeb7851d64b0..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/backproptruncation_ext.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils 
import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import read_binary_float_token, read_binary_integer32_token, collect_until_token -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp - - -class BackPropTrancationFrontExtractor(FrontExtractorOp): - op = 'backproptruncationcomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - - collect_until_token(pb, b'') - dim = read_binary_integer32_token(pb) - - collect_until_token(pb, b'') - scale = read_binary_float_token(pb) - - # TODO add real batch here - attrs = {} - embed_input(attrs, 1, 'weights', np.full([dim], scale)) - ScaleShiftOp.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/batchnorm_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/batchnorm_component_ext.py deleted file mode 100644 index cda4c33466b9f8..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/batchnorm_component_ext.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import collect_until_token, read_binary_float_token, read_binary_integer32_token -from openvino.tools.mo.front.kaldi.utils import read_binary_vector -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp - - -class BatchNormComponentFrontExtractor(FrontExtractorOp): - op = 'batchnormcomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - - collect_until_token(pb, b'') - dim = read_binary_integer32_token(pb) - - collect_until_token(pb, b'') - block_dim = read_binary_integer32_token(pb) - - collect_until_token(pb, b'') - eps = read_binary_float_token(pb) - - collect_until_token(pb, b'') - target_rms = read_binary_float_token(pb) - - collect_until_token(pb, b'') - mean = read_binary_vector(pb) - - collect_until_token(pb, b'') - var = read_binary_vector(pb) - - scale = target_rms / np.sqrt(var + eps) - - shift = - target_rms * mean / np.sqrt(var + eps) - - scale = np.tile(scale, dim // block_dim) - shift = np.tile(shift, dim // block_dim) - - attrs = {'out-size': dim} - embed_input(attrs, 1, 'weights', scale) - embed_input(attrs, 2, 'biases', shift) - - ScaleShiftOp.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/bias_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/bias_component_ext.py deleted file mode 100644 index ef942692382401..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/bias_component_ext.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, collect_until_token -from openvino.tools.mo.front.kaldi.utils import read_binary_vector -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp - - -class FixedBiasComponentFrontExtractor(FrontExtractorOp): - op = 'fixedbiascomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - 
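For reference, the batchnormcomponent extractor above folds the accumulated statistics into a plain ScaleShift. A minimal numpy sketch of that arithmetic (the helper name is mine, and block_dim is taken from the length of the stats vectors here, whereas the extractor reads it from a separate token):

```python
import numpy as np

def batchnorm_to_scaleshift(mean, var, eps, target_rms, dim):
    """scale * x + shift == target_rms * (x - mean) / sqrt(var + eps),
    which is what the deleted batchnormcomponent extractor embeds as weights."""
    mean = np.asarray(mean, dtype=np.float32)
    var = np.asarray(var, dtype=np.float32)
    scale = target_rms / np.sqrt(var + eps)
    shift = -target_rms * mean / np.sqrt(var + eps)
    block_dim = mean.shape[0]
    # Statistics are stored per block and repeated across the full dimension.
    return np.tile(scale, dim // block_dim), np.tile(shift, dim // block_dim)

scale, shift = batchnorm_to_scaleshift(mean=[0.5, -1.0], var=[4.0, 1.0],
                                       eps=1e-3, target_rms=1.0, dim=4)
assert scale.shape == shift.shape == (4,)
```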
collect_until_token(pb, b'') - biases = read_binary_vector(pb) - find_next_tag(pb) - read_placeholder(pb, 1) - - mapping_rule = { - 'layout': 'NCHW', - 'bias_term': True, - 'out-size': biases.shape[0], - } - embed_input(mapping_rule, 2, 'biases', biases) - - ScaleShiftOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/clip_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/clip_ext.py deleted file mode 100644 index 4ffb53900dabd1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/clip_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.identity import Identity -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ClipGradientComponentFrontExtractor(FrontExtractorOp): - op = 'clipgradientcomponent' - enabled = True - - @classmethod - def extract(cls, node): - Identity.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/concat_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/concat_ext.py deleted file mode 100644 index d6fc7a4f4ce56e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/concat_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.concat import Concat - - -class ConcatFrontExtractor(FrontExtractorOp): - op = 'concat' - enabled = True - - @classmethod - def extract(cls, node): - mapping_rule = { - 'axis': 1 - } - Concat.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/const_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/const_ext.py deleted file mode 100644 index 9deef60457cf2a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/const_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.const import Const - - -class ConstantExtractor(FrontExtractorOp): - op = 'Const' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'data_type': node.value.dtype, - 'value': node.value, - } - Const.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/convolutional_1d_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/convolutional_1d_component_ext.py deleted file mode 100644 index 4fea847241925b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/convolutional_1d_component_ext.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import read_token_value, collect_until_whitespace, find_next_tag -from openvino.tools.mo.front.kaldi.utils import read_learning_info, read_binary_matrix, read_binary_vector -from openvino.tools.mo.graph.graph import Node -from 
openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class ConvolutionalComponentFrontExtractor(FrontExtractorOp): - op = 'convolutional1dcomponent' # Naming like in Kaldi - enabled = True - @classmethod - def extract(cls, node: Node) -> bool: - """ - Extract conv parameters from node.parameters. - node.parameters like file descriptor object. - :param node: Convolution node - :return: - """ - pb = node.parameters - read_learning_info(pb) - - kernel = read_token_value(pb, b'') - stride = read_token_value(pb, b'') - patch_stride = read_token_value(pb, b'') - - token = find_next_tag(pb) - if token == '': - appended_conv = True - token = find_next_tag(pb) - if token != '': - raise Error('Can not load token {} from Kaldi model'.format(token) + - refer_to_faq_msg(94)) - collect_until_whitespace(pb) - weights, weights_shape = read_binary_matrix(pb) - - collect_until_whitespace(pb) - biases = read_binary_vector(pb) - - if (patch_stride - kernel) % stride != 0: - raise Error( - 'Kernel size and stride does not correspond to `patch_stride` attribute of Convolution layer. ' + - refer_to_faq_msg(93)) - - output = biases.shape[0] - if weights_shape[0] != output: - raise Error('Weights shape does not correspond to the `output` attribute of Convolution layer. ' + - refer_to_faq_msg(93)) - - mapping_rule = { - 'output': output, - 'patch_stride': patch_stride, - 'bias_term': None, - 'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]), - 'pad_spatial_shape': int64_array([[0, 0], [0, 0]]), - 'dilation': int64_array([1, 1, 1, 1]), - 'kernel': int64_array([weights_shape[0], weights_shape[1] // kernel, 1, kernel]), - 'stride': int64_array([1, 1, 1, stride]), - 'kernel_spatial': int64_array([1, kernel]), - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'kernel_spatial_idx': [2, 3], - 'group': 1, - 'reshape_kernel': True, - 'appended_conv': appended_conv # pylint: disable=possibly-used-before-assignment - } - - mapping_rule.update(layout_attrs()) - embed_input(mapping_rule, 1, 'weights', weights) - embed_input(mapping_rule, 2, 'biases', biases) - - mapping_rule['bias_addable'] = len(biases) > 0 - - Convolution.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/convolutional_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/convolutional_component_ext.py deleted file mode 100644 index bb3517c94cba47..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/convolutional_component_ext.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import read_token_value, collect_until_whitespace -from openvino.tools.mo.front.kaldi.utils import read_learning_info, read_binary_matrix, read_binary_vector -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class ConvolutionalComponentFrontExtractor(FrontExtractorOp): - 
op = 'convolutionalcomponent' # Naming like in Kaldi - enabled = True - - @classmethod - def extract(cls, node: Node) -> bool: - """ - Extract conv parameters from node.parameters. - node.parameters like file descriptor object. - :param node: Convolution node - :return: - """ - pb = node.parameters - kernel = read_token_value(pb, b'') - stride = read_token_value(pb, b'') - patch_stride = read_token_value(pb, b'') - - read_learning_info(pb) - - collect_until_whitespace(pb) - weights, weights_shape = read_binary_matrix(pb) - - collect_until_whitespace(pb) - biases = read_binary_vector(pb) - - if (patch_stride - kernel) % stride != 0: - raise Error( - 'Kernel size and stride does not correspond to `patch_stride` attribute of Convolution layer. ' + - refer_to_faq_msg(93)) - - output = biases.shape[0] - if weights_shape[0] != output: - raise Error('Weights shape does not correspond to the `output` attribute of Convolution layer. ' + - refer_to_faq_msg(93)) - - mapping_rule = { - 'output': output, - 'patch_stride': patch_stride, - 'bias_term': None, - 'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]), - 'pad_spatial_shape': int64_array([[0, 0], [0, 0]]), - 'dilation': int64_array([1, 1, 1, 1]), - 'kernel': int64_array([weights_shape[0], weights_shape[1] // kernel, 1, kernel]), - 'stride': int64_array([1, 1, 1, stride]), - 'kernel_spatial': int64_array([1, kernel]), - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'kernel_spatial_idx': [2, 3], - 'group': 1, - 'reshape_kernel': True, - } - - mapping_rule.update(layout_attrs()) - embed_input(mapping_rule, 1, 'weights', weights) - embed_input(mapping_rule, 2, 'biases', biases) - - mapping_rule['bias_addable'] = len(biases) > 0 - - Convolution.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/copy_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/copy_ext.py deleted file mode 100644 index becd2af07e3226..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/copy_ext.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.kaldi.loader.utils import read_binary_integer32_token, read_blob -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.const import Const - - -class CopyFrontExtractor(FrontReplacementOp): - op = 'copy' - enabled = True - - def replace_op(self, graph: Graph, node: Node): - pb = node.parameters - weights_size = read_binary_integer32_token(pb) - weights = read_blob(pb, weights_size, dtype=np.int32) - 1 - - node_name = node.soft_get('name', node.id) - const_attrs = { - 'name': node_name + '/indexes', - 'value': mo_array(weights), - 'shape': [weights_size], - 'data_type': np.int32 - } - indexes_node = Const(graph).create_node(attrs=const_attrs) - - perm_in_1 = Const(graph, {'value': int64_array([1, 0]), 'name': node_name + '/order'}).create_node() - perm1_node = Transpose(graph, {'name': node_name + '/input_permute'}).create_node([node.in_node(0)]) - 
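Both convolutional component extractors above repack Kaldi's flat 1D convolution into 4D attributes with width as the only non-trivial spatial axis. A small self-contained sketch of the shape bookkeeping (num_patches is derived here for illustration only; the extractors themselves do not compute it):

```python
def kaldi_conv1d_attrs(weights_shape, kernel, stride, patch_stride):
    """Mirror of the kernel/stride layout built by the extractors above."""
    if (patch_stride - kernel) % stride != 0:
        raise ValueError('kernel, stride and patch_stride are inconsistent')
    output = weights_shape[0]                       # number of filters / biases
    return {
        'kernel': [output, weights_shape[1] // kernel, 1, kernel],
        'stride': [1, 1, 1, stride],
        'num_patches': (patch_stride - kernel) // stride + 1,
    }

# 8 filters, flat weight rows of length 15, kernel 5, step 4, patch_stride 13:
print(kaldi_conv1d_attrs(weights_shape=(8, 15), kernel=5, stride=4, patch_stride=13))
```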
perm1_node.in_port(0).connect(node.in_port(0).get_source()) - perm1_node.in_port(1).connect(perm_in_1.out_port(0)) - - gather_node = create_op_with_const_inputs(graph, Gather, {2: int64_array(0)}, {'name': node_name + '/gather'}) - gather_node.in_port(0).connect(perm1_node.out_port(0)) - gather_node.in_port(1).connect(indexes_node.out_port(0)) - - perm2_node = Transpose(graph, {'name': node_name + '/output_permute'}).create_node() - perm2_node.in_port(0).connect(gather_node.out_port(0)) - perm2_node.in_port(1).connect(perm_in_1.out_port(0)) - - return [perm2_node.id] diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/crop_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/crop_ext.py deleted file mode 100644 index 31d03fd915d648..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/crop_ext.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.crop import Crop - - -class CropFrontExtractor(FrontExtractorOp): - op = 'Crop' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - - mapping_rule = { - 'dim': pb['dim'], - 'offset': pb['offset'], - 'axis': pb['axis'], - 'layout': 'NCHW' - } - - Crop.update_node_stat(node, attrs=mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/dropoutmask_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/dropoutmask_ext.py deleted file mode 100644 index 54c704d6f69eb7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/dropoutmask_ext.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import collect_until_token, collect_until_token_and_read, read_binary_float_token -from openvino.tools.mo.ops.dropoutmask import DropoutMask - - -class DropoutMaskComponentFrontExtractor(FrontExtractorOp): - op = 'dropoutmaskcomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - - size = collect_until_token_and_read(pb, b'') - collect_until_token(pb, b'') - dropout_proportion = read_binary_float_token(pb) - DropoutMask.update_node_stat(node, {'dropout_proportion': 1.0-dropout_proportion, - 'size': size}) - - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/elementwise_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/elementwise_component_ext.py deleted file mode 100644 index 3111388312d343..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/elementwise_component_ext.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.utils import read_token_value -from openvino.tools.mo.ops.eltwise_ninputs_in_1 import EltwiseNin1 - - -class ElementwiseProductComponentFrontExtractor(FrontExtractorOp): - op = 'elementwiseproductcomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - - indim = read_token_value(pb, b'') - outdim = read_token_value(pb, b'') - num_inputs = indim / outdim - - attrs = {'num_inputs': int(num_inputs), - 'operation': 'mul'} - - EltwiseNin1.update_node_stat(node, attrs) - return 
cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/fixed_affine_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/fixed_affine_component_ext.py deleted file mode 100644 index bba13add8caff7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/fixed_affine_component_ext.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.MatMul import FullyConnected -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, collect_until_token -from openvino.tools.mo.front.kaldi.utils import read_binary_matrix, read_binary_vector -from openvino.tools.mo.utils.error import Error - - -class FixedAffineComponentFrontExtractor(FrontExtractorOp): - op = 'fixedaffinecomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - collect_until_token(pb, b'') - weights, weights_shape = read_binary_matrix(pb) - tag = find_next_tag(pb) - read_placeholder(pb, 1) - if tag != '': - raise Error('FixedAffineComponent must contain BiasParams') - biases = read_binary_vector(pb) - - mapping_rule = { - 'out-size': weights_shape[0], - 'transpose_weights': True, - } - embed_input(mapping_rule, 1, 'weights', weights) - embed_input(mapping_rule, 2, 'biases', biases) - - FullyConnected.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/generaldropout_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/generaldropout_ext.py deleted file mode 100644 index 0c1632496f9a15..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/generaldropout_ext.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import read_binary_integer32_token, collect_until_token, \ - read_binary_float_token -from openvino.tools.mo.ops.identity import Identity - - -class GeneralDropoutComponentFrontExtractor(FrontExtractorOp): - op = 'generaldropoutcomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - - collect_until_token(pb, b'') - dim = read_binary_integer32_token(pb) - - collect_until_token(pb, b'') - block_dim = read_binary_integer32_token(pb) - - collect_until_token(pb, b'') - time_period = read_binary_integer32_token(pb) - - collect_until_token(pb, b'') - dropout_proporion = read_binary_float_token(pb) - - # collect_until_token(pb, b'') - Identity.update_node_stat(node, {}) - - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/linear_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/linear_component_ext.py deleted file mode 100644 index 3cd2ed584e964c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/linear_component_ext.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.MatMul import FullyConnected -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import collect_until_token -from 
openvino.tools.mo.front.kaldi.utils import read_binary_matrix - - -class LinearComponentFrontExtractor(FrontExtractorOp): - op = 'linearcomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - collect_until_token(pb, b'') - weights, weights_shape = read_binary_matrix(pb) - - mapping_rule = { - 'out-size': weights_shape[0], - 'transpose_weights': True, - } - - embed_input(mapping_rule, 1, 'weights', weights) - - FullyConnected.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/lstm_nonlinearity_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/lstm_nonlinearity_ext.py deleted file mode 100644 index c1b87110010b28..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/lstm_nonlinearity_ext.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import collect_until_token, collect_until_token_and_read -from openvino.tools.mo.front.kaldi.utils import read_binary_matrix -from openvino.tools.mo.ops.lstmnonlinearity import LstmNonLinearity -from openvino.tools.mo.utils.error import Error - - -class LSTMNonlinearityFrontExtractor(FrontExtractorOp): - op = 'lstmnonlinearitycomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - - collect_until_token(pb, b'') - ifo_x_weights, ifo_x_weights_shape = read_binary_matrix(pb) - - try: - use_dropout = collect_until_token_and_read(pb, b'', bool) - except Error: - # layer have not UseDropout attribute, so setup it to False - use_dropout = False - - mapping_rule = {'use_dropout': use_dropout} - - assert len(ifo_x_weights_shape) == 2, "Unexpected shape of weights in LSTMNonLinearityComponent" - assert ifo_x_weights_shape[0] == 3, "Unexpected shape of weights in LSTMNonLinearityComponent" - - ifo_x_weights = ifo_x_weights.reshape(ifo_x_weights_shape) - embed_input(mapping_rule, 1, 'i_weights', ifo_x_weights[0][:]) - embed_input(mapping_rule, 2, 'f_weights', ifo_x_weights[1][:]) - embed_input(mapping_rule, 3, 'o_weights', ifo_x_weights[2][:]) - - LstmNonLinearity.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/lstm_projected_streams_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/lstm_projected_streams_ext.py deleted file mode 100644 index 956bb0ee8cac67..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/lstm_projected_streams_ext.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.lstm_cell import LSTMCell -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import collect_until_token, collect_until_whitespace, get_uint32 -from openvino.tools.mo.front.kaldi.utils import read_binary_matrix, read_binary_vector - - -class LSTMProjectedStreamsFrontExtractor(FrontExtractorOp): - op = 'lstmprojectedstreams' - enabled = True - - @classmethod - def extract(cls, node): - clip_value = 50 - pb = node.parameters - res = collect_until_whitespace(pb) - if res == b'': - clip_value = get_uint32(pb.read(4)) - 
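The affinetransform, fixedaffinecomponent and linearcomponent extractors above all reduce to the same FullyConnected mapping: Kaldi stores the weight matrix as [out_size, in_size], so out-size is weights_shape[0] and transpose_weights=True. A quick numpy check of that convention (illustration only):

```python
import numpy as np

out_size, in_size = 3, 5
W = np.random.rand(out_size, in_size).astype(np.float32)   # Kaldi layout
b = np.random.rand(out_size).astype(np.float32)
x = np.random.rand(1, in_size).astype(np.float32)

y = x @ W.T + b      # what FullyConnected computes with transpose_weights=True
assert y.shape == (1, out_size)
```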
collect_until_token(pb, b'FM') - gifo_x_weights, gifo_x_weights_shape = read_binary_matrix(pb, False) - gifo_r_weights, gifo_r_weights_shape = read_binary_matrix(pb) - gifo_biases = read_binary_vector(pb) - input_gate_weights = read_binary_vector(pb) - forget_gate_weights = read_binary_vector(pb) - output_gate_weights = read_binary_vector(pb) - - projection_weights, projection_weights_shape = read_binary_matrix(pb) - - mapping_rule = {'gifo_x_weights_shape': gifo_x_weights_shape, - 'gifo_r_weights_shape': gifo_r_weights_shape, - 'projection_weights_shape': projection_weights_shape, - 'clip_value': clip_value, - 'format': 'kaldi', - } - - embed_input(mapping_rule, 1, 'gifo_x_weights', gifo_x_weights) - embed_input(mapping_rule, 2, 'gifo_r_weights', gifo_r_weights) - embed_input(mapping_rule, 3, 'gifo_biases', gifo_biases) - embed_input(mapping_rule, 4, 'input_gate_weights', input_gate_weights) - embed_input(mapping_rule, 5, 'forget_gate_weights', forget_gate_weights) - embed_input(mapping_rule, 6, 'output_gate_weights', output_gate_weights) - embed_input(mapping_rule, 7, 'projection_weights', projection_weights) - - LSTMCell.update_node_stat(node, mapping_rule) - return cls.enabled - - -class LSTMProjectedFrontExtractor(FrontExtractorOp): - op = 'lstmprojected' - enabled = True - - @classmethod - def extract(cls, node): - return LSTMProjectedStreamsFrontExtractor.extract(node) diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/max_pooling_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/max_pooling_ext.py deleted file mode 100644 index 17c9b436386c5b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/max_pooling_ext.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import read_token_value, collect_until_token, \ - read_binary_integer32_token, find_next_tag, read_placeholder -from openvino.tools.mo.ops.pooling import Pooling -from openvino.tools.mo.utils.error import Error - - -class MaxPoolingComponentFrontExtractor(FrontExtractorOp): - op = 'maxpoolingcomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - collect_until_token(pb, b'') - kernel = read_binary_integer32_token(pb) - tag = find_next_tag(pb) - if tag == '': - read_placeholder(pb, 1) - stride = read_binary_integer32_token(pb) - pool_step = stride - pool_stride = read_token_value(pb, b'') - elif tag == '': - stride = 1 - pool_step = None - read_placeholder(pb, 1) - pool_stride = read_binary_integer32_token(pb) - else: - raise Error('Can not extract parameters for {}'.format(node)) - - mapping_rule = { - 'window': int64_array([1, 1, 1, kernel]), - 'stride': int64_array([1, 1, 1, stride]), - 'pool_stride': pool_stride, - 'pool_step': pool_step, - 'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]), - 'pad_spatial_shape': int64_array([[0, 0], [0, 0]]), - 'pool_method': 'max', - } - mapping_rule.update(layout_attrs()) - Pooling.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/memoryoffset_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/memoryoffset_ext.py deleted file mode 100644 index b6561870b5d182..00000000000000 --- 
a/tools/mo/openvino/tools/mo/front/kaldi/extractors/memoryoffset_ext.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.memoryoffset import MemoryOffset - - -class MemoryOffsetFrontExtractor(FrontExtractorOp): - op = 'MemoryOffset' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - mapping_rule = { - 'pair_name': pb['pair_name'], - 't': pb['t'], - 'has_default': pb['has_default'], - 'splitted': False, - } - if 'element_size' in pb: - mapping_rule['element_size'] = pb['element_size'] - - MemoryOffset.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/mul_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/mul_ext.py deleted file mode 100644 index 4460d489d2249c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/mul_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Mul -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class MulFrontExtractor(FrontExtractorOp): - op = 'Mul' - enabled = True - - @classmethod - def extract(cls, node): - Mul.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/naturalgradient_affine_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/naturalgradient_affine_component_ext.py deleted file mode 100644 index 05ced2e2631a83..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/naturalgradient_affine_component_ext.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor -from openvino.tools.mo.front.kaldi.utils import read_learning_info - - -class NaturalGradientAffineComponentFrontExtractor(FrontExtractorOp): - op = 'naturalgradientaffinecomponent' - enabled = True - - @classmethod - def extract(cls, node): - read_learning_info(node.parameters) - return FixedAffineComponentFrontExtractor.extract(node) diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/noop_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/noop_ext.py deleted file mode 100644 index c5cb49f669ff95..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/noop_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.identity import Identity -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class NoOpFrontExtractor(FrontExtractorOp): - op = 'noopcomponent' - enabled = True - - @classmethod - def extract(cls, node): - Identity.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/normalize_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/normalize_component_ext.py deleted file mode 100644 index 53ed56fbc0c707..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/normalize_component_ext.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy 
as np - -from openvino.tools.mo.ops.normalize import NormalizeOp -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import collect_until_token, read_binary_bool_token, read_binary_integer32_token, \ - read_binary_float_token -from openvino.tools.mo.utils.error import Error - - -class NormalizeComponentFrontExtractor(FrontExtractorOp): - op = 'normalizecomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - try: - collect_until_token(pb, b'') - except Error: - try: - pb.seek(0) - collect_until_token(pb, b'') - except Error: - raise Error("Neither nor were found") - in_dim = read_binary_integer32_token(pb) - - try: - collect_until_token(pb, b'') - target_rms = read_binary_float_token(pb) - except Error: - # model does not contain TargetRms - target_rms = 1.0 - - try: - collect_until_token(pb, b'') - add_log = read_binary_bool_token(pb) - except Error: - # model does not contain AddLogStddev - add_log = False - - if add_log is not False: - raise Error("AddLogStddev True in Normalize component is not supported") - - scale = target_rms * np.sqrt(in_dim) - - attrs = { - 'eps': 0.00000001, - 'across_spatial': 0, - 'channel_shared': 1, - 'in_dim': in_dim, - } - embed_input(attrs, 1, 'weights', [scale]) - - NormalizeOp.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/pnorm_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/pnorm_component_ext.py deleted file mode 100644 index 5f36a5a7b62056..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/pnorm_component_ext.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.pnorm import PNormOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import collect_until_token, read_binary_integer32_token, read_binary_float_token -from openvino.tools.mo.utils.error import Error - - -class PNormComponentFrontExtractor(FrontExtractorOp): - op = 'pnormcomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - try: - collect_until_token(pb, b'') - except Error: - raise Error(" was not found") - in_dim = read_binary_integer32_token(pb) - - try: - collect_until_token(pb, b'') - except Error: - raise Error(" was not found") - out_dim = read_binary_integer32_token(pb) - - assert in_dim % out_dim == 0 - - group = in_dim / out_dim - - try: - collect_until_token(pb, b'

<P>') - except Error: - raise Error("<P>
was not found") - p = read_binary_float_token(pb) - - attrs = { - 'group': group, - 'p': p, - } - - PNormOp.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/rectified_linear_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/rectified_linear_component_ext.py deleted file mode 100644 index 2538af9d325238..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/rectified_linear_component_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import ReLU -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class RectifiedLinearComponentFrontExtractor(FrontExtractorOp): - op = 'rectifiedlinearcomponent' - enabled = True - - @classmethod - def extract(cls, node): - ReLU.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/rescale_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/rescale_ext.py deleted file mode 100644 index 4c7b72b98657b1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/rescale_ext.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.utils import read_binary_vector, read_learning_info -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp - - -class RescaleFrontExtractor(FrontExtractorOp): - op = 'rescale' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - read_learning_info(pb) - weights = read_binary_vector(pb) - mapping_rule = {} - embed_input(mapping_rule, 1, 'weights', weights) - ScaleShiftOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/restrictedattentioncomponent_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/restrictedattentioncomponent_ext.py deleted file mode 100644 index b2eae6d5a56ba9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/restrictedattentioncomponent_ext.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import read_binary_bool_token, \ - read_binary_integer32_token, collect_until_token, read_binary_float_token -from openvino.tools.mo.front.kaldi.utils import read_binary_vector, read_binary_matrix -from openvino.tools.mo.ops.restrictedattentioncomponent import RestrictedAttentionComponent - - -class RestrictedAttentionComponentFrontExtractor(FrontExtractorOp): - """ - This class is used for extracting attributes of RestrictedAttention Kaldi operator. - """ - op = 'restrictedattentioncomponent' - enabled = True - - @classmethod - def extract(cls, node): - """ - This method extracts attributes of RestrictedAttention operator from Kaldi model. 
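Looking back at the pnormcomponent extractor above: group = in_dim / out_dim and p describe a group-wise L_p reduction of the input. A toy numpy illustration of that reading (the real inference is implemented by PNormOp; this only restates the math):

```python
import numpy as np

def pnorm(x, group, p):
    """Reduce consecutive groups of `group` inputs by their L_p norm,
    so out_dim = in_dim / group."""
    x = np.asarray(x, dtype=np.float32).reshape(-1, int(group))
    return np.power(np.sum(np.power(np.abs(x), p), axis=1), 1.0 / p)

# in_dim=6, out_dim=3 -> group=2; p=2 gives pairwise Euclidean norms.
print(pnorm([3.0, 4.0, 1.0, 0.0, 0.0, 2.0], group=2, p=2.0))   # [5. 1. 2.]
```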
- Description of all attributes can be found in the operator documentation: - https://kaldi-asr.org/doc/classkaldi_1_1nnet3_1_1RestrictedAttentionComponent.html - """ - params = node.parameters - - attrs = {} - - collect_until_token(params, b'') - attrs['num_heads'] = read_binary_integer32_token(params) - - collect_until_token(params, b'') - attrs['key_dim'] = read_binary_integer32_token(params) - - collect_until_token(params, b'') - attrs['value_dim'] = read_binary_integer32_token(params) - - collect_until_token(params, b'') - attrs['num_left_inputs'] = read_binary_integer32_token(params) - - collect_until_token(params, b'') - attrs['num_right_inputs'] = read_binary_integer32_token(params) - - collect_until_token(params, b'') - attrs['time_stride'] = read_binary_integer32_token(params) - - collect_until_token(params, b'') - attrs['num_left_inputs_required'] = read_binary_integer32_token(params) - - collect_until_token(params, b'') - attrs['num_right_inputs_required'] = read_binary_integer32_token(params) - - collect_until_token(params, b'') - attrs['output_context'] = read_binary_bool_token(params) - - collect_until_token(params, b'') - attrs['key_scale'] = read_binary_float_token(params) - - collect_until_token(params, b'') - attrs['stats_count'] = read_binary_float_token(params) - - collect_until_token(params, b'') - entropy_stats = read_binary_vector(params) - attrs['entropy_stats'] = mo_array( - entropy_stats) if len(entropy_stats) != 0 else None - - collect_until_token(params, b'') - posterior_stats, posterior_stats_shape = read_binary_matrix(params) - attrs['posterior_stats'] = np.reshape( - posterior_stats, posterior_stats_shape) - - RestrictedAttentionComponent.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/scale_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/scale_component_ext.py deleted file mode 100644 index 022346f13d2524..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/scale_component_ext.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, collect_until_token -from openvino.tools.mo.front.kaldi.utils import read_binary_vector -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp - - -class NaturalGradientPerElementScaleComponentFrontExtractor(FrontExtractorOp): - op = 'naturalgradientperelementscalecomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - collect_until_token(pb, b'') - weights = read_binary_vector(pb) - find_next_tag(pb) - read_placeholder(pb, 1) - - mapping_rule = { - 'layout': 'NCHW' - } - embed_input(mapping_rule, 1, 'weights', weights) - - ScaleShiftOp.update_node_stat(node, mapping_rule) - return cls.enabled - - -class FixedScaleComponentFrontExtractor(FrontExtractorOp): - op = 'fixedscalecomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - collect_until_token(pb, b'') - weights = read_binary_vector(pb) - find_next_tag(pb) - read_placeholder(pb, 1) - - mapping_rule = { - 'layout': 'NCHW', - 'out-size': weights.shape[0], - } - embed_input(mapping_rule, 1, 'weights', weights) - - ScaleShiftOp.update_node_stat(node, mapping_rule) - return cls.enabled diff --git 
a/tools/mo/openvino/tools/mo/front/kaldi/extractors/softmax_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/softmax_ext.py deleted file mode 100644 index 7881d79a28b4f2..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/softmax_ext.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.softmax import Softmax - - -class SoftmaxComponentFrontExtractor(FrontExtractorOp): - op = 'softmaxcomponent' - enabled = True - - @classmethod - def extract(cls, node): - return SoftmaxFrontExtractor.extract(node) - - -class SoftmaxFrontExtractor(FrontExtractorOp): - op = 'softmax' - enabled = True - - @classmethod - def extract(cls, node): - Softmax.update_node_stat(node, {'infer': copy_shape_infer}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/specaugment_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/specaugment_component_ext.py deleted file mode 100644 index b4a460c4248200..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/specaugment_component_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.identity import Identity -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SpecAugmentComponentFrontExtractor(FrontExtractorOp): - op = 'specaugmenttimemaskcomponent' - enabled = True - - @classmethod - def extract(cls, node): - Identity.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/splice_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/splice_component_ext.py deleted file mode 100644 index 1b153ad6225b0d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/splice_component_ext.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.splice import Splice -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, read_binary_integer32_token, \ - collect_until_whitespace -from openvino.tools.mo.front.kaldi.utils import read_binary_vector -from openvino.tools.mo.utils.error import Error - - -class SpliceFrontExtractor(FrontExtractorOp): - op = 'splicecomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - mapping_rule = { - 'context': list() - } - tag = find_next_tag(pb) - if tag == '': - read_placeholder(pb, 1) - l_context = read_binary_integer32_token(pb) - tag = find_next_tag(pb) - if tag != '': - raise Error('Unknown token {} in SpliceComponent node {}'.format(tag, node.id)) - read_placeholder(pb, 1) - r_context = read_binary_integer32_token(pb) - for i in range(-l_context, r_context + 1): - mapping_rule['context'].append(i) - elif tag == '': - collect_until_whitespace(pb) - mapping_rule['context'] = read_binary_vector(pb, False, dtype=np.int32) - else: - raise Error('Unknown token {} in SpliceComponent node {}'.format(tag, node.id)) - - tag = find_next_tag(pb) - if tag == '': - read_placeholder(pb, 1) - const_dim = read_binary_integer32_token(pb) - mapping_rule['const_dim'] = const_dim - - 
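The splicecomponent extractor above builds its frame context either from the left/right context values or from an explicit context vector. A tiny sketch of the first branch (the token names were stripped from the diff text; they are presumably Kaldi's <LeftContext>/<RightContext>):

```python
# Splicing 2 frames of left context and 2 of right context around the current
# frame yields a 5-frame window, exactly what the extractor stores in 'context'.
l_context, r_context = 2, 2
context = list(range(-l_context, r_context + 1))
assert context == [-2, -1, 0, 1, 2]
```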
Splice.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/tdnncomponent_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/tdnncomponent_ext.py deleted file mode 100644 index 810df8594064be..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/tdnncomponent_ext.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import read_binary_bool_token, read_binary_integer32_token, collect_until_token, \ - read_binary_float_token -from openvino.tools.mo.front.kaldi.utils import read_binary_vector, read_binary_matrix -from openvino.tools.mo.ops.tdnncomponent import TdnnComponent - - -class TdnnComponentFrontExtractor(FrontExtractorOp): - op = 'tdnncomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.parameters - - collect_until_token(pb, b'') - max_change = read_binary_float_token(pb) - - collect_until_token(pb, b'') - collect_until_token(pb, b'') - - collect_until_token(pb, b'') - time_offsets = read_binary_vector(pb, False, np.int32) - - collect_until_token(pb, b'') - weights, weights_shape = read_binary_matrix(pb) - collect_until_token(pb, b'') - bias_params = read_binary_vector(pb) - - collect_until_token(pb, b'') - orthonormal_constraint = read_binary_float_token(pb) # used only on training - - collect_until_token(pb, b'') - use_natural_grad = read_binary_bool_token(pb) # used only on training - collect_until_token(pb, b'') - num_samples_hist = read_binary_float_token(pb) - - collect_until_token(pb, b'') - alpha_in_out = read_binary_float_token(pb), read_binary_float_token(pb) # for training, usually (4, 4) - - # according to Kaldi documentation http://kaldi-asr.org/doc/classkaldi_1_1nnet3_1_1TdnnComponent.html#details - # it looks like it's used only during training (but not 100% sure) - collect_until_token(pb, b'') - rank_in_out = read_binary_integer32_token(pb), read_binary_integer32_token(pb) - - biases = mo_array(bias_params) if len(bias_params) != 0 else None - attrs = { - 'weights': np.reshape(weights, weights_shape), - 'biases': biases, - 'time_offsets': time_offsets, - } - TdnnComponent.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/extractors/timeheightconvolution_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/extractors/timeheightconvolution_ext.py deleted file mode 100644 index df5e627e85c563..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/extractors/timeheightconvolution_ext.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import embed_input -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.kaldi.loader.utils import collect_until_token, read_token_value -from openvino.tools.mo.front.kaldi.utils import read_binary_matrix, read_binary_vector, read_binary_vector_of_pairs -from openvino.tools.mo.ops.timeheightconvolution import TimeHeightConvolutionComponent - - -class TimeHeightConvolutionFrontExtractor(FrontExtractorOp): - op = 'timeheightconvolutioncomponent' - enabled = True - - @classmethod - def extract(cls, node): - pb = 
node.parameters - collect_until_token(pb, b'') - in_shape = read_token_value(pb, b'') - out_shape = read_token_value(pb, b'') - height_in = read_token_value(pb, b'') - height_out = read_token_value(pb, b'') - height_subsample = read_token_value(pb, b'') - collect_until_token(pb, b'') - offsets = read_binary_vector_of_pairs(pb, read_token=False, dtype=np.int32) - collect_until_token(pb, b'') - time_offsets = read_binary_vector(pb, read_token=False, dtype=np.int32) - collect_until_token(pb, b'') - weights, _ = read_binary_matrix(pb) - collect_until_token(pb, b'') - biases = read_binary_vector(pb) - - offsets = offsets.reshape([len(offsets)//2, 2]) - mapping_rule = { # stride for h axis - 'height_subsample': height_subsample, - # input dimension for h axis - 'height_in': height_in, - # output dimension for h axis - 'height_out': height_out, - # input dimension for channel axis - 'in_channels': in_shape, - # output dimension for channel axis - 'out_channels': out_shape, - # array with pairs like the following - # [ (-1, -1) (-1, 0) (-1, 1) - # (0, -1) (0, 0) (0, 1) - # (1, -1) (1, 0) (1, 1)] - # it means that kernel 3x3 will be applied to calculate current value of output - 'offsets': offsets, - # required time offsets to calculate current convolution - # time_offsets = [-1, 0, 1] for previous example means no padding for time axis and - # 3 values should be prepared - # time_offsets = [0] means zero padding [1, 1] for time axis - 'time_offsets': time_offsets, - 'out-size': out_shape * height_out} - - embed_input(mapping_rule, 1, 'weights', weights) - embed_input(mapping_rule, 2, 'biases', biases) - - TimeHeightConvolutionComponent.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/loader/__init__.py b/tools/mo/openvino/tools/mo/front/kaldi/loader/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/loader/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/kaldi/loader/loader.py b/tools/mo/openvino/tools/mo/front/kaldi/loader/loader.py deleted file mode 100644 index 05a713d8d5cc4a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/loader/loader.py +++ /dev/null @@ -1,560 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from io import IOBase - -import networkx as nx -import numpy as np - -from openvino.tools.mo.ops.elementwise import Mul -from openvino.tools.mo.ops.split import AttributedVariadicSplit -from openvino.tools.mo.front.common.partial_infer.utils import float_array, int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import add_outputs_identity -from openvino.tools.mo.front.kaldi.loader.utils import find_next_tag, read_placeholder, find_next_component, get_name_from_path, \ - find_end_of_component, end_of_nnet_tag, read_binary_integer32_token, get_parameters, read_token_value, \ - collect_until_token, collect_until_token_and_read, create_edge_attrs, get_args_for_specifier -from openvino.tools.mo.front.kaldi.utils import read_binary_vector -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def load_parallel_component(file_descr, 
graph: Graph, prev_layer_id): - """ - Load ParallelComponent of the Kaldi model. - ParallelComponent contains parallel nested networks. - VariadicSplit is inserted before nested networks. - Outputs of nested networks concatenate with layer Concat. - - :param file_descr: descriptor of the model file - :param graph: graph with the topology. - :param prev_layer_id: id of the input layers for parallel component layer - :return: id of the concat layer - last layer of the parallel component layers - """ - nnet_count = read_token_value(file_descr, b'') - log.debug('Model contains parallel component with {} nested networks'.format(nnet_count)) - - split_points = [] - outputs = [] - inputs = [] - - for i in range(nnet_count): - read_token_value(file_descr, b'') - collect_until_token(file_descr, b'') - g = Graph() - load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i)) - - # input to nnet1 models is of a rank 1 but we also insert batch_size to 0th axis - # 1st axis contains input_size of the nested subnetwork - # we split input from the main network to subnetworks - input_node = Node(g, 'Parameter') - split_points.append(input_node['shape'][1]) - g.remove_node(input_node.id) - - mapping = {node: graph.unique_id(node) for node in g.nodes(data=False) if node in graph} - g = nx.relabel_nodes(g, mapping) - for val in mapping.values(): - g.node[val]['name'] = val - graph.add_nodes_from(g.nodes(data=True)) - graph.add_edges_from(g.edges(data=True)) - sorted_nodes = tuple(nx.topological_sort(g)) - - outputs.append(Node(graph, sorted_nodes[-1])) - inputs.append(Node(graph, sorted_nodes[0])) - - split_id = graph.unique_id(prefix='NestedNets/VariadicSplit') - attrs = {'out_ports_count': nnet_count, 'size_splits': split_points, 'axis': 1, 'name': split_id} - variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node() - prev_layer_node = Node(graph, prev_layer_id) - prev_layer_node.add_output_port(0) - graph.create_edge(prev_layer_node, variadic_split_node, 0, 0, create_edge_attrs(prev_layer_id, variadic_split_node.id, prev_layer_id)) - - concat_id = graph.unique_id(prefix='Concat') - graph.add_node(concat_id, parameters=None, op='concat', kind='op') - concat_node = Node(graph, concat_id) - - # Connect each output of variadic_split_node to each subnetwork's inputs in ParallelComponent - # and each subnetwork's output to concat_node - for i, (input_node, output_node) in enumerate(zip(inputs, outputs)): - output_node.add_output_port(0) - concat_node.add_input_port(i) - graph.create_edge(output_node, concat_node, 0, i, create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0)) - graph.create_edge(variadic_split_node, input_node, i, 0, create_edge_attrs(variadic_split_node.id, input_node.id, variadic_split_node.id, 0, i)) - return concat_id - - -def load_kaldi_model(graph, nnet_path): - """ - Structure of the file is the following: - magic-number(16896) weights etc. 
- :param nnet_path: - :return: - """ - nnet_name = None - if isinstance(nnet_path, str): - file_desc = open(nnet_path, "rb") - nnet_name = get_name_from_path(nnet_path) - elif isinstance(nnet_path, IOBase): - file_desc = nnet_path - else: - raise Error('Unsupported type of Kaldi model') - - tag = find_next_tag(file_desc) - # start new model / submodel - if tag == '': - load_function = load_kalid_nnet1_model - elif tag == '': - while tag != '' and tag != '': - tag = find_next_tag(file_desc) - - if tag == '': - load_function = load_kaldi_nnet3_model - else: - load_function = load_kalid_nnet2_model - elif tag == '': - load_function = load_kaldi_nnet3_model - else: - raise Error('Kaldi model should start with or tag. ', - refer_to_faq_msg(89)) - read_placeholder(file_desc, 1) - - return load_function(graph, file_desc, nnet_name) - - -def load_kalid_nnet1_model(graph, file_descr, name): - prev_layer_id = 'Parameter' - graph.add_node(prev_layer_id, name=prev_layer_id, kind='op', op='Parameter', parameters=None) - - # find out output layer, it can be only one due to chain structure of nnet1 model - output_layer = None - while True: - component_type = find_next_component(file_descr) - if component_type == end_of_nnet_tag.lower()[1:-1]: - break - - layer_o = read_binary_integer32_token(file_descr) - layer_i = read_binary_integer32_token(file_descr) - - if component_type == 'parallelcomponent': - prev_layer_id = load_parallel_component(file_descr, graph, prev_layer_id) - find_end_of_component(file_descr, component_type) - continue - - start_index = file_descr.tell() - end_tag, end_index = find_end_of_component(file_descr, component_type) - end_index -= len(end_tag) - layer_id = graph.unique_id(prefix=component_type) - graph.add_node(layer_id, - parameters=get_parameters(file_descr, start_index, end_index), - op=component_type, - kind='op', - layer_i=layer_i, - layer_o=layer_o) - if hasattr(graph, 'op_names_statistic'): - graph.op_names_statistic[component_type] += 1 - - prev_node = Node(graph, prev_layer_id) - if prev_node.op == 'Parameter': - prev_node['shape'] = int64_array([1, layer_i]) - - prev_node.add_output_port(0) - Node(graph, layer_id).add_input_port(0) - graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id)) - prev_layer_id = layer_id - output_layer = layer_id - log.debug('{} (type is {}) was loaded'.format(prev_layer_id, component_type)) - - # Tensor names information corresponding to a node is stored on outgoing edges. - # As output nodes do not have outgoing edges, fake outputs are required. In the following code - # for each output Identity node is added, and tensor name for the output is kept - # on (output, fake output) edge. After Result nodes adding transformation fake outputs - # are deleted from graph. 
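The comment above explains the convention this loader relies on: tensor names live on outgoing edges, so a terminal node gets one extra "fake output" Identity so its name has an edge to live on until Result nodes are inserted. As orientation only, here is a minimal sketch of that idea in plain networkx; the helper name and edge attribute keys are mine and only loosely mirror create_edge_attrs, this is not the MO implementation.

import networkx as nx

def add_fake_outputs(graph: nx.MultiDiGraph, outputs):
    """For each real output node, add an Identity 'fake output' so the
    tensor name can be carried on the (output, fake_output) edge."""
    for out in outputs:
        fake = out + '/fake_output'
        graph.add_node(fake, op='Identity', kind='op')
        # the framework tensor name survives on this edge until Result nodes are added
        graph.add_edge(out, fake, **{'out': 0, 'in': 0,
                                     'fw_tensor_debug_info': [(out, out + ':0')]})

g = nx.MultiDiGraph()
g.add_node('affine0', op='affinecomponent', kind='op')
add_fake_outputs(g, ['affine0'])
print(list(g.edges(data=True)))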
- assert output_layer is not None, "Output layer is not found in graph" - add_outputs_identity(graph, [output_layer], lambda g, output, fake_output: g.create_edge( - Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output))) - - -def load_kalid_nnet2_model(graph, file_descr, nnet_name): - input_name = 'Input' - graph.add_node(input_name, name=input_name, kind='op', op='Parameter', parameters=None, shape=None) - - prev_layer_id = input_name - - all_components = load_components(file_descr, graph) - - used_layers = set() - for layer_id in all_components: - prev_node = Node(graph, prev_layer_id) - if prev_node.op == 'Parameter': - parameters = Node(graph, layer_id).parameters - input_dim = read_token_value(parameters, b'') - prev_node['shape'] = int64_array([1, input_dim]) - prev_node.add_output_port(0) - Node(graph, layer_id).add_input_port(0) - graph.create_edge(prev_node, Node(graph, layer_id), 0, 0, create_edge_attrs(prev_layer_id, layer_id, prev_layer_id)) - used_layers.add(prev_layer_id) - prev_layer_id = layer_id - log.debug('{} and {} were connected'.format(prev_layer_id, layer_id)) - - # Tensor names information corresponding to a node is stored on outgoing edges. - # As output nodes do not have outgoing edges, fake outputs are required. In the following code - # for each output Identity node is added, and tensor name for the output is kept - # on (output, fake output) edge. After Result nodes adding transformation fake outputs - # are deleted from graph. - output_layers = graph.nodes - used_layers - add_outputs_identity(graph, output_layers, lambda g, output, fake_output: g.create_edge( - Node(g, output), Node(g, fake_output), 0, 0, create_edge_attrs(output, fake_output, output))) - - -def load_kaldi_nnet3_model(graph, file_descr, nnet_name): - file_descr.read(1) - component_layer_map = load_topology_map(file_descr, graph) - # add information for shape calculation for MemoryOffset - # shape calculation for MemoryOffset can't be done through shape of previous layer because - # it is separated in 2 parts to remove cycle from graph - for node in graph.get_op_nodes(**{'op': 'Parameter'}): - for o_n_name, params in node.get_outputs(): - o_n = Node(graph, o_n_name) - if o_n['op'] == 'MemoryOffset': - # don't take batch from Parameter, it will be overwritten - # take only second dimension because we have only 2 dimensions - o_n['parameters']['element_size'] = int64_array([1, node.shape[1]]) - - load_components(file_descr, graph, component_layer_map) - load_priors(file_descr, graph) - - -def load_priors(file_descr, graph): - try: - collect_until_token(file_descr, b'') - except Error: - # just ignore if priors were not found - return - if graph.graph['cmd_params'].counts is not None: - graph.graph['priors'] = read_binary_vector(file_descr) - else: - log.error("Model contains Prior values, if you want to embed them into the generated IR add option --counts=\"\" to command line", - extra={'is_warning': True}) - - -def load_components(file_descr, graph, component_layer_map=None): - num_components = collect_until_token_and_read(file_descr, b'') - log.debug('Network contains {} components'.format(num_components)) - is_nnet3 = False if component_layer_map is None else True - - if not is_nnet3: - collect_until_token(file_descr, b'') - - all_components = list() - name = "" - for _ in range(num_components): - if is_nnet3: - name = collect_until_token_and_read(file_descr, b'', np.string_) - - component_type = find_next_component(file_descr) - if component_type == 
end_of_nnet_tag.lower()[1:-1]: - break - - start_index = file_descr.tell() - end_tag, end_index = find_end_of_component(file_descr, component_type) - # read dim info where possible to simplify shape calculation for MemoryOffset - # shape calculation for MemoryOffset can't be done through shape of previous layer because - # it is separated in 2 parts to remove cycle from graph - file_descr.seek(start_index) - dim = 0 - dim_words = {b'', b''} - for dim_word in dim_words: - try: - collect_until_token(file_descr, dim_word, size_search_zone=end_index - start_index) - cur_index = file_descr.tell() - if start_index < cur_index < end_index: - dim = read_binary_integer32_token(file_descr) - break - else: - file_descr.seek(start_index) - except Error: - file_descr.seek(start_index) - - if is_nnet3: - if name in component_layer_map: - layer_id = component_layer_map[name][0] - for layer in component_layer_map[name]: - node = Node(graph, layer) - node['parameters'] = get_parameters(file_descr, start_index, end_index) - node['op'] = component_type - # Read dim info where possible to simplify shape calculation for MemoryOffset - for o_n_name, params in node.get_outputs(): - o_n = Node(graph, o_n_name) - if o_n['op'] == 'MemoryOffset' and dim != 0: - o_n['parameters']['element_size'] = int64_array([1, dim]) - else: - raise Error("Something wrong with layer {}".format(name)) - else: - layer_id = graph.unique_id(prefix=component_type) - graph.add_node(layer_id, - parameters=get_parameters(file_descr, start_index, end_index), - op=component_type, - kind='op') - if hasattr(graph, 'op_names_statistic'): - graph.op_names_statistic[component_type] += 1 - - all_components.append(layer_id) - log.debug('{} (type is {}) was loaded'.format(layer_id, component_type)) - - return all_components - - -def load_topology_map(file_descr, graph): - not_finished = True - component_layer_map = {} - layer_node_map = {} - while not_finished: - not_finished = read_node(file_descr, graph, component_layer_map, layer_node_map) - return component_layer_map - - -def read_node(file_descr, graph, component_layer_map, layer_node_map): - s = file_descr.readline() - if s == b'\n': - return False - tokens = s.split(b' ') - if tokens[0] == b'input-node': - in_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0] - in_name = str(in_name).strip('b').replace('\'', "") - in_shape = mo_array([1, s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0]], dtype=int) - - if in_name not in layer_node_map: - graph.add_node(in_name, name=in_name, kind='op', op='Parameter', parameters=None, shape=in_shape) - layer_node_map[in_name] = in_name - else: - Node(graph, in_name)['op'] = 'Parameter' - Node(graph, in_name)['shape'] = in_shape - elif tokens[0] == b'component-node': - layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0] - layer_name = str(layer_name).strip('b').replace('\'', "") - - component_name = s[s.find(b'component=') + len(b'component='):].split(b' ')[0] - if layer_name not in layer_node_map: - node_name = graph.unique_id(prefix=layer_name) - graph.add_node(node_name, - parameters=None, - op=None, - kind='op') - layer_node_map[layer_name] = node_name - else: - node_name = layer_node_map[layer_name] - - if component_name in component_layer_map: - component_layer_map[component_name].append(node_name) - else: - component_layer_map[component_name] = [node_name] - - # parse input - in_node_id = parse_input_for_node(s[s.find(b'input=') + 6:], graph, layer_node_map) - # don't create cyclic edges node to itself to avoid removing later - if 
in_node_id != node_name: - out_port = len(Node(graph, in_node_id).out_nodes()) - in_port = len(Node(graph, node_name).in_nodes()) - - Node(graph, node_name).add_input_port(in_port) - Node(graph, in_node_id).add_output_port(out_port, skip_if_exist=True) - - graph.add_edge(in_node_id, node_name, **create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port)) - elif tokens[0] == b'output-node': - layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0] - layer_name = str(layer_name).strip('b').replace('\'', "") - node_name = graph.unique_id(prefix=layer_name) - graph.add_node(node_name, - parameters=None, - op='Identity', - kind='op') - out_name = graph.unique_id(prefix=node_name + "_out") - graph.add_node(out_name, - parameters=None, - op='Result', - kind='op') - Node(graph, node_name).add_input_port(0) - Node(graph, node_name).add_output_port(0) - Node(graph, out_name).add_input_port(0) - graph.add_edge(node_name, out_name, **create_edge_attrs(node_name, out_name, node_name)) - - # parse input - in_node_id = parse_input_for_node(s[s.find(b'input=') + len(b'input='):], graph, layer_node_map) - - out_port = len(Node(graph, in_node_id).out_nodes()) - Node(graph, in_node_id).add_output_port(out_port) - graph.create_edge(Node(graph, in_node_id), Node(graph, node_name), out_port, 0, create_edge_attrs(in_node_id, node_name, in_node_id, 0, out_port)) - - objective_type = s[s.find(b'objective=') + 10:].split(b' ')[0].split(b'\n')[0] - if objective_type != b'linear': - raise Error("Unsupported objective-type for output {}".format(node_name)) - elif tokens[0] == b'dim-range-node': - layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0] - layer_name = str(layer_name).strip('b').replace('\'', "") - offset = int(s[s.find(b'dim-offset=') + len(b'dim-offset='):].split(b' ')[0]) - dim = int(s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0]) - - if layer_name in layer_node_map: - node_name = layer_node_map[layer_name] - node = Node(graph, node_name) - node['parameters'] = {'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])} - node['op'] = 'Crop' - else: - node_name = graph.unique_id(prefix=layer_name) - graph.add_node(node_name, - parameters={'offset': mo_array([offset]), 'dim': mo_array([dim]), 'axis': mo_array([1])}, - op='Crop', - kind='op') - layer_node_map[layer_name] = node_name - node = Node(graph, node_name) - - in_node_id = parse_input_for_node(s[s.find(b'input-node=') + len(b'input-node='):], graph, layer_node_map) - out_port = len(Node(graph, in_node_id).out_nodes()) - in_port = len(Node(graph, node_name).in_nodes()) - - node.add_input_port(in_port) - Node(graph, in_node_id).add_output_port(out_port) - - graph.create_edge(Node(graph, in_node_id), node, out_port, in_port, create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port)) - - # read dim info where possible to simplify shape calculation for MemoryOffset - # shape calculation for MemoryOffset can't be done through shape of previous layer because - # it is separated in 2 parts to remove cycle from graph - for o_n_name, params in node.get_outputs(): - o_n = Node(graph, o_n_name) - if o_n['op'] == 'MemoryOffset': - o_n['parameters']['element_size'] = int64_array([1, dim]) - else: - raise Error("Unsupported node specifier {}".format(tokens[0])) - return True - - -def parse_input_for_node(string, graph, component_layer_map): - return parse_specifier(string, graph, component_layer_map) - - -def parse_specifier(string, graph, layer_node_map): - pos = string.find(b'(') - if pos == 
-1: - # node name - input_name = str(string.split(b' ')[0]).strip('b').replace("\'", '').replace('\\n', '') - - if input_name not in layer_node_map: - node_name = graph.unique_id(prefix=input_name) - graph.add_node(node_name, parameters=[], op="", kind='op') - layer_node_map[input_name] = node_name - else: - node_name = layer_node_map[input_name] - return node_name - - spec = string[:pos] - args = get_args_for_specifier(string[pos:]) - if spec == b'Append': - nodes = [] - for i in range(len(args)): - nodes.append(parse_specifier(args[i], graph, layer_node_map)) - layer_name = 'Append_' - for node in nodes: - layer_name = layer_name + node + "_" - - if layer_name not in layer_node_map: - concat_name = graph.unique_id(prefix=layer_name) - graph.add_node(concat_name, - parameters=None, - op='concat', - kind='op') - layer_node_map[layer_name] = concat_name - i = 0 - Node(graph, concat_name).add_sequence_of_ports('in', range(len(nodes))) - for node in nodes: - out_port = len(Node(graph, node).out_nodes()) - Node(graph, node).add_output_port(out_port) - graph.create_edge(Node(graph, node), Node(graph, concat_name), out_port, i, create_edge_attrs(node, concat_name, node, i, out_port)) - i = i + 1 - else: - concat_name = layer_node_map[layer_name] - return concat_name - elif spec == b'Offset': - node = parse_specifier(args[0], graph, layer_node_map) - t = int(args[1]) - if len(args) > 2: - raise Error("ModelOptimizer supports only 2 arguments for Offset") - layer_name = 'Offset_' + node + '_' - if t < 0: - layer_name = layer_name + '_' + str(-t) - else: - layer_name = layer_name + str(t) - - if layer_name not in layer_node_map: - memory_name = graph.unique_id(prefix=layer_name) - layer_node_map[layer_name] = memory_name - memory_name_2 = memory_name + '_out' - graph.add_node(memory_name, - parameters=dict(t=t, pair_name=memory_name_2, has_default=False), - op='MemoryOffset', - kind='op') - out_port = len(Node(graph, node).out_nodes()) - in_port = len(Node(graph, memory_name).in_nodes()) - Node(graph, memory_name).add_input_port(in_port) - Node(graph, node).add_output_port(out_port, skip_if_exist=True) - graph.create_edge(Node(graph, node), Node(graph, memory_name), out_port, in_port, create_edge_attrs(node, memory_name, node, in_port, out_port)) - else: - memory_name = layer_node_map[layer_name] - return memory_name - elif spec == b'Sum': - nodes = [] - for i in range(len(args)): - nodes.append(parse_specifier(args[i], graph, layer_node_map)) - - layer_name = 'Sum_' - for node in nodes: - layer_name = layer_name + node + "_" - - if layer_name not in layer_node_map: - sum_name = graph.unique_id(prefix=layer_name) - graph.add_node(sum_name, parameters=None, op='Add', kind='op') - layer_node_map[layer_name] = sum_name - else: - sum_name = layer_node_map[layer_name] - - for i, node in enumerate(nodes): - out_port = len(Node(graph, node).out_nodes()) - Node(graph, node).add_output_port(out_port, skip_if_exist=True) - Node(graph, sum_name).add_input_port(i) - graph.add_edge(node, sum_name, **create_edge_attrs(node, sum_name, node, i)) - - return sum_name - elif spec == b'IfDefined': - node_id = parse_specifier(args[0], graph, layer_node_map) - node = Node(graph, node_id) - if node.op == 'MemoryOffset': - node['parameters']['has_default'] = True - return node_id - elif spec == b'ReplaceIndex': - node = parse_specifier(args[0], graph, layer_node_map) - return node - elif spec == b'Scale': - node_name = parse_specifier(args[1], graph, layer_node_map) - scale_value = float(args[0]) - layer_name = 
'{}/Mul/{}'.format(node_name, scale_value) - - if layer_name not in layer_node_map: - scale_name = graph.unique_id(prefix=layer_name) - scale_node = Mul(graph, {'name': scale_name}).create_node() - - layer_node_map[layer_name] = scale_name - - scale_const_name = 'Const_{}'.format(scale_value) - const_node = Const(graph, {'name': scale_const_name, 'value': float_array([scale_value])}).create_node() - - node = Node(graph, node_name) - graph.create_edge(const_node, scale_node, 0, 0, create_edge_attrs(const_node.id, scale_node.id, const_node.id)) - out_port = len(node.out_nodes()) - graph.create_edge(node, scale_node, out_port, 1, create_edge_attrs(node_name, scale_node.id, node_name, 1, out_port)) - else: - scale_name = layer_node_map[layer_name] - - return scale_name diff --git a/tools/mo/openvino/tools/mo/front/kaldi/loader/utils.py b/tools/mo/openvino/tools/mo/front/kaldi/loader/utils.py deleted file mode 100644 index 7beb6a69f5f5bb..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/loader/utils.py +++ /dev/null @@ -1,413 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import io -import os -import struct - -import numpy as np - -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - -end_of_nnet_tag = '' -end_of_component_tag = '' - -supported_components = [ - 'addshift', - 'affinecomponent', - 'affinecomponentpreconditionedonline', - 'affinetransform', - 'backproptruncationcomponent', - 'batchnormcomponent', - 'clipgradientcomponent', - 'convolutional1dcomponent', - 'convolutionalcomponent', - 'copy', - 'dropoutmaskcomponent', - 'elementwiseproductcomponent', - 'fixedaffinecomponent', - 'fixedscalecomponent', - 'fixedbiascomponent', - 'generaldropoutcomponent', - 'linearcomponent', - 'logsoftmaxcomponent', - 'lstmnonlinearitycomponent', - 'lstmprojected', - 'lstmprojectedstreams', - 'maxpoolingcomponent', - 'naturalgradientaffinecomponent', - 'naturalgradientperelementscalecomponent', - 'noopcomponent', - 'normalizecomponent', - 'parallelcomponent', - 'pnormcomponent', - 'rectifiedlinearcomponent', - 'rescale', - 'restrictedattentioncomponent', - 'sigmoid', - 'sigmoidcomponent', - 'softmax', - 'softmaxcomponent', - 'specaugmenttimemaskcomponent', - 'splicecomponent', - 'sumgroupcomponent', - 'tanhcomponent', - 'tdnncomponent', - 'timeheightconvolutioncomponent', -] - - -def get_bool(s: bytes) -> bool: - """ - Get bool value from bytes - :param s: bytes array contains bool value - :return: bool value from bytes array - """ - if str(s) == "b\'F\'": - return False - elif str(s) == "b\'T\'": - return True - else: - return struct.unpack('?', s)[0] - - -def get_uint16(s: bytes) -> int: - """ - Get unsigned int16 value from bytes - :param s: bytes array contains unsigned int16 value - :return: unsigned int16 value from bytes array - """ - return struct.unpack('H', s)[0] - - -def get_uint32(s: bytes) -> int: - """ - Get unsigned int32 value from bytes - :param s: bytes array contains unsigned int32 value - :return: unsigned int32 value from bytes array - """ - return struct.unpack('I', s)[0] - - -def get_uint64(s: bytes) -> int: - """ - Get unsigned int64 value from bytes - :param s: bytes array contains unsigned int64 value - :return: unsigned int64 value from bytes array - """ - return struct.unpack('q', s)[0] - - -def read_binary_bool_token(file_desc: io.BufferedReader) -> bool: - """ - Get next bool value from file - The carriage moves forward to 1 position. 
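The readers in this utils module all follow the same Kaldi binary convention: a single size byte, then that many raw bytes interpreted in native byte order (little-endian on typical x86 hosts). A small standalone round trip, written from that observation rather than taken from MO, shows how an int32 and a float32 token are laid out the way read_binary_integer32_token and read_binary_float_token consume them:

import io
import struct
import numpy as np

def write_int32_token(buf: io.BytesIO, value: int):
    # one size byte (4), then the 4 payload bytes
    buf.write(b'\x04' + struct.pack('<i', value))

def write_float32_token(buf: io.BytesIO, value: float):
    buf.write(b'\x04' + struct.pack('<f', value))

buf = io.BytesIO()
write_int32_token(buf, 40)       # e.g. a Dim value
write_float32_token(buf, 0.5)    # e.g. a dropout proportion
buf.seek(0)

size = buf.read(1)[0]
dim = struct.unpack('I', buf.read(size))[0]              # mirrors get_uint32
size = buf.read(1)[0]
proportion = np.frombuffer(buf.read(size), dtype=np.float32)[0]
print(dim, proportion)   # -> 40 0.5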
- :param file_desc: file descriptor - :return: next boolean value in file - """ - return get_bool(file_desc.read(1)) - - -def read_binary_integer32_token(file_desc: io.BufferedReader) -> int: - """ - Get next int32 value from file - The carriage moves forward to 5 position. - :param file_desc: file descriptor - :return: next uint32 value in file - """ - buffer_size = file_desc.read(1) - return get_uint32(file_desc.read(buffer_size[0])) - - -def read_binary_integer64_token(file_desc: io.BufferedReader) -> int: - """ - Get next int64 value from file - The carriage moves forward to 9 position. - :param file_desc: file descriptor - :return: next uint64 value in file - """ - buffer_size = file_desc.read(1) - return get_uint64(file_desc.read(buffer_size[0])) - - -def read_binary_float_token(file_desc: io.BufferedReader) -> float: - """ - Get next float32 value from file - The carriage moves forward to 5 position. - :param file_desc: file descriptor - :return: next float32 value in file - """ - buffer_size = file_desc.read(1) - s = file_desc.read(buffer_size[0]) - return np.frombuffer(s, dtype=np.float32)[0] - - -def read_string(file_desc: io.BufferedReader) -> int: - return collect_until_whitespace(file_desc) - - -def find_next_tag(file_desc: io.BufferedReader) -> str: - """ - Get next tag in the file - :param file_desc:file descriptor - :return: string like '' - """ - tag = b'' - while True: - symbol = file_desc.read(1) - if symbol == b'': - raise Error('Unexpected end of Kaldi model') - if tag == b'' and symbol != b'<': - continue - elif symbol == b'<': - tag = b'' - tag += symbol - if symbol != b'>': - continue - try: - return tag.decode('ascii') - except UnicodeDecodeError: - # Tag in Kaldi model always in ascii encoding - tag = b'' - - -def read_placeholder(file_desc: io.BufferedReader, size=3) -> bytes: - """ - Read size bytes from file - :param file_desc:file descriptor - :param size:number of reading bytes - :return: bytes - """ - return file_desc.read(size) - - -def find_next_component(file_desc: io.BufferedReader) -> str: - """ - Read next component in the file. - All components are contained in supported_components - :param file_desc:file descriptor - :return: string like '' - """ - is_start = True - while True: - tag = find_next_tag(file_desc) - # Tag is . 
But we want get without '<' and '>' - component_name = tag[1:-1].lower() - if component_name in supported_components or tag == end_of_nnet_tag: - # There is whitespace after component's name - read_placeholder(file_desc, 1) - return component_name - elif tag == '': - raise Error('Component has unsupported or not specified type') - elif not (is_start and tag == end_of_component_tag) and tag.find('Component') != -1: - raise Error('Component has unsupported type {}'.format(tag)) - is_start = False - - -def get_name_from_path(path: str) -> str: - """ - Get name from path to the file - :param path: path to the file - :return: name of the file - """ - return os.path.splitext(os.path.basename(path))[0] - - -def find_end_of_component(file_desc: io.BufferedReader, component: str, end_tags: tuple = ()): - """ - Find an index and a tag of the ent of the component - :param file_desc: file descriptor - :param component: component from supported_components - :param end_tags: specific end tags - :return: the index and the tag of the end of the component - """ - end_tags_of_component = [''.format(component), - end_of_component_tag.lower(), - end_of_nnet_tag.lower(), - *end_tags, - *['<{}>'.format(component) for component in supported_components]] - next_tag = find_next_tag(file_desc) - while next_tag.lower() not in end_tags_of_component: - next_tag = find_next_tag(file_desc) - return next_tag, file_desc.tell() - - -def get_parameters(file_desc: io.BufferedReader, start_index: int, end_index: int): - """ - Get part of file - :param file_desc: file descriptor - :param start_index: Index of the start reading - :param end_index: Index of the end reading - :return: part of the file - """ - file_desc.seek(start_index) - buffer = file_desc.read(end_index - start_index) - return io.BytesIO(buffer) - - -def read_token_value(file_desc: io.BufferedReader, token: bytes = b'', value_type: type = np.uint32): - """ - Get value of the token. - Read next token (until whitespace) and check if next teg equals token - :param file_desc: file descriptor - :param token: token - :param value_type: type of the reading value - :return: value of the token - """ - getters = { - np.uint32: read_binary_integer32_token, - np.uint64: read_binary_integer64_token, - bool: read_binary_bool_token - } - current_token = collect_until_whitespace(file_desc) - if token != b'' and token != current_token: - raise Error('Can not load token {} from Kaldi model'.format(token) + - refer_to_faq_msg(94)) - return getters[value_type](file_desc) - - -def collect_until_whitespace(file_desc: io.BufferedReader): - """ - Read from file until whitespace - :param file_desc: file descriptor - :return: - """ - res = b'' - while True: - new_sym = file_desc.read(1) - if new_sym == b' ' or new_sym == b'': - break - res += new_sym - return res - - -def collect_until_token(file_desc: io.BufferedReader, token, size_search_zone=0): - """ - Read from file until the token - :param file_desc: file descriptor - :param token: token that we find - :return: - """ - while True: - # usually there is the following structure DIM VALUEFM - res = collect_until_whitespace(file_desc) - if res == token or res[-len(token):] == token: - return - size = size_search_zone - if size == 0 and isinstance(file_desc, io.BytesIO): - size = len(file_desc.getbuffer()) - elif size == 0 and isinstance(file_desc, io.BufferedReader): - size = os.fstat(file_desc.fileno()).st_size - if file_desc.tell() >= size: - raise Error('End of the file. Token {} not found. 
{}'.format(token, file_desc.tell())) - - -def collect_until_token_and_read(file_desc: io.BufferedReader, token, value_type: type = np.uint32): - """ - Read from file until the token - :param file_desc: file descriptor - :param token: token to find and read - :param value_type: type of value to read - :return: - """ - getters = { - np.uint32: read_binary_integer32_token, - np.uint64: read_binary_integer64_token, - bool: read_binary_bool_token, - np.string_: read_string - } - collect_until_token(file_desc, token) - return getters[value_type](file_desc) - - -def create_edge_attrs(prev_layer_id: str, next_layer_id: str, tensor_name: str, in_port=0, out_port=0) -> dict: - """ - Create common edge's attributes - :param prev_layer_id: id of previous layer - :param next_layer_id: id of next layer - :param tensor_name: framework tensor name - :param in_port: 'in' port - :param out_port: 'out' port - :return: dictionary contains common attributes for edge - """ - return { - 'out': out_port, - 'in': in_port, - 'name': next_layer_id, - 'fw_tensor_debug_info': [(prev_layer_id, tensor_name + ":" + str(out_port))], - 'in_attrs': ['in', 'permutation'], - 'out_attrs': ['out', 'permutation'], - 'data_attrs': ['fw_tensor_debug_info'] - } - - -def read_blob(file_desc: io.BufferedReader, size: int, dtype=np.float32): - """ - Read blob from the file - :param file_desc: file descriptor - :param size: size of the blob - :param dtype: type of values of the blob - :return: np array contains blob - """ - dsizes = { - np.float32: 4, - np.int32: 4 - } - data = file_desc.read(size * dsizes[dtype]) - return np.frombuffer(data, dtype=dtype) - - -def get_args_for_specifier(string): - """ - Parse arguments in brackets and return list of arguments - :param string: string in format (, , .., ) - :return: list with arguments - """ - open_bracket = 1 - pos = 1 - args = [] - prev_arg_pos = 1 - pos_close = string.rfind(b')') - string = string[:pos_close + 1] - while pos < len(string): - pos_open = string.find(b'(', pos) - pos_close = string.find(b')', pos) - pos_sep = string.find(b',', pos) - - if pos_open == -1: - if open_bracket == 1: - args = args + string[prev_arg_pos:pos_close].replace(b' ', b'').split(b',') - pos = len(string) - else: - open_bracket = open_bracket - 1 - while open_bracket > 1: - pos_close = string.find(b')', pos_close + 1) - if pos_close != -1: - open_bracket = open_bracket - 1 - else: - raise Error("Syntax error in model: incorrect number of brackets") - args.append(string[prev_arg_pos:pos_close + 1].strip()) - prev_arg_pos = string.find(b',', pos_close + 1) + 1 - if prev_arg_pos != 0 and string[prev_arg_pos:-2].replace(b' ', b'').split(b',') != [b'']: - args = args + string[prev_arg_pos:-1].replace(b' ', b'').split(b',') - pos = len(string) - else: - if pos_sep < pos_open and open_bracket == 1: - pos_sep = string[pos_sep:pos_open].rfind(b',') + pos_sep - args = args + string[prev_arg_pos:pos_sep].replace(b' ', b'').split(b',') - prev_arg_pos = pos_sep + 1 - - if pos_open < pos_close: - open_bracket = open_bracket + 1 - pos = pos_open + 1 - else: - open_bracket = open_bracket - 1 - if open_bracket == 1: - args.append(string[prev_arg_pos:pos_close + 1].strip()) - prev_arg_pos = string.find(b',', pos_close + 1) + 1 - pos = prev_arg_pos - else: - pos = pos_close + 1 - - return args diff --git a/tools/mo/openvino/tools/mo/front/kaldi/logsoftmax_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/logsoftmax_component_ext.py deleted file mode 100644 index a3ebc861b2f843..00000000000000 --- 
a/tools/mo/openvino/tools/mo/front/kaldi/logsoftmax_component_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.log_softmax import LogSoftmax -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class LogSoftMaxComponentExtractor(FrontExtractorOp): - op = 'logsoftmaxcomponent' - enabled = True - - @classmethod - def extract(cls, node): - LogSoftmax.update_node_stat(node, {'axis': 1}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/memory_offset_adjustment.py b/tools/mo/openvino/tools/mo/front/kaldi/memory_offset_adjustment.py deleted file mode 100644 index a939f0e100be7f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/memory_offset_adjustment.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import networkx as nx - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.memoryoffset import MemoryOffset - - -def find_max_frame_time(node: Node): - in_frame_time_max = 0 - should_align = False - for inp in node.in_ports(): - if node.in_port(inp).disconnected(): - continue - in_node = node.in_port(inp).get_source().node - if in_node.frame_time > in_frame_time_max: - in_frame_time_max = in_node.frame_time - - if in_frame_time_max == 0: - return in_frame_time_max, False - - for inp in node.in_ports(): - if node.in_port(inp).disconnected(): - continue - if in_frame_time_max != node.in_port(inp).get_source().node.frame_time: - should_align = True - break - - return in_frame_time_max, should_align - - -def align_frame_time(graph: Graph, node: Node, frame_time_max): - for inp in node.in_ports(): - if node.in_port(inp).disconnected(): - continue - in_node = node.in_port(inp).get_source().node - in_node_out_port = node.in_port(inp).get_source() - in_port = node.in_port(inp) - # Adding MemoryOffset for Const does not make sense - if in_node.frame_time < frame_time_max and in_node.op != 'Const': - # Change existing MemoryOffset to avoid adding new one - if in_node.op == 'MemoryOffset': - in_node.t = in_node.frame_time - frame_time_max - in_node.frame_time = in_node.t - else: - mem_name = graph.unique_id("align_" + node.id) - memory_align = MemoryOffset(graph, attrs={'id': mem_name, - 'name': mem_name, - 'pair_name': mem_name + "_pair", - 't': in_node.frame_time - frame_time_max, - 'splitted': False}).create_node() - # add element_size for MemoryOffset after Parameter for infer - if in_node.op == 'Parameter': - memory_align['element_size'] = in_node.shape - - memory_align.in_port(0).get_connection().set_source(in_node_out_port) - in_port.get_connection().set_source(memory_align.out_port(0)) - memory_align['frame_time'] = memory_align.t - # remove MemoryOffset with maximum delay - elif in_node.frame_time == frame_time_max and in_node.op == 'MemoryOffset': - in_node_out_port.get_connection().set_source(in_node.in_port(0).get_source()) - graph.remove_node(in_node.id) - - -class MemoryOffsetAdjustment(FrontReplacementSubgraph): - r""" - Pass used to fix wrong results in the following situation: - input - | \ - ... ... - | \ - MemoryOffset(k) \ - | | - ... | - \ | - \ | - Concat - In Left branch we have MemoryOffset with k > 0 so we wait until kth frame will be calculated. In right branch - we have no such offsets. 
As result we Concat (or use in any calculations with more than 1 input) kth frame from - left branch and 0th from right branch. So we need to add synchronization before Concat node. it can be done with - MemoryOffset(k) inserted before Concat. - - Main idea of this change that when we found memoryOffset with t>0 we should re-calculate all delays relative to this - t. - """ - enabled = True - graph_condition = [lambda graph: graph.graph['fw'] == 'kaldi'] - - def run_before(self): - # transformation can't work with splitted MemoryOffsets - from openvino.tools.mo.front.kaldi.split_recurrent_memoryoffset import SplitRecurrentMemoryOffset - return [SplitRecurrentMemoryOffset] - - def find_and_replace_pattern(self, graph: Graph): - should_continue = False - for n in graph: - if Node(graph, n).op == 'MemoryOffset' and Node(graph, n).t > 0: - should_continue = True - break - - if not should_continue: - return - - try: - nodes = list(nx.topological_sort(graph)) - except: - return - - nx.set_node_attributes(G=graph, name='frame_time', values=-1) - - for n in nodes: - node = Node(graph, n) - - # calculate frame_time (delay) that was not calculated - if node.frame_time < 0: - # MemoryOffset with t>0 increases frame delay - if node.op == "MemoryOffset": - node.frame_time = node.in_port(0).get_source().node.frame_time + node.t - # for node with several inputs frame_time = maximum of delays from branches - # other branches should be synced by adding MemoryOffset(branch frame_time - max) - # After that MemoryOffset with maximum delay should be deleted (t becomes 0) - elif len(node.in_edges()) > 1: - # find out maximum of delay and check that we have at least one branch with another delay - in_frame_time_max, should_align = find_max_frame_time(node) - if should_align: - align_frame_time(graph, node, in_frame_time_max) - node.frame_time = in_frame_time_max - elif len(node.in_edges()) == 1: - node.frame_time = node.in_port(0).get_source().node.frame_time - else: - # for all input nodes (without inputs) frame_time is 0 - node.frame_time = 0 - - for n in graph: - node = Node(graph, n) - if 'frame_time' in node: - del node['frame_time'] diff --git a/tools/mo/openvino/tools/mo/front/kaldi/memoryoffset_batch_update.py b/tools/mo/openvino/tools/mo/front/kaldi/memoryoffset_batch_update.py deleted file mode 100644 index 5a747ef7906e74..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/memoryoffset_batch_update.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class MemoryOffsetBatchUpdate(FrontReplacementPattern): - """ - Update batch for MemoryOffset nodes with set element_size. - element_size is set in loader according to shape saved in model (for example Parameter node have shape in attribute). - But batch can be changed on front stage if user set batch through command line. So, element_size should be updated - accordingly. 
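The docstring above reduces MemoryOffsetAdjustment to a longest-delay computation over a DAG: a node's frame_time is the maximum of its inputs' frame times plus its own offset, and any input branch that arrives with a smaller delay needs an aligning MemoryOffset inserted in front of the multi-input node. A toy sketch of that propagation, with made-up node names and a plain attribute instead of MO graph machinery:

import networkx as nx

# toy graph: 'input' feeds 'concat' both directly and through MemoryOffset(t=2)
g = nx.DiGraph()
g.add_node('input', t=0)
g.add_node('offset', t=2)        # MemoryOffset with a positive delay
g.add_node('concat', t=0)
g.add_edges_from([('input', 'offset'), ('offset', 'concat'), ('input', 'concat')])

frame_time = {}
for n in nx.topological_sort(g):
    preds = list(g.predecessors(n))
    base = max((frame_time[p] for p in preds), default=0)
    frame_time[n] = base + g.nodes[n]['t']
    # a branch whose delay is below 'base' would need an aligning MemoryOffset
    for p in preds:
        if frame_time[p] < base:
            print(f'align {p} -> {n} with MemoryOffset(t={frame_time[p] - base})')

print(frame_time)   # {'input': 0, 'offset': 2, 'concat': 2}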
- """ - enabled = True - run_not_recursively = True - - def run_after(self): - from openvino.tools.mo.front.user_data_repack import UserDataRepack - from openvino.tools.mo.front.kaldi.split_recurrent_memoryoffset import SplitRecurrentMemoryOffset - return [UserDataRepack, SplitRecurrentMemoryOffset] - - def find_and_replace_pattern(self, graph: Graph): - batch = graph.get_op_nodes(op="Parameter")[0].shape[0] - for memoryoffset_node in graph.get_op_nodes(op='MemoryOffset'): - if memoryoffset_node.has_valid('element_size'): - memoryoffset_node.element_size[0] = batch diff --git a/tools/mo/openvino/tools/mo/front/kaldi/register_custom_ops.py b/tools/mo/openvino/tools/mo/front/kaldi/register_custom_ops.py deleted file mode 100644 index cb87f2148d7c01..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/register_custom_ops.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementOp, FrontReplacementSubgraph, FrontReplacementPattern -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -def get_front_classes(): - front_classes = [FrontExtractorOp, FrontReplacementOp, FrontReplacementPattern, FrontReplacementSubgraph] - return front_classes diff --git a/tools/mo/openvino/tools/mo/front/kaldi/replace_dropoutmask.py b/tools/mo/openvino/tools/mo/front/kaldi/replace_dropoutmask.py deleted file mode 100644 index cedea76e1166e5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/replace_dropoutmask.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from openvino.tools.mo.middle.InsertSelect import check_inputs -from openvino.tools.mo.middle.MakeKaldiConstReshapable import create_const_with_batch_from_input -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class ReplaceDropoutMaskPattern(FrontReplacementPattern): - enabled = True - - def run_after(self): - from openvino.tools.mo.front.restore_ports import RestorePorts - return [RestorePorts] - - def run_before(self): - from openvino.tools.mo.front.kaldi.replace_lstm_nonlinearity import ReplaceLstmNonLinearityPattern - return [ReplaceLstmNonLinearityPattern] - - def find_and_replace_pattern(self, graph: Graph): - inp = check_inputs(graph) - batch_port = inp.out_port(0) - replace_nodes = graph.get_op_nodes(op='dropoutmaskcomponent') - for dropout_node in replace_nodes: - assert dropout_node.has_and_set('size'), "DropoutMaskComponent has not set size attribute" - assert dropout_node.size > 0, "DropoutMaskComponent has negative or zero size attribute" - assert dropout_node.has_and_set('dropout_proportion'), \ - "DropoutMaskComponent has not set dropout_proportion attribute" - assert dropout_node.dropout_proportion > 0, \ - "DropoutMaskComponent has negative or zero dropout_proportion attribute" - dp_const_node = create_const_with_batch_from_input(batch_port, dropout_node.size, - dropout_node.dropout_proportion) - dropout_node.out_port(0).get_connection().set_source(dp_const_node.out_port(0)) - graph.remove_node(dropout_node.id) diff --git a/tools/mo/openvino/tools/mo/front/kaldi/replace_eltwise_nin1.py b/tools/mo/openvino/tools/mo/front/kaldi/replace_eltwise_nin1.py deleted file mode 100644 index 63752e34f96bfe..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/replace_eltwise_nin1.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 
2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Add, Mul -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.eltwise_n import EltwiseN -from openvino.tools.mo.utils.error import Error - - -class ReplaceEltwiseNin1NodePattern(FrontReplacementOp): - """ - In nnet3 models Kaldi gather all inputs of Mul or Sum in 1. This pass separates inputs as it should be for IE. - """ - op = "EltwiseNin1" - enabled = True - - def run_after(self): - from openvino.tools.mo.front.restore_ports import RestorePorts - return [RestorePorts] - - def replace_op(self, graph: Graph, node: Node): - ss_node = create_op_with_const_inputs(graph, Split, {1: int64_array(1)}, {'name': 'Split_eltwise_' + node.name, - 'num_splits': node['num_inputs']}) - - inp = node.get_inputs() - in_node = inp[0][0] - edge_attrs = inp[0][1] - graph.add_edge(in_node, ss_node.id, **edge_attrs) - if ss_node.num_splits == 2: - if node['operation'] == 'mul': - eltwise_node = Mul(graph, attrs={'name': 'Eltwise_' + node.name}).create_node() - elif node['operation'] == 'sum': - eltwise_node = Add(graph, attrs={'name': 'Eltwise_' + node.name}).create_node() - else: - raise Error('Error on replacing Kaldi eltwise: unknown type ' + node['operation']) - elif ss_node.num_splits > 2: - eltwise_node = EltwiseN(graph, attrs={'name': 'Eltwise_' + node.name, - 'operation': node['operation']}).create_node() - else: - raise Error('Error on replacing Kaldi eltwise') - for i in range(ss_node.num_splits): - ss_node.out_port(i).get_connection().set_destination(eltwise_node.in_port(i)) - return [eltwise_node.id] diff --git a/tools/mo/openvino/tools/mo/front/kaldi/replace_lstm_node_pattern.py b/tools/mo/openvino/tools/mo/front/kaldi/replace_lstm_node_pattern.py deleted file mode 100644 index c10902f2c65574..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/replace_lstm_node_pattern.py +++ /dev/null @@ -1,259 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array -from openvino.tools.mo.middle.MakeKaldiConstReshapable import create_const_with_batch_from_input -from openvino.tools.mo.ops.MatMul import FullyConnected -from openvino.tools.mo.ops.activation_ops import Tanh, Sigmoid -from openvino.tools.mo.ops.elementwise import Add, Mul -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.front.caffe.extractors.utils import input_as_const -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.assign import Assign -from openvino.tools.mo.ops.clamp import Clamp -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.read_value import ReadValue -from openvino.tools.mo.ops.result import Result -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp - - -def unique_id(prefix: str = 'id') -> str: - """ - Generates a unique id - The optional string prefix can be specified. 
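ReplaceEltwiseNin1NodePattern above undoes Kaldi's habit of packing all N operands of a Sum or Mul into one concatenated input: it inserts a Split along axis 1 and feeds the pieces into Add/Mul (or EltwiseN when N > 2). The numerical contract it preserves can be checked with a few lines of numpy; this is an illustration of the equivalence, not the transformation itself.

import numpy as np

batch, per_input, num_inputs = 1, 4, 3
packed = np.arange(batch * per_input * num_inputs, dtype=np.float32)
packed = packed.reshape(batch, per_input * num_inputs)   # the single fused input

# what the replacement builds: Split(axis=1, num_splits=N) followed by an N-ary Add
parts = np.split(packed, num_inputs, axis=1)
summed = np.add.reduce(parts)

# reference: summing the operands sliced out of the packed tensor directly
reference = sum(packed[:, i * per_input:(i + 1) * per_input] for i in range(num_inputs))
assert np.array_equal(summed, reference)
print(summed)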
- """ - index = len(unique_id.names) - name = prefix - while name in unique_id.names: - name = '{}_{}'.format(prefix, index) - index += 1 - unique_id.names.append(name) - return name - - -unique_id.names = [] - - -class ReplaceLSTMNodePattern(FrontReplacementOp): - op = "LSTMCell" - enabled = True - - def run_after(self): - from openvino.tools.mo.front.restore_ports import RestorePorts - return [RestorePorts] - - def run_before(self): - # current pass should be rewritten to use MatMul ops only (No FullyConnected ops should be created here) - from openvino.tools.mo.front.MatMul_normalizer import FullyConnectedDecomposer - from openvino.tools.mo.front.MoveEmbeddedInputsToInputs import MoveEmbeddedInputsToInputs - return [FullyConnectedDecomposer, - MoveEmbeddedInputsToInputs] - - def pattern(self): - return dict( - nodes=[ - ('op', dict(op=self.__class__.op, format='kaldi'))], - edges=[] - ) - - def replace_op(self, graph: Graph, node: Node): - input_out_port = node.in_port(0).get_source() - - memory_pair_input = unique_id('id') - memory_pair_output = unique_id('id') - - # Input -> FullyConnected - fc_layer_after_input_attrs = {'name': 'input_fullyconnected', - 'out-size': node.gifo_x_weights_shape[0], - 'transpose_weights': True, - 'bias_term': True, - } - - fc_layer_after_input = FullyConnected(graph, fc_layer_after_input_attrs).create_node() - fc_layer_after_input.in_port(0).connect(input_out_port) - input_as_const(fc_layer_after_input, fc_layer_after_input_attrs, 1, 'weights', node.gifo_x_weights) - input_as_const(fc_layer_after_input, fc_layer_after_input_attrs, 2, 'biases', node.gifo_biases) - - init_value_prev_lstm_output = create_const_with_batch_from_input(input_out_port, - node.gifo_r_weights_shape[1]) - prev_lstm_output = ReadValue(graph, {'name': 'prev_memory_output', - 'variable_id': memory_pair_input, - 'variable_shape': None, - 'variable_type': None - }).create_node() - prev_lstm_output.in_port(0).connect(init_value_prev_lstm_output.out_port(0)) - - # *Memory(output) -> FullyConnected - fc_layer_from_prev_state_attrs = {'name': 'prev_memory_output_fullyconnected', - 'out-size': node.gifo_r_weights_shape[0], - 'transpose_weights': True, - 'bias_term': False, - } - - fc_layer_from_prev_state = FullyConnected(graph, fc_layer_from_prev_state_attrs).create_node() - fc_layer_from_prev_state.in_port(0).connect(prev_lstm_output.out_port(0)) - input_as_const(fc_layer_from_prev_state, fc_layer_from_prev_state_attrs, 1, 'weights', node.gifo_r_weights) - - # Memory -> FullyConnected \ - # *Eltwise(sum) - # Input -> FullyConnected / - join_input_prev_state_sum = Add(graph, {'name': 'join_input_eltwise'}).create_node() - join_input_prev_state_sum.in_port(0).connect(fc_layer_from_prev_state.out_port(0)) - join_input_prev_state_sum.in_port(1).connect(fc_layer_after_input.out_port(0)) - - # *Eltwise(sum) -> Split - # it is split into 4 nodes: Act, Eltw*3 - # the following order is mandatory - # ___Tanh - # / - # Split ---(2)Eltwise(sum) - # |\ - # | \__(3)Eltwise(sum) - # |____(4)Eltwise(sum) - split_joined_input_axis = Const(graph, {'value': np.int64(1)}).create_node() - split_joined_input = Split(graph, {'name': 'join_input_split', - 'num_splits': 4, 'out_ports_count': 4}).create_node() - split_joined_input.in_port(0).connect(join_input_prev_state_sum.out_port(0)) - split_joined_input.in_port(1).connect(split_joined_input_axis.out_port(0)) - - init_value_prev_lstm_state = create_const_with_batch_from_input(split_joined_input.out_port(0), - node.input_gate_weights.shape[0]) - prev_lstm_state 
= ReadValue(graph, {'name': 'prev_memory_state', - 'variable_id': memory_pair_output, - 'variable_shape': None, - 'variable_type': None - }).create_node() - prev_lstm_state.in_port(0).connect(init_value_prev_lstm_state.out_port(0)) - - # *Memory(state) -> *ScaleShift(input) - state_input_scaleshift_attrs = {'name': 'input_scaleshift', - 'bias_term': False - } - state_input_scaleshift = ScaleShiftOp(graph, state_input_scaleshift_attrs).create_node() - state_input_scaleshift.in_port(0).connect(prev_lstm_state.out_port(0)) - input_as_const(state_input_scaleshift, state_input_scaleshift_attrs, 1, 'weights', node.input_gate_weights) - - # *Memory(state) -> *ScaleShift(forget) - state_forget_scaleshift_attrs = {'name': 'forget_scaleshift', - 'bias_term': False - } - state_forget_scaleshift = ScaleShiftOp(graph, state_forget_scaleshift_attrs).create_node() - state_forget_scaleshift.in_port(0).connect(prev_lstm_state.out_port(0)) - input_as_const(state_forget_scaleshift, state_forget_scaleshift_attrs, 1, 'weights', node.forget_gate_weights) - - # Split \ - # (2)Eltwise(sum) - # Memory(state) -> *ScaleShift(input) / - join_prev_lstm_input_joined_input_sum = Add(graph, {'name': 'join_prev_lstm_input_joined_input_eltwise' - }).create_node() - join_prev_lstm_input_joined_input_sum.in_port(0).connect(split_joined_input.out_port(1)) - join_prev_lstm_input_joined_input_sum.in_port(1).connect(state_input_scaleshift.out_port(0)) - # Split \ - # (3)Eltwise(sum) - # Memory(state) -> *ScaleShift(forget) / - join_prev_lstm_input_joined_forget_sum = Add(graph, {'name': 'join_prev_lstm_input_joined_forget_sum', - }).create_node() - join_prev_lstm_input_joined_forget_sum.in_port(0).connect(split_joined_input.out_port(2)) - join_prev_lstm_input_joined_forget_sum.in_port(1).connect(state_forget_scaleshift.out_port(0)) - - # Split -> Tanh - remember_tahn = Tanh(graph, {'name': 'remember_tahnv'}).create_node() - remember_tahn.in_port(0).connect(split_joined_input.out_port(0)) - - # Split -> (2)Eltwise(sum) -> *Sigmoid - remember_sigmoid = Sigmoid(graph, {'name': 'remember_sigmoid'}).create_node() - remember_sigmoid.in_port(0).connect(join_prev_lstm_input_joined_input_sum.out_port(0)) - - # Split -> (3)Eltwise(sum) -> **Sigmoid - forget_sigmoid = Sigmoid(graph, {'name': 'forget_sigmoid'}).create_node() - forget_sigmoid.in_port(0).connect(join_prev_lstm_input_joined_forget_sum.out_port(0)) - - # *Memory(state) \ - # (6)Eltwise(mul) - # Split -> (3)Eltwise(sum) -> **Sigmoid / - join_forget_prev_state_mul = Mul(graph, {'name': 'join_forget_prev_state_mul'}).create_node() - join_forget_prev_state_mul.in_port(0).connect(forget_sigmoid.out_port(0)) - join_forget_prev_state_mul.in_port(1).connect(prev_lstm_state.out_port(0)) - - # Split -> Tahn \ - # (5)Eltwise(mul) - # Split -> (2)Eltwise(sum) -> *Sigmoid / - join_remember_candidates_mul = Mul(graph, {'name': 'join_remember_candidates_mul'}).create_node() - join_remember_candidates_mul.in_port(0).connect(remember_tahn.out_port(0)) - join_remember_candidates_mul.in_port(1).connect(remember_sigmoid.out_port(0)) - - # (5)Eltwise(mul) \ - # (7)Eltwise(sum) - # (6)Eltwise(mul) / - join_forget_remember_sum = Add(graph, {'name': 'join_forget_remember_sum'}).create_node() - join_forget_remember_sum.in_port(0).connect(join_forget_prev_state_mul.out_port(0)) - join_forget_remember_sum.in_port(1).connect(join_remember_candidates_mul.out_port(0)) - - # (7)Eltwise(sum) -> Clamp - join_forget_clamp = create_op_with_const_inputs(graph, Clamp, {1: float32_array(-node.clip_value), - 2: 
float32_array(node.clip_value)}, - {'name': 'join_forget_clamp'}, - join_forget_remember_sum) - # - # Clamp -> (2)Memory(state) - next_lstm_state = Assign(graph, {'name': 'next_lstm_state', - 'variable_id': memory_pair_output}).create_node() - next_lstm_state.in_port(0).connect(join_forget_clamp.out_port(0)) - - res_node = Result(graph, {'name': 'next_lstm_state_out'}).create_node() - res_node.in_port(0).connect(next_lstm_state.out_port(0)) - - # Clamp -> (2)Tahn - state_filtered_tahn = Tanh(graph, {'name': 'state_filtered_tahn'}).create_node() - state_filtered_tahn.in_port(0).connect(join_forget_clamp.out_port(0)) - - # Clamp -> (2)ScaleShift - clamp_scaleshift_attrs = {'name': 'clamp_scaleshift', - 'bias_term': False} - clamp_scaleshift = ScaleShiftOp(graph, clamp_scaleshift_attrs).create_node() - clamp_scaleshift.in_port(0).connect(join_forget_clamp.out_port(0)) - input_as_const(clamp_scaleshift, clamp_scaleshift_attrs, 1, 'weights', node.output_gate_weights) - - # Split \ - # (4)Eltwise(sum) - # Clamp -> (2)ScaleShift / - join_next_lstm_input_joined_input_sum = Add(graph, {'name': 'join_next_lstm_input_joined_input_sum', - }).create_node() - join_next_lstm_input_joined_input_sum.in_port(0).connect(split_joined_input.out_port(3)) - join_next_lstm_input_joined_input_sum.in_port(1).connect(clamp_scaleshift.out_port(0)) - - # (4)Eltwise(sum) -> (3)Sigmoid - output_sigmoid = Sigmoid(graph, {'name': 'output_sigmoid'}).create_node() - output_sigmoid.in_port(0).connect(join_next_lstm_input_joined_input_sum.out_port(0)) - - # (4)Eltwise(sum) -> (3)Sigmoid \ - # (5)Eltwise(mul) - # Clamp -> (2)Tahn / - joined_output_mul = Mul(graph, {'name': 'joined_output_mul'}).create_node() - joined_output_mul.in_port(0).connect(state_filtered_tahn.out_port(0)) - joined_output_mul.in_port(1).connect(output_sigmoid.out_port(0)) - - # (5)Eltwise(mul) -> (3)FullyConnected - fc_output_attrs = {'name': 'FullyConnected', - 'out-size': node.projection_weights_shape[0], - 'transpose_weights': True, - 'bias_term': False} - fc_output = FullyConnected(graph, fc_output_attrs).create_node() - fc_output.in_port(0).connect(joined_output_mul.out_port(0)) - input_as_const(fc_output, fc_output_attrs, 1, 'weights', node.projection_weights) - - # / (2)Memory(output) - # (3)FullyConnected - # \ Output (any next node) (edge created automatically after replacement) - next_lstm_output = Assign(graph, {'name': 'next_lstm_output', - 'variable_id': memory_pair_input}).create_node() - next_lstm_output.in_port(0).connect(fc_output.out_port(0)) - - res_node_lstm_output = Result(graph, {'name': 'next_lstm_output_out'}).create_node() - res_node_lstm_output.in_port(0).connect(next_lstm_output.out_port(0)) - - return [fc_output.id] diff --git a/tools/mo/openvino/tools/mo/front/kaldi/replace_lstm_nonlinearity.py b/tools/mo/openvino/tools/mo/front/kaldi/replace_lstm_nonlinearity.py deleted file mode 100644 index c9feccb4e40e18..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/replace_lstm_nonlinearity.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.activation_ops import Sigmoid, Tanh -from openvino.tools.mo.ops.elementwise import Add, Mul -from openvino.tools.mo.ops.split import Split, AttributedVariadicSplit -from openvino.tools.mo.front.caffe.extractors.utils import input_as_const -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import 
FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp - - -class ReplaceLstmNonLinearityPattern(FrontReplacementOp): - op = "LstmNonLinearity" - enabled = True - - def run_after(self): - from openvino.tools.mo.front.restore_ports import RestorePorts - return [RestorePorts] - - def run_before(self): - from openvino.tools.mo.front.MatMul_normalizer import FullyConnectedDecomposer - return [FullyConnectedDecomposer] - - def replace_op(self, graph: Graph, node: Node): - node_name = node.soft_get('name', node.id) - # check if we have dropout - input_port = node.in_port(0) - if node.has_and_set('use_dropout'): - split_dropout = AttributedVariadicSplit(graph, - {'name': node_name + '/split_dropout', - 'size_splits': int64_array([-1, 1, 1, 1]), - 'axis': int64_array(1)}).create_node() - input_port.get_connection().set_destination(split_dropout.in_port(0)) - input_port = split_dropout.out_port(0) - i_drop_scale = split_dropout.out_port(1) - f_drop_scale = split_dropout.out_port(2) - o_drop_scale = split_dropout.out_port(3) - - # split input to (i_part, f_part, c_part, o_part, ct_1) - split_node = create_op_with_const_inputs(graph, Split, {1: np.int64(1)}, - {'name': node_name + '/split_lstm_input', - 'num_splits': 5}) - input_port.get_connection().set_destination(split_node.in_port(0)) - - i_part = split_node.out_port(0) - f_part = split_node.out_port(1) - c_part = split_node.out_port(2) - o_part = split_node.out_port(3) - ct_1 = split_node.out_port(4) - - # i_t = Sigmoid(i_part + w_ic*ct_1) - i_scale_attrs = {'name': node_name + '/i_scaleshift', - 'bias_term': False} - i_scale = ScaleShiftOp(graph, i_scale_attrs).create_node() - input_as_const(i_scale, i_scale_attrs, 1, 'weights', node.i_weights) - ct_1.connect(i_scale.in_port(0)) - - sum_i_c = Add(graph, {'name': node_name + '/sum_i_c_'}).create_node() - i_part.connect(sum_i_c.in_port(0)) - i_scale.out_port(0).connect(sum_i_c.in_port(1)) - - i_sigmoid = Sigmoid(graph, {'name': node_name + '/i_sigmoid'}).create_node() - sum_i_c.out_port(0).connect(i_sigmoid.in_port(0)) - - if node['use_dropout']: - mul_dropout_i = Mul(graph, {'name': split_node.soft_get('name', split_node.id) + '/mul_i'}).create_node() - mul_dropout_i.in_port(0).connect(i_sigmoid.out_port(0)) - mul_dropout_i.in_port(1).connect(i_drop_scale) # pylint: disable=possibly-used-before-assignment - i_sigmoid = mul_dropout_i - - # f_t = Sigmoid(f_part + w_fc*ct_1) - f_scale_attrs = {'name': node_name + '/f_scaleshift', - 'bias_term': False} - f_scale = ScaleShiftOp(graph, f_scale_attrs).create_node() - input_as_const(f_scale, f_scale_attrs, 1, 'weights', node.f_weights) - ct_1.connect(f_scale.in_port(0)) - - sum_f_c = Add(graph, {'name': node_name + '/sum_f_c_'}).create_node() - f_part.connect(sum_f_c.in_port(0)) - f_scale.out_port(0).connect(sum_f_c.in_port(1)) - - f_sigmoid = Sigmoid(graph, {'name': node_name + '/f_sigmoid'}).create_node() - sum_f_c.out_port(0).connect(f_sigmoid.in_port(0)) - - if node['use_dropout']: - mul_dropout_f = Mul(graph, {'name': split_node.soft_get('name', split_node.id) + '/mul_f'}).create_node() - mul_dropout_f.in_port(0).connect(f_sigmoid.out_port(0)) - mul_dropout_f.in_port(1).connect(f_drop_scale) # pylint: disable=possibly-used-before-assignment - f_sigmoid = mul_dropout_f - - # c_t = f_t*ct_1 + i_t * tanh(c_part) - c_tanh = Tanh(graph, {'name': 
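As a reading aid for the decomposition built in this removed replace_op: the subgraph implements the standard Kaldi LSTM nonlinearity, with the optional dropout scaling omitted here. A minimal numpy sketch of the same arithmetic, assuming per-element peephole weight vectors w_ic, w_fc, w_oc as stored on the LstmNonLinearity node (all names below are illustrative and not any OpenVINO API):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_nonlinearity(i_part, f_part, c_part, o_part, ct_1, w_ic, w_fc, w_oc):
    i_t = sigmoid(i_part + w_ic * ct_1)          # input gate
    f_t = sigmoid(f_part + w_fc * ct_1)          # forget gate
    c_t = f_t * ct_1 + i_t * np.tanh(c_part)     # new cell state
    o_t = sigmoid(o_part + w_oc * c_t)           # output gate
    m_t = o_t * np.tanh(c_t)                     # cell output
    return np.concatenate([c_t, m_t], axis=-1)   # the replacement's single Concat output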
node_name + '/c_tanh'}).create_node() - c_part.connect(c_tanh.in_port(0)) - - prod_i_c_tanh = Mul(graph, {'name': node_name + '/prod_i_c_tanh_'}).create_node() - i_sigmoid.out_port(0).connect(prod_i_c_tanh.in_port(0)) - c_tanh.out_port(0).connect(prod_i_c_tanh.in_port(1)) - - prod_f_ct_1 = Mul(graph, {'name': node_name + '/prod_f_ct_1_'}).create_node() - f_sigmoid.out_port(0).connect(prod_f_ct_1.in_port(0)) - ct_1.connect(prod_f_ct_1.in_port(1)) - - sum_f_i = Add(graph, {'name': node_name + '/sum_f_i_'}).create_node() - prod_f_ct_1.out_port(0).connect(sum_f_i.in_port(0)) - prod_i_c_tanh.out_port(0).connect(sum_f_i.in_port(1)) - - # o_t = Sigmoid(o_part + w_oc*c_t) - o_scale_attrs = {'name': node_name + '/o_scaleshift', - 'bias_term': False} - o_scale = ScaleShiftOp(graph, o_scale_attrs).create_node() - input_as_const(o_scale, o_scale_attrs, 1, 'weights', node.o_weights) - sum_f_i.out_port(0).connect(o_scale.in_port(0)) - - sum_o_c = Add(graph, {'name': node_name + '/sum_o_c_'}).create_node() - o_part.connect(sum_o_c.in_port(0)) - o_scale.out_port(0).connect(sum_o_c.in_port(1)) - - o_sigmoid = Sigmoid(graph, {'name': node_name + '/o_sigmoid'}).create_node() - sum_o_c.out_port(0).connect(o_sigmoid.in_port(0)) - - if node['use_dropout']: - mul_dropout_o = Mul(graph, {'name': split_node.soft_get('name', split_node.id) + '/mul_o'}).create_node() - mul_dropout_o.in_port(0).connect(o_sigmoid.out_port(0)) - mul_dropout_o.in_port(1).connect(o_drop_scale) # pylint: disable=possibly-used-before-assignment - o_sigmoid = mul_dropout_o - - # m_t = o_t * Tanh(c_t) - c_t_tanh = Tanh(graph, {'name': node_name + '/c_t_tanh'}).create_node() - sum_f_i.out_port(0).connect(c_t_tanh.in_port(0)) - - prod_o_c_t_tanh = Mul(graph, {'name': node_name + '/prod_o_c_t_tanh_'}).create_node() - o_sigmoid.out_port(0).connect(prod_o_c_t_tanh.in_port(0)) - c_t_tanh.out_port(0).connect(prod_o_c_t_tanh.in_port(1)) - - # add concat to create 1 output - concat = Concat(graph, {'name': node_name + '/concat_c_m'}).create_node() - concat.add_sequence_of_ports('in', range(2)) - sum_f_i.out_port(0).connect(concat.in_port(0)) - prod_o_c_t_tanh.out_port(0).connect(concat.in_port(1)) - - return [concat.id] diff --git a/tools/mo/openvino/tools/mo/front/kaldi/replace_timeheightconvolution.py b/tools/mo/openvino/tools/mo/front/kaldi/replace_timeheightconvolution.py deleted file mode 100644 index 1e2666196725af..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/replace_timeheightconvolution.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Node, Graph, rename_node -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.ops.memoryoffset import MemoryOffset - - -class ReplaceTimeHeightConvolutionPattern(FrontReplacementPattern): - enabled = True - run_not_recursively = True - - def run_after(self): - from openvino.tools.mo.front.MoveEmbeddedInputsToInputs import MoveEmbeddedInputsToInputs - return [MoveEmbeddedInputsToInputs] - - def run_before(self): - from openvino.tools.mo.front.kaldi.add_reshape_transpose_around_conv_pool import AddReshapeTransposeAroundConvPool - from openvino.tools.mo.front.kaldi.memory_offset_adjustment import MemoryOffsetAdjustment - from 
openvino.tools.mo.front.kaldi.split_recurrent_memoryoffset import SplitRecurrentMemoryOffset - return [MemoryOffsetAdjustment, AddReshapeTransposeAroundConvPool, SplitRecurrentMemoryOffset] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='timeheightconvolutioncomponent'): - self.replace_timeheightconv(graph, node) - - def replace_timeheightconv(self, graph: Graph, node: Node): - req_time_offsets = node.soft_get('time_offsets') - offsets = node.soft_get("offsets", [[]]) - all_time_offsets = list(set(offsets[:, 0])) - all_time_offsets.sort() - in_name = node.soft_get('name', node.id) - rename_node(node, in_name + '/to_delete') - - # create memoryoffsets for context gathering - # we need concat if time offsets more than 1 - concat = Concat(graph, attrs={'name': in_name + '/Concat', - 'in_ports_count': len(all_time_offsets)}).create_node() - i = 0 - for t in all_time_offsets: - # if time offset included in required_time_offsets we don't need default value - has_default = t not in req_time_offsets - memoff = MemoryOffset(graph, attrs={'name': in_name + '/MemoryOffset_' + str(i), - 't': t, 'has_default': has_default, 'splitted': False, - 'pair_name': in_name + '/MemoryOffset_pair_' + str(i)}).create_node() - concat.in_port(i).connect(memoff.out_port(0)) - memoff.in_port(0).connect(node.in_port(0).get_source()) - i = i + 1 - - stride = node.soft_get("height_subsample", 1) - - kernel = int64_array([0, 0]) - kernel[0] = len(set(offsets[:, 0])) - kernel[1] = len(set(offsets[:, 1])) - - pad_h = int64_array([0, 0]) - pad_h[0] = -min(offsets[:, 1]) if min(offsets[:, 1]) < 0 else 0 - pad_h[1] = stride * node.height_out - (node.height_in - max([max(offsets[:, 1]), 0])) - - dilation_t = (max(offsets[:, 0]) - min(offsets[:, 0])) / (kernel[0] - 1) if kernel[0] > 1 else 1 - dilation_h = (max(offsets[:, 1]) - min(offsets[:, 1])) / (kernel[1] - 1) if kernel[0] > 1 else 1 - - conv_attrs = { - 'name': in_name, - 'output': node['out_channels'], - 'height_in': node.height_in, - 'bias_term': None, - 'pad': int64_array([[0, 0], [0, 0], [0, 0], pad_h]), - 'pad_spatial_shape': int64_array([[0, 0], pad_h]), - 'dilation': int64_array([1, 1, dilation_t, dilation_h]), - 'kernel': int64_array([node.out_channels, node.in_channels, kernel[0], kernel[1]]), - 'stride': int64_array([1, 1, 1, stride]), - 'kernel_spatial': kernel, - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'channel_dims': int64_array([1]), - 'spatial_dims': int64_array([2, 3]), - 'batch_dims': int64_array([0]), - 'kernel_spatial_idx': int64_array([2, 3]), - 'group': 1, - 'reshape_kernel': True, - 'bias_addable': True, - } - conv = Convolution(graph, attrs=conv_attrs).create_node() - conv.in_port(0).connect(concat.out_port(0)) - conv.in_port(1).connect(node.in_port(1).get_source()) - - # change layout for weights from OHWI to OIHW - # in future should be replaced by common Permute mechanics - weights = conv.in_port(1).get_source().node.value - weights = weights.reshape(int64_array([node.out_channels, -1, node.in_channels])) - weights = weights.transpose(int64_array([0, 2, 1])) - weights = weights.flatten() - conv.in_port(1).get_source().node.value = weights - - conv.in_port(2).connect(node.in_port(2).get_source()) - node.out_port(0).get_connection().set_source(conv.out_port(0)) - graph.remove_node(node.id) diff --git a/tools/mo/openvino/tools/mo/front/kaldi/restrictedattentioncomponent_replacer.py b/tools/mo/openvino/tools/mo/front/kaldi/restrictedattentioncomponent_replacer.py deleted file mode 100644 
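A small worked example of how replace_timeheightconv above derives the convolution kernel and time dilation from the component's (time, height) offset pairs; the offset values are invented for illustration, only the arithmetic mirrors the removed code:

import numpy as np

offsets = np.array([[-3, -1], [-3, 0], [-3, 1],
                    [ 3, -1], [ 3, 0], [ 3, 1]])   # (time, height) pairs

kernel_t = len(set(offsets[:, 0]))   # 2 distinct time offsets
kernel_h = len(set(offsets[:, 1]))   # 3 distinct height offsets
# the time dilation spreads the 2 taps over the full time span: (3 - (-3)) / (2 - 1) = 6
dilation_t = (offsets[:, 0].max() - offsets[:, 0].min()) / (kernel_t - 1) if kernel_t > 1 else 1
print(kernel_t, kernel_h, dilation_t)   # 2 3 6.0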
index c84a6b2217f105..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/restrictedattentioncomponent_replacer.py +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils \ - import create_op_with_const_inputs, create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.split import VariadicSplit -from openvino.tools.mo.ops.memoryoffset import MemoryOffset -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.ops.einsum import Einsum -from openvino.tools.mo.ops.elementwise import Mul, Add -from openvino.tools.mo.ops.softmax import Softmax - - -class RestrictedAttentionComponentReplacer(FrontReplacementPattern): - r""" - This class expands RestrictedAttention operator into the following subgraph: - - placeholder - | - Reshape[batch*num_heads, -1] - | - VariadicSplit(val_dim, key_dim, key_dim + context_dim) - | - __________________________ - | | \ - | MemoryOffset* VariadicSplit(key_dim, contex_dim) - | \ / | - | Einsum(dot) | - | | | - | Mul(key_scale) | - | \ | - | ______ - | | - | Add - | | - MemoryOffset* SoftMax - \ / | - __________________ | - | | - Einsum(dot) | - \ / - __________ - | - Concat - | - Reshape[batch, -1] - | - - where context_dim = num_left_inputs + num_right_inputs + 1. - *MemoryOffsets are described in the create_memory_offsets_subgraph method. - Specification of the RestrictedAttention Kaldi operator can be found in the Kaldi documentation: - https://kaldi-asr.org/doc/classkaldi_1_1nnet3_1_1RestrictedAttentionComponent.html. - """ - enabled = True - run_not_recursively = True - - def __init__(self) -> None: - self.in_name: str - self.num_left_inputs: int - self.num_right_inputs: int - self.time_stride: int - super().__init__() - - def run_before(self): - from openvino.tools.mo.front.kaldi.memory_offset_adjustment import MemoryOffsetAdjustment - return [MemoryOffsetAdjustment] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='restrictedattentioncomponent'): - self.replace_restrictedattention(graph, node) - - def create_memory_offsets_subgraph(self, graph: Graph, input_node: Node, out_port, - mem_offset_idx): - r""" - This method creates the following subgraph and returns endpoint Concat node: - input_node - __________________|___________________________ - / | \ \ - MemoryOffset(t1) MemoryOffset(t2) ... 
MemoryOffset(tk) - \_____________ _____|______________/____________/ - | - Concat - where t1 = -time_stride*num_left_inputs, t2 = t1 + time_stride and - tk = time_stride*num_right_inputs - """ - concat_node = Concat( - graph, attrs={'name': self.in_name + f'/Concat_tmp_{mem_offset_idx}'}).create_node() - - for idx, t in enumerate(list(range(-self.time_stride*self.num_left_inputs, - self.time_stride*self.num_right_inputs+1)\ - [::self.time_stride])): - concat_node.add_input_port(idx) - if t != 0: - memoff = MemoryOffset(graph, attrs={'name': self.in_name +\ - f'/MemoryOffset_{mem_offset_idx}_' +\ - str(idx), - 't': t, 'has_default': False, - 'splitted': False, - 'pair_name': self.in_name + - f'/MemoryOffset_{mem_offset_idx}_pair_' + - str(idx)}).create_node() - memoff.out_port(0).connect(concat_node.in_port(idx)) - input_node.out_port(out_port).connect(memoff.in_port(0)) - else: - # 0 time delay is not allowed in IE, it's meaningless - # if time offset is 0 then connect input directly to Concat without memoryoffset - input_node.out_port(out_port).connect(concat_node.in_port(idx)) - - return concat_node - - def replace_restrictedattention(self, graph: Graph, restrictedattention_node: Node): - """ - This method replaces RestrictedAttention operator with a subgraph composed with supported - OpenVino operators. - """ - - self.num_left_inputs = restrictedattention_node['num_left_inputs'] - self.num_right_inputs = restrictedattention_node['num_right_inputs'] - context_dim = self.num_left_inputs + self.num_right_inputs + 1 - num_heads = restrictedattention_node['num_heads'] - key_dim = restrictedattention_node['key_dim'] - value_dim = restrictedattention_node['value_dim'] - self.time_stride = restrictedattention_node['time_stride'] - key_scale = restrictedattention_node['key_scale'] - - batch_axis = 0 - input_shape = restrictedattention_node.in_port(0).data.get_shape() - if input_shape: - batch_num = input_shape[batch_axis] - else: - batch_num = 1 - - self.in_name = restrictedattention_node.soft_get('name', restrictedattention_node.id) - - reshape_1_node = create_op_node_with_second_input(graph, Reshape, - int64_array([batch_num * num_heads, -1]), - {'name': self.in_name + '/Reshape_1'}) - restrictedattention_node.in_port(0).get_source().connect(reshape_1_node.in_port(0)) - - split_1_node = create_op_with_const_inputs(graph, VariadicSplit, - {1: int64_array(1), - 2: int64_array([key_dim, value_dim, - key_dim + context_dim])}, - {'name': self.in_name + '/VariadicSplit_1', - 'out_ports_count': 3}) - reshape_1_node.out_port(0).connect(split_1_node.in_port(0)) - - concat_1_node = self.create_memory_offsets_subgraph(graph, split_1_node, 0, 1) - - split_2_node = create_op_with_const_inputs(graph, VariadicSplit, - {1: int64_array(1), - 2: int64_array([key_dim, context_dim])}, - {'name': self.in_name + '/VariadicSplit_2', - 'out_ports_count': 2}) - split_1_node.out_port(2).connect(split_2_node.in_port(0)) - - einsum_1_node = Einsum(graph, {'name': self.in_name + '/Einsum_1', - 'override_output_shape': False, - 'in_ports_count': 2, - 'equation': 'ij,ik->i'}).create_node() - - reshape_helper_1_node = create_op_node_with_second_input(graph, Reshape, - int64_array( - [num_heads, 1]), - {'name': self.in_name +\ - '/Reshape_helper_1'}) - einsum_1_node.out_port(0).connect(reshape_helper_1_node.in_port(0)) - - concat_1_node.out_port(0).connect(einsum_1_node.in_port(0)) - - split_2_node.out_port(0).connect(einsum_1_node.in_port(1)) - - mul_node = create_op_with_const_inputs(graph, Mul, {1: mo_array(key_scale, 
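In case the Einsum notation is unfamiliar, the two equations this replacement relies on ('ij,ik->i' just above and 'ijk,ilk->ij' a bit further down) read as follows in plain numpy; the shapes here are arbitrary and chosen only to show the contraction pattern:

import numpy as np

a, b = np.random.rand(4, 6), np.random.rand(4, 3)
r1 = np.einsum('ij,ik->i', a, b)            # r1[i] = sum over j and k of a[i, j] * b[i, k]
assert np.allclose(r1, a.sum(axis=1) * b.sum(axis=1))

c, d = np.random.rand(4, 6, 5), np.random.rand(4, 1, 5)
r2 = np.einsum('ijk,ilk->ij', c, d)         # r2[i, j] = sum over k of c[i, j, k] * d[i, 0, k]
assert np.allclose(r2, (c * d).sum(axis=-1))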
dtype=float)}, - {'name': self.in_name + '/Mul'}) - reshape_helper_1_node.out_port(0).connect(mul_node.in_port(0)) - - add_node = Add(graph, {'name': self.in_name + '/Add'}).create_node() - mul_node.out_port(0).connect(add_node.in_port(1)) - split_2_node.out_port(1).connect(add_node.in_port(0)) - - softmax_node = Softmax(graph, {'axis': 1, 'name': self.in_name + '/Softmax'}).create_node() - add_node.out_port(0).connect(softmax_node.in_port(0)) - - concat_2_node = self.create_memory_offsets_subgraph(graph, split_1_node, 1, 2) - - reshape_helper_2_node = create_op_node_with_second_input(graph, Reshape, - int64_array([num_heads, - value_dim, - context_dim]), - {'name': self.in_name +\ - '/Reshape_helper_2'}) - concat_2_node.out_port(0).connect(reshape_helper_2_node.in_port(0)) - - reshape_helper_3_node = create_op_node_with_second_input(graph, Reshape, - int64_array( - [num_heads, 1, context_dim]), - {'name': self.in_name +\ - '/Reshape_helper_3'}) - - einsum_2_node = Einsum(graph, {'name': self.in_name + '/Einsum_2', - 'in_ports_count': 2, - 'equation': 'ijk,ilk->ij'}).create_node() - reshape_helper_2_node.out_port(0).connect(einsum_2_node.in_port(0)) - - softmax_node.out_port(0).connect(reshape_helper_3_node.in_port(0)) - reshape_helper_3_node.out_port(0).connect(einsum_2_node.in_port(1)) - - concat_3_node = Concat(graph, {'name': self.in_name + '/Concat_2', - 'in_ports_count': 2}).create_node() - einsum_2_node.out_port(0).connect(concat_3_node.in_port(0)) - softmax_node.out_port(0).connect(concat_3_node.in_port(1)) - - reshape_2_node = create_op_node_with_second_input(graph, Reshape, - int64_array([batch_num, -1]), - {'name': self.in_name + '/Reshape_2'}) - concat_3_node.out_port(0).connect(reshape_2_node.in_port(0)) - - restrictedattention_node.in_port(0).disconnect() - restrictedattention_node.out_port(0).get_connection().set_source(reshape_2_node.out_port(0)) - graph.remove_node(restrictedattention_node.id) diff --git a/tools/mo/openvino/tools/mo/front/kaldi/set_ports.py b/tools/mo/openvino/tools/mo/front/kaldi/set_ports.py deleted file mode 100644 index 2208880bb6b8fb..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/set_ports.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.utils.error import Error - - -class SetPortsPattern(FrontReplacementSubgraph): - """ - Pass used to set ports for loaded graph for Kaldi - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.front.restore_ports import RestorePorts - return [RestorePorts] - - def run_after(self): - from openvino.tools.mo.load.loader import LoadFinish - return [LoadFinish] - - def find_and_replace_pattern(self, graph: Graph): - graph.stage = 'front' - for node_id in graph.nodes(data=False): - node = Node(graph, node_id) - inputs = node.get_sorted_inputs() - outputs = node.get_sorted_outputs() - - in_ports_count = node.in_ports_count if node.has_valid('in_ports_count') else len(inputs) - out_ports_count = node.out_ports_count if node.has_valid('out_ports_count') else len(outputs) - - if len(outputs) > out_ports_count > 1: - raise Error("Node {} has more children than it should: " + - "should be {} but there is {}".format(node_id, out_ports_count, len(outputs))) - - node['_in_ports'] = {} - node['_out_ports'] = {} - if in_ports_count is not None: - for idx in 
range(in_ports_count): - node.add_input_port(idx=idx) - - if out_ports_count is not None: - for idx in range(out_ports_count): - node.add_output_port(idx=idx) - idx = 0 - for in_node_id, edge_attrs in inputs: - graph.remove_edge(in_node_id, node_id) - if len(Node(graph, in_node_id).out_ports()) == 0: - Node(graph, in_node_id).add_output_port(0) - in_node = Node(graph, in_node_id) - in_node.out_port(edge_attrs['out']).connect(node.in_port(idx)) - # need to keep this attribute in edge for correct .mapping file generation and - # for generation of "names" field in IR - in_node.out_edge(edge_attrs['out'])['fw_tensor_debug_info'] = edge_attrs['fw_tensor_debug_info'] - if idx < in_ports_count - 1: - idx = idx + 1 - - idx = 0 - for out_node_id, edge_attrs in outputs: - graph.remove_edge(node_id, out_node_id) - if len(Node(graph, out_node_id).in_ports()) == 0: - Node(graph, out_node_id).add_input_port(0) - node.out_port(idx).connect(Node(graph, out_node_id).in_port(edge_attrs['in'])) - # need to keep this attribute in edge for correct .mapping file generation and - # for generation of "names" field in IR - node.out_edge(idx)['fw_tensor_debug_info'] = edge_attrs['fw_tensor_debug_info'] - if idx < out_ports_count - 1: - idx = idx + 1 diff --git a/tools/mo/openvino/tools/mo/front/kaldi/sigmoid_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/sigmoid_ext.py deleted file mode 100644 index a41dc8af8b6a26..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/sigmoid_ext.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import Sigmoid -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SigmoidFrontExtractor(FrontExtractorOp): - op = 'sigmoid' - enabled = True - - @classmethod - def extract(cls, node): - Sigmoid.update_node_stat(node) - return cls.enabled - - -class SigmoidComponentFrontExtractor(FrontExtractorOp): - op = 'sigmoidcomponent' - enabled = True - - @classmethod - def extract(cls, node): - Sigmoid.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/split_recurrent_memoryoffset.py b/tools/mo/openvino/tools/mo/front/kaldi/split_recurrent_memoryoffset.py deleted file mode 100644 index e7b0370aebcbba..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/split_recurrent_memoryoffset.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import networkx as nx - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.memoryoffset import MemoryOffset -from openvino.tools.mo.ops.result import Result -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.graph import Node - - -class SplitRecurrentMemoryOffset(FrontReplacementSubgraph): - """ - Splits MemoryOffsets in recurrent blocks (typically LSTM blocks) into 2 parts. - - These parts then will be converted to ReadValue and Assign. Splitting complicates shape inference but - MemoryOffsets in recurrent blocks are cycled and, in order to make topological sort possible - during shape inference, they are splitted earlier on the front phase. In contrast, - MemoryOffsets in TDNN blocks are not cycled, so they will be splitted after shape infer on the middle. 
- Now only LSTM blocks with MemoryOffset are present. - """ - enabled = True - graph_condition = [lambda graph: graph.graph['fw'] == 'kaldi'] - - @staticmethod - def split_offset(offset_node: Node): - paired_node = MemoryOffset(offset_node.graph, {'name': offset_node.pair_name, 'splitted': True, - 'pair_name': offset_node.id, - 'element_size': offset_node['element_size'], - 't': offset_node.t, - 'has_default': offset_node.has_default}).create_node() - offset_node['splitted'] = True - offset_node.out_port(0).get_connection().set_source(paired_node.out_port(0)) - res_node = Result(offset_node.graph, {'name': offset_node.id + '_output'}).create_node() - offset_node.out_port(0).connect(res_node.in_port(0)) - - def find_and_replace_pattern(self, graph: Graph): - for offset_node in graph.get_op_nodes(op='MemoryOffset', splitted=False): - try: - # if graph contains recurrent block -> split MemoryOffset to enable shape infer - nx.find_cycle(graph, offset_node.id) - except nx.NetworkXNoCycle as e: - # MemoryOffset node is not in a recurrent block -- no splitting is needed - return - - # check that node has information for future partial infer - # element_size is set in loader based on dimensions of previous layer from original Kaldi model - if not offset_node.has_valid('element_size'): - # check if previous layer contains information about its shape in out-size - # out-size is set in extractor of some nodes like affinecomponent based on weight's size - if offset_node.in_port(0).get_source().node.has_valid('out-size'): - offset_node['element_size'] = int64_array([1, offset_node.in_port(0).get_source().node['out-size']]) - else: - raise Error("In a recurrent block 'element_size' for node {} is not set".format(offset_node.id)) - SplitRecurrentMemoryOffset.split_offset(offset_node) diff --git a/tools/mo/openvino/tools/mo/front/kaldi/tanh_component_ext.py b/tools/mo/openvino/tools/mo/front/kaldi/tanh_component_ext.py deleted file mode 100644 index e972760822bff2..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/tanh_component_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import Tanh -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class TanhFrontExtractor(FrontExtractorOp): - op = 'tanhcomponent' - enabled = True - - @classmethod - def extract(cls, node): - Tanh.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/kaldi/tdnn_component_replacer.py b/tools/mo/openvino/tools/mo/front/kaldi/tdnn_component_replacer.py deleted file mode 100644 index f29aaf5ef5ebc1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/tdnn_component_replacer.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.MatMul import FullyConnected -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.graph.graph import rename_nodes -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.memoryoffset import MemoryOffset - - -class TdnnComponentReplacer(FrontReplacementPattern): - r""" - Expand TdnnComponent into MemoryOffsets, Concat and FullyConected nodes - - BEFORE: - placeholder - | - TdnnComponent('time_offsets': t1, t2,... 
tk) - | - _______________________________________________________________ - - AFTER: - placeholder - __________________|___________________________ - / | \ \ - MemoryOffset(t1) MemoryOffset(t2) ... MemoryOffset(tk) - \_____________ _____|______________/____________/ - Concat - | - FullyConnected - | - """ - enabled = True - run_not_recursively = True - - def run_before(self): - from openvino.tools.mo.front.kaldi.memory_offset_adjustment import MemoryOffsetAdjustment - return [MemoryOffsetAdjustment] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='tdnncomponent'): - self.replace_tdnn(graph, node) - - def replace_tdnn(self, graph: Graph, tdnn_node: Node): - tdnn_name = tdnn_node.soft_get('name', tdnn_node.id) - - concat_node = Concat(graph, {'axis': 1}).create_node() - rename_nodes([(tdnn_node, tdnn_name + '/to_be_removed'), (concat_node, tdnn_name)]) - - for offset_ind, t in enumerate(tdnn_node['time_offsets']): - concat_node.add_input_port(offset_ind) - if t != 0: - memory_name = tdnn_name + '/MemoryOffset/' + str(abs(t)) - memoryoffset_node = MemoryOffset(graph, {'name': memory_name, 't': t, - 'pair_name': memory_name + '_out', - 'has_default': False, 'splitted': False}).create_node() - - tdnn_node.in_port(0).get_source().connect(memoryoffset_node.in_port(0)) - memoryoffset_node.out_port(0).connect(concat_node.in_port(offset_ind)) - else: - # 0 time delay is not allowed in IE, it's meaningless - # if time offset is 0 then connect input of tdnncomponent directly to Concat without memoryoffset - tdnn_node.in_port(0).get_source().connect(concat_node.in_port(offset_ind)) - - weights = tdnn_node['weights'] - fc_inputs = {1: weights} - - bias_term = False - if tdnn_node.has_valid('biases'): - assert len(tdnn_node['biases']) == weights.shape[0] - fc_inputs.update({2: tdnn_node['biases']}) - bias_term = True - - fc_node = create_op_with_const_inputs(graph, FullyConnected, fc_inputs, - {'name': tdnn_name + '/FC', 'out-size': weights.shape[0], - 'transpose_weights': True, 'bias_term': bias_term}) - - concat_node.out_port(0).connect(fc_node.in_port(0)) - tdnn_node.in_port(0).disconnect() - tdnn_node.out_port(0).get_connection().set_source(fc_node.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/kaldi/utils.py b/tools/mo/openvino/tools/mo/front/kaldi/utils.py deleted file mode 100644 index 4871c27150debe..00000000000000 --- a/tools/mo/openvino/tools/mo/front/kaldi/utils.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import io -import logging as log -import os - -import numpy as np - -from openvino.tools.mo.front.kaldi.loader.utils import read_placeholder, read_binary_integer32_token, read_blob, read_token_value, \ - find_next_tag -from openvino.tools.mo.utils.error import Error - - -def read_binary_matrix(file_desc: io.BufferedReader, read_token: bool = True): - if read_token: - read_placeholder(file_desc) - rows_number = read_binary_integer32_token(file_desc) - cols_number = read_binary_integer32_token(file_desc) - # to compare: ((float *)a->buffer())[10] - return read_blob(file_desc, rows_number * cols_number), (rows_number, cols_number) - - -def read_binary_vector(file_desc: io.BufferedReader, read_token: bool = True, dtype=np.float32): - if read_token: - read_placeholder(file_desc) - elements_number = read_binary_integer32_token(file_desc) - return read_blob(file_desc, elements_number, dtype) - - -def read_binary_vector_of_pairs(file_desc: io.BufferedReader, read_token: 
bool = True, dtype=np.float32): - if read_token: - read_placeholder(file_desc) - elements_number = read_binary_integer32_token(file_desc) - return read_blob(file_desc, 2 * elements_number, dtype) - - -def read_learning_info(pb: io.BufferedReader): - while True: - read_placeholder(pb, 1) - first_char = pb.read(1) - pb.seek(-2, os.SEEK_CUR) - position = pb.tell() - if first_char == b'L': - cur_pos = pb.tell() - token = find_next_tag(pb) - pb.seek(cur_pos) - if token in ['', '']: - token = bytes(token, 'ascii') - else: - log.debug('Unexpected tag: {}'.format(token)) - break - elif first_char == b'B': - token = b'' - elif first_char == b'M': - token = b'' - elif first_char == b'!': # token = b'' - break - else: - break - try: - read_token_value(pb, token) - except Error: - pb.seek(position) - break - diff --git a/tools/mo/openvino/tools/mo/front/no_op_eraser.py b/tools/mo/openvino/tools/mo/front/no_op_eraser.py deleted file mode 100644 index 3c24b19ac0fa02..00000000000000 --- a/tools/mo/openvino/tools/mo/front/no_op_eraser.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph - - -class NoOpEraser(FrontReplacementSubgraph): - enabled = True - - @staticmethod - def pattern(): - return dict( - nodes=[('noop', dict(kind='op', op='NoOp')), - ('output', dict(kind='op', op='Result')) - ], - edges=[('noop', 'output')] - ) - - @staticmethod - def replace_sub_graph(graph: Graph, match: dict): - graph.erase_node(match['output']) - graph.erase_node(match['noop']) - log.info("NoOp node \"{}\" was removed from the graph".format(match['noop'].id)) diff --git a/tools/mo/openvino/tools/mo/front/non_max_suppression_normalize.py b/tools/mo/openvino/tools/mo/front/non_max_suppression_normalize.py deleted file mode 100644 index 11cc1b5d1e3ee0..00000000000000 --- a/tools/mo/openvino/tools/mo/front/non_max_suppression_normalize.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.reshape import Reshape - - -class NonMaxSuppressionNormalize(FrontReplacementSubgraph): - """ - The transformation converts several inputs of the NonMaxSuppression layer to be 1D instead of 0D with shape [1] to - comply with the layer specification. 
- """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for nms in graph.get_op_nodes(op='NonMaxSuppression'): - # make inputs 2 to 5 to have shape [1] instead of [0] (convert 0D to 1D) - nms_name = nms.soft_get('name', nms.id) - for port_id in range(2, 6): - if port_id in nms.in_ports() and not nms.in_port(port_id).disconnected(): - reshape_1d = create_op_node_with_second_input(graph, Reshape, int64_array([1]), - {'name': nms_name + '/Reshape_1D_{}'.format(port_id)}) - nms.in_port(port_id).get_connection().insert_node(reshape_1d) diff --git a/tools/mo/openvino/tools/mo/front/onnx/AttributedSliceToSlice.py b/tools/mo/openvino/tools/mo/front/onnx/AttributedSliceToSlice.py deleted file mode 100644 index 433597c9c85147..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/AttributedSliceToSlice.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.slice import Slice - - -class AttributedSliceToSliceReplacer(FrontReplacementOp): - """ - This class replaces AttributedSlice -> Slice - """ - op = 'AttributedSlice' - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['op'] - slice_name = node.soft_get('name', node.id) - - slice_node = create_op_with_const_inputs(graph, Slice, {1: node.starts, 2: node.ends, 3: node.axes}) - rename_nodes([(node, slice_name + '/to_be_removed'), (slice_node, slice_name)]) - - node.in_port(0).get_connection().set_destination(slice_node.in_port(0)) - node.out_port(0).get_connection().set_source(slice_node.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/onnx/CTCGreedyDecoder_ext.py b/tools/mo/openvino/tools/mo/front/onnx/CTCGreedyDecoder_ext.py deleted file mode 100644 index e6f66c42b0244c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/CTCGreedyDecoder_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ctc_greedy_decoder_seq_len import CTCGreedyDecoderSeqLenOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class CTCCGreedyDecoderFrontExtractor(FrontExtractorOp): - op = 'CTCGreedyDecoder' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'merge_repeated': bool(onnx_attr(node, 'merge_repeated', 'i', default=1)), - } - CTCGreedyDecoderSeqLenOp.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/LoopNormalize.py b/tools/mo/openvino/tools/mo/front/onnx/LoopNormalize.py deleted file mode 100644 index 222e63d631db64..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/LoopNormalize.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.pass_separator import FrontStart -from openvino.tools.mo.front.restore_ports import RestorePorts -from openvino.tools.mo.ops.loop import Loop -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import 
create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class ONNXLoopNormalize(FrontReplacementSubgraph): - enabled = True - - def run_before(self): - return [FrontStart] - - def run_after(self): - return [RestorePorts] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='Loop'): - self.normalize_body_graph(node) - - @staticmethod - def normalize_body_graph(loop_node: Node): - loop_name = loop_node.soft_get('name', loop_node.id) - # connect "trip count" input if it is not connected with default value "Infinity" (-1) - if not loop_node.is_in_port_connected(0): - loop_node.add_input_port(0, skip_if_exist=True) - Const(loop_node.graph, {'name': loop_name + '/trip_count', 'value': int64_array(-1)}).\ - create_node().out_port(0).connect(loop_node.in_port(0)) - - # connect "execution condition" input if it is not connected with default value True - if not loop_node.is_in_port_connected(1): - loop_node.add_input_port(1, skip_if_exist=True) - Const(loop_node.graph, {'name': loop_name + '/execution_cond', 'value': mo_array(True, dtype=bool)}).\ - create_node().out_port(0).connect(loop_node.in_port(1)) - - # scan output need Unsqueeze over axis 0 - for record in loop_node.output_port_map: - body_node = Loop.get_body_node_by_internal_id(loop_node, record['internal_layer_id']) - assert body_node is not None - assert body_node.soft_get('type') == 'Result' - - if record['axis'] is not None: - unsqueeze = create_op_with_const_inputs(loop_node.body, Unsqueeze, {1: int64_array([0])}) - body_node.in_port(0).get_connection().insert_node(unsqueeze) - - Loop.normalize_input_output_ports(loop_node) diff --git a/tools/mo/openvino/tools/mo/front/onnx/MvnOnnxToMvn.py b/tools/mo/openvino/tools/mo/front/onnx/MvnOnnxToMvn.py deleted file mode 100644 index 3a3438d39b5664..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/MvnOnnxToMvn.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes - - -class MvnOnnxToMvn(FrontReplacementPattern): - """ - Replace AttributedMVN operation from ONNX with MVN - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='MVNOnnx'): - node_name = node.soft_get('name', node.id) - - new_mvn = create_op_with_const_inputs(graph, MVN, {1: node.axes}, - {'eps': node.eps, - 'eps_mode': node.eps_mode, - 'normalize_variance': node.normalize_variance}) - node.in_port(0).get_connection().set_destination(new_mvn.in_port(0)) - node.out_port(0).get_connection().set_source(new_mvn.out_port(0)) - rename_nodes([(node, node_name + '/to_be_removed'), (new_mvn, node_name)]) - - graph.remove_node(node.id) diff --git a/tools/mo/openvino/tools/mo/front/onnx/ONNXResize10ToInterpolate.py b/tools/mo/openvino/tools/mo/front/onnx/ONNXResize10ToInterpolate.py deleted file mode 100644 index d4eb4240ecddb4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/ONNXResize10ToInterpolate.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from 
openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.activation_ops import Floor -from openvino.tools.mo.ops.elementwise import Add, Mul -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.ops.rank import Rank -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice - - -def replace_resize(graph: Graph, resize: Node): - log.debug("Converting of ONNX Resize-10 to Interpolate-4 " - "is triggered for node {}.".format(resize.soft_get('name', resize.id))) - - resize_name = resize.soft_get('name', resize.id) - - rank_node = Rank(graph, {'name': resize_name + '/max_axes'}).create_node() - range_node = create_op_with_const_inputs(graph, Range, {0: int64_array(2), 2: int64_array(1)}, - {'name': resize_name + '/axes'}) - - sizes_ss = create_op_with_const_inputs(graph, StridedSlice, - {1: int64_array([2]), - 2: int64_array([0]), - 3: int64_array([1])}, - {'name': resize_name + '/sizes_ss', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([0]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0])}) - scales_ss = create_op_with_const_inputs(graph, StridedSlice, - {1: int64_array([2]), - 2: int64_array([0]), - 3: int64_array([1])}, - {'name': resize_name + '/scales_ss', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([0]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0])}) - - rank_node.out_port(0).connect(range_node.in_port(1)) - - interpolate_node = Interpolate(graph, {'version': 'opset4', - 'mode': 'linear_onnx' if resize.mode == 'linear' else 'nearest', - 'coordinate_transformation_mode': 'asymmetric', - 'cube_coeff': -0.75, - 'nearest_mode': 'simple', - 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), - 'antialias': 0, - 'shape_calculation_mode': 'scales', - 'in_ports_count': 4}).create_node() - - range_node.out_port(0).connect(interpolate_node.in_port(3)) - shape_of = Shape(graph, {'name': resize_name + '/ShapeOf'}).create_node() - - # When we calculate 'sizes' input as floor(input_shape * scales), we can get incorrect 'sizes' if, e.g., - # scales = [1.0, 1.0, 1.33333, 2.0], input_shape = [1, 3, 30, 200], because - # input_shape * scales = [1, 3, 39.9999, 400], and floor(input_shape * scales)[2] == 39, not 40. - # Maybe we need to calculate 'sizes' input as floor(input_shape * scales + eps), where eps is some small - # floating point number, e.g. 1.0e-5. But, in this case, if scales = [1.0, 1.0, 1.333333, 2.0], - # input_shape = [1, 3, 30, 200], floor(input_shape * scales + eps) = 39, not 40, because - # input_shape[2] * scales[2] + 1.0e-5 = 39.99991. - # Hence, we need to calculate 'sizes' as floor(input_shape * (scales + eps)). 
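The rounding pitfall spelled out in the comment above can be reproduced directly with the numbers it mentions; a minimal numpy check (not part of the removed file):

import numpy as np

input_shape = np.array([1, 3, 30, 200], dtype=np.float32)
scales = np.array([1.0, 1.0, 1.33333, 2.0], dtype=np.float32)
eps = 1.0e-5

print(np.floor(input_shape * scales))          # third value is 39, not the intended 40
print(np.floor(input_shape * scales + eps))    # still 39: 39.9999 + 1e-5 < 40
print(np.floor(input_shape * (scales + eps)))  # 40: eps is scaled up by input_shape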
- add_node = create_op_with_const_inputs(graph, Add, - {1: float_array([1.0e-5])}, - {'name': resize_name + '/Add'}) - - dst_dtype = np.float32 # even if data_type=FP16 use float32 for shape values - - cast_shape_to_float = Cast(graph, {'dst_type': dst_dtype}).create_node() - - shape_of.out_port(0).connect(cast_shape_to_float.in_port(0)) - mul_node = Mul(graph, {'name': resize_name + '/Mul'}).create_node([cast_shape_to_float, add_node]) - floor_node = Floor(graph, {'name': resize_name + '/Floor'}).create_node([mul_node]) - cast_mul_result_to_int = Cast(graph, {'dst_type': np.int64}).create_node([floor_node]) - cast_mul_result_to_int.out_port(0).connect(sizes_ss.in_port(0)) - sizes_ss.out_port(0).connect(interpolate_node.in_port(1)) - - scales_ss.out_port(0).connect(interpolate_node.in_port(2)) - - connection_of_resize_input = resize.in_port(0).get_connection() - connection_of_resize_input.set_destination(interpolate_node.in_port(0)) - - connection_of_scales = resize.in_port(1).get_connection() - connection_of_scales.set_destination(scales_ss.in_port(0)) - - connection_of_resize_input.get_source().connect(shape_of.in_port(0)) - connection_of_resize_input.get_source().connect(rank_node.in_port(0)) - connection_of_scales.get_source().connect(add_node.in_port(0)) - - rename_nodes([(resize, resize_name + '/delete'), (interpolate_node, resize_name)]) - resize.out_port(0).get_connection().set_source(interpolate_node.out_port(0)) - - -class ONNXResize10ToInterpolate(FrontReplacementOp): - """ - The transformation replaces ONNX Resize 10 with Interpolate-4. - """ - op = 'ONNXResize10' - enabled = True - - def run_after(self): - from openvino.tools.mo.front.InterpolateNormalizer import InterpolateNormalizer - return [InterpolateNormalizer] - - def replace_sub_graph(self, graph: Graph, match: dict): - resize = match['op'] - replace_resize(graph, resize) diff --git a/tools/mo/openvino/tools/mo/front/onnx/__init__.py b/tools/mo/openvino/tools/mo/front/onnx/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/onnx/activation_ext.py b/tools/mo/openvino/tools/mo/front/onnx/activation_ext.py deleted file mode 100644 index 8f8ae899159b40..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/activation_ext.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import * -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class AbsExtractor(FrontExtractorOp): - op = 'Abs' - enabled = True - - @classmethod - def extract(cls, node): - Abs.update_node_stat(node) - return cls.enabled - - -class AcosExtractor(FrontExtractorOp): - op = 'Acos' - enabled = True - - @classmethod - def extract(cls, node): - Acos.update_node_stat(node) - return cls.enabled - - -class AcoshExtractor(FrontExtractorOp): - op = 'Acosh' - enabled = True - - @classmethod - def extract(cls, node): - Acosh.update_node_stat(node) - return cls.enabled - - -class AsinExtractor(FrontExtractorOp): - op = 'Asin' - enabled = True - - @classmethod - def extract(cls, node): - Asin.update_node_stat(node) - return cls.enabled - - -class AsinhExtractor(FrontExtractorOp): - op = 'Asinh' - enabled = True - - @classmethod - def 
extract(cls, node): - Asinh.update_node_stat(node) - return cls.enabled - - -class AtanExtractor(FrontExtractorOp): - op = 'Atan' - enabled = True - - @classmethod - def extract(cls, node): - Atan.update_node_stat(node) - return cls.enabled - - -class AtanhExtractor(FrontExtractorOp): - op = 'Atanh' - enabled = True - - @classmethod - def extract(cls, node): - Atanh.update_node_stat(node) - return cls.enabled - - -class CeilExtractor(FrontExtractorOp): - op = 'Ceil' - enabled = True - - @classmethod - def extract(cls, node): - Ceiling.update_node_stat(node) - return cls.enabled - - -class CosExtractor(FrontExtractorOp): - op = 'Cos' - enabled = True - - @classmethod - def extract(cls, node): - Cos.update_node_stat(node) - return cls.enabled - - -class CoshExtractor(FrontExtractorOp): - op = 'Cosh' - enabled = True - - @classmethod - def extract(cls, node): - Cosh.update_node_stat(node) - return cls.enabled - - -class EluExtractor(FrontExtractorOp): - op = 'Elu' - enabled = True - - @classmethod - def extract(cls, node): - alpha = onnx_attr(node, 'alpha', 'f', default=1.0) - Elu.update_node_stat(node, {'alpha': alpha}) - return EluExtractor.enabled - - -class ErfExtractor(FrontExtractorOp): - op = 'Erf' - enabled = True - - @classmethod - def extract(cls, node): - Erf.update_node_stat(node) - return cls.enabled - - -class ExpExtractor(FrontExtractorOp): - op = 'Exp' - enabled = True - - @classmethod - def extract(cls, node): - Exp.update_node_stat(node) - return cls.enabled - - -class FloorExtractor(FrontExtractorOp): - op = 'Floor' - enabled = True - - @classmethod - def extract(cls, node): - Floor.update_node_stat(node) - return cls.enabled - - -class ThresholdedReluExtractor(FrontExtractorOp): - op = 'ThresholdedRelu' - enabled = True - - @classmethod - def extract(cls, node): - alpha = onnx_attr(node, 'alpha', 'f', default=1.0) - ThresholdedRelu.update_node_stat(node, {'alpha': alpha}) - return cls.enabled - - -class LeakyReLUExtractor(FrontExtractorOp): - op = 'LeakyRelu' - enabled = True - - @classmethod - def extract(cls, node): - negative_slope = onnx_attr(node, 'alpha', 'f', default=1.0) - if negative_slope == 0: - ReLU.update_node_stat(node) - else: - LeakyReLU.update_node_stat(node, {'negative_slope': negative_slope}) - return cls.enabled - - -class LogExtractor(FrontExtractorOp): - op = 'Log' - enabled = True - - @classmethod - def extract(cls, node): - Log.update_node_stat(node) - return cls.enabled - - -class NotExtractor(FrontExtractorOp): - op = 'Not' - enabled = True - - @classmethod - def extract(cls, node): - LogicalNot.update_node_stat(node) - return cls.enabled - - -class ReLUExtractor(FrontExtractorOp): - op = 'Relu' - enabled = True - - @classmethod - def extract(cls, node): - ReLU.update_node_stat(node) - return cls.enabled - - -class SigmoidExtractor(FrontExtractorOp): - op = 'Sigmoid' - enabled = True - - @classmethod - def extract(cls, node): - Sigmoid.update_node_stat(node) - return cls.enabled - - -class SignExtractor(FrontExtractorOp): - op = 'Sign' - enabled = True - - @classmethod - def extract(cls, node): - Sign.update_node_stat(node) - return cls.enabled - - -class SinExtractor(FrontExtractorOp): - op = 'Sin' - enabled = True - - @classmethod - def extract(cls, node): - Sin.update_node_stat(node) - return cls.enabled - - -class SinhExtractor(FrontExtractorOp): - op = 'Sinh' - enabled = True - - @classmethod - def extract(cls, node): - Sinh.update_node_stat(node) - return cls.enabled - - -class TanExtractor(FrontExtractorOp): - op = 'Tan' - enabled = True - 
- @classmethod - def extract(cls, node): - Tan.update_node_stat(node) - return cls.enabled - - -class TanhExtractor(FrontExtractorOp): - op = 'Tanh' - enabled = True - - @classmethod - def extract(cls, node): - Tanh.update_node_stat(node) - return cls.enabled - - -class SoftSignExtractor(FrontExtractorOp): - op = 'Softsign' - enabled = True - - @classmethod - def extract(cls, node): - SoftSign.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/affine_ext.py b/tools/mo/openvino/tools/mo/front/onnx/affine_ext.py deleted file mode 100644 index 491c27d50c02b6..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/affine_ext.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class AffineFrontExtractor(FrontExtractorOp): - # Affine operation will be transformed to ImageScalar and further will be converted to Mul->Add seq - op = 'Affine' - enabled = True - - @classmethod - def extract(cls, node): - dst_type = lambda x: mo_array(x) - - scale = onnx_attr(node, 'alpha', 'f', default=None, dst_type=dst_type) - bias = onnx_attr(node, 'beta', 'f', default=None, dst_type=dst_type) - - node['scale'] = scale - node['bias'] = bias - node['op'] = 'ImageScaler' - - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/argmax_ext.py b/tools/mo/openvino/tools/mo/front/onnx/argmax_ext.py deleted file mode 100644 index f21bf86f825b64..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/argmax_ext.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.argmax import ArgMaxOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - -class ArgMaxFrontExtractor(FrontExtractorOp): - op = 'ArgMax' - enabled = True - - @classmethod - def extract(cls, node): - keepdims = onnx_attr(node, 'keepdims', 'i', default=1) - axis = onnx_attr(node, 'axis', 'i', default=0) - - attrs = { - 'axis': axis, - - # ONNX ArgMax always computes an index of one maximum value - 'top_k' : 1, - 'out_max_val' : 0, - - # Set attribute to trigger ArgMax replacer in case do not keep the dimension - 'keepdims': keepdims, - - 'remove_values_output': True - } - - ArgMaxOp.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/argmin_ext.py b/tools/mo/openvino/tools/mo/front/onnx/argmin_ext.py deleted file mode 100644 index 9aad2ef84da7c3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/argmin_ext.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.argmin import ArgMinOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class ArgMinFrontExtractor(FrontExtractorOp): - op = 'ArgMin' - enabled = True - - @classmethod - def extract(cls, node): - keepdims = onnx_attr(node, 'keepdims', 'i', default=1) - axis = onnx_attr(node, 'axis', 'i', default=0) - - attrs = { - 'axis': axis, - 'top_k': 1, - 'keepdims': keepdims, - 'remove_values_output': True - } - - ArgMinOp.update_node_stat(node, attrs) - return 
cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/aten_ext.py b/tools/mo/openvino/tools/mo/front/onnx/aten_ext.py deleted file mode 100644 index 4a36d7676f0dd1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/aten_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.aten import ATen -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class ATenFrontExtractor(FrontExtractorOp): - op = 'ATen' - enabled = True - - @classmethod - def extract(cls, node): - mode = onnx_attr(node, 'mode', 'i', default=1) - operator = onnx_attr(node, 'operator', 's').decode() - - ATen.update_node_stat(node, {'operator': operator, 'mode': mode}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/cast_ext.py b/tools/mo/openvino/tools/mo/front/onnx/cast_ext.py deleted file mode 100644 index ba7a8f5b15b54f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/cast_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import get_onnx_datatype_as_numpy, onnx_attr - - -class CastFrontExtractor(FrontExtractorOp): - op = 'Cast' - enabled = True - - @classmethod - def extract(cls, node): - to = onnx_attr(node, 'to', 'i', default=None) - Cast.update_node_stat(node, {'dst_type': get_onnx_datatype_as_numpy(to)}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/clip_ext.py b/tools/mo/openvino/tools/mo/front/onnx/clip_ext.py deleted file mode 100644 index 5ecc7d15ff684a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/clip_ext.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version, onnx_node_has_attr -from openvino.tools.mo.ops.clamp import Clamp, AttributedClamp - - -class ClipFrontExtractor(FrontExtractorOp): - op = 'Clip' - enabled = True - - @classmethod - def extract(cls, node): - if get_onnx_opset_version(node) < 11: - attrs = { - 'min': onnx_attr(node, 'min', 'f', np.finfo(np.float32).min), - 'max': onnx_attr(node, 'max', 'f', np.finfo(np.float32).max), - } - AttributedClamp.update_node_stat(node, attrs) - else: - if onnx_node_has_attr(node, 'min') or onnx_node_has_attr(node, 'max'): - log.error("ONNX Clip-11 operation '{}' shouldn't have attributes 'min' and 'max', this may mean that " - "this operation created with older opset version.".format( - node.soft_get('name', node.id)), extra={'is_warning': True}) - Clamp.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/concat_ext.py b/tools/mo/openvino/tools/mo/front/onnx/concat_ext.py deleted file mode 100644 index 0e0d41a0f87b56..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/concat_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.front.extractor import FrontExtractorOp -from 
openvino.tools.mo.ops.concat import Concat - -class ConcatFrontExtractor(FrontExtractorOp): - op = 'Concat' - enabled = True - - @classmethod - def extract(cls, node): - mapping_rule = { - 'axis': onnx_attr(node, 'axis', 'i', default=0) - } - Concat.update_node_stat(node, mapping_rule) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/const_ext.py b/tools/mo/openvino/tools/mo/front/onnx/const_ext.py deleted file mode 100644 index 9cb6811cee8192..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/const_ext.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from onnx import numpy_helper -from onnx.numpy_helper import to_array - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.const import Const - - -class ConstExtractor(FrontExtractorOp): - op = 'Const' - enabled = True - - @classmethod - def extract(cls, node): - value = to_array(node.pb_init) - attrs = { - 'data_type': value.dtype, - 'value': value - } - Const.update_node_stat(node, attrs) - return cls.enabled - - -class ConstantExtractor(FrontExtractorOp): - op = 'Constant' - enabled = True - - @classmethod - def extract(cls, node): - pb_value = onnx_attr(node, 'value', 't') - value = numpy_helper.to_array(pb_value) - - attrs = { - 'data_type': value.dtype, - 'value': value, - } - Const.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/constant_fill_ext.py b/tools/mo/openvino/tools/mo/front/onnx/constant_fill_ext.py deleted file mode 100644 index a1821e3fed6314..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/constant_fill_ext.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.constant_fill import ConstantFill -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class ConstantFillFrontExtractor(FrontExtractorOp): - op = 'ConstantFill' - enabled = True - - @classmethod - def extract(cls, node): - - value = onnx_attr(node, 'value', 'f', default=float(0.0)) - input_as_shape = onnx_attr(node, 'input_as_shape', 'i') - extra_shape = onnx_attr(node, 'extra_shape', 'ints') - shape = onnx_attr(node, 'shape', 'ints') - dtype = onnx_attr(node, 'dtype', 'i', 1) - - assert input_as_shape - assert extra_shape is None - assert shape is None - assert dtype == 1 - - attrs = { - 'fill_value': value, - 'input_as_shape': input_as_shape, - } - - ConstantFill.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/constant_of_shape_ext.py b/tools/mo/openvino/tools/mo/front/onnx/constant_of_shape_ext.py deleted file mode 100644 index 1c297cb1581eb9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/constant_of_shape_ext.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -from onnx import numpy_helper - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.constant_of_shape import ConstantOfShape - - -class ConstantOfShapeExtractor(FrontExtractorOp): - op = 'ConstantOfShape' - enabled = True - - 
@classmethod - def extract(cls, node): - fill_value = onnx_attr(node, 'value', 't', default=mo_array([0.0]), dst_type=lambda x: numpy_helper.to_array(x)) - - ConstantOfShape.update_node_stat(node, {'fill_value': fill_value}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/constant_of_shape_to_broadcast.py b/tools/mo/openvino/tools/mo/front/onnx/constant_of_shape_to_broadcast.py deleted file mode 100644 index 263a76f4c619ef..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/constant_of_shape_to_broadcast.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.broadcast import Broadcast -from openvino.tools.mo.ops.const import Const - - -class ConstantOfShapeToBroadcast(FrontReplacementPattern): - """ - Converts the 'ConstantOfShape' layer to 'Broadcast'. - - The 'ConstantOfShape' has one 1D input defining the output constant shape. The value to be filled is defined by the - 'value' attribute. The transformation creates constant node with value equal to 'value' attribute and connects it to - the first input of a newly created 'Broadcast' node which defines value to broadcast. Then the input of the - 'ConstantOfShape' is connected to the second input of the 'Broadcast'. - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for const_of_shape_node in graph.get_op_nodes(op='ConstantOfShape'): - broadcast_node = Broadcast(graph, {'name': const_of_shape_node.name + '/Broadcast'}).create_node() - const_of_shape_node.in_port(0).get_connection().set_destination(broadcast_node.in_port(1)) - broadcast_node.in_port(0).connect(Const(graph, {'name': broadcast_node.name + '/FillValue', - 'value': const_of_shape_node.fill_value} - ).create_node().out_port(0)) - const_of_shape_node.out_port(0).get_connection().set_source(broadcast_node.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/onnx/conv_ext.py b/tools/mo/openvino/tools/mo/front/onnx/conv_ext.py deleted file mode 100644 index 9d424035d85ac8..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/conv_ext.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_autopad -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.utils.error import Error - - -class ConvFrontExtractor(FrontExtractorOp): - op = 'Conv' - enabled = True - - @classmethod - def extract(cls, node): - # Extract pads attribute - # In case if pads is not specified it will be set in default (1) in infer function - pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: int64_array(x)) - assert pads is None or len(pads) % 2 == 0 - final_pad = None - if pads is not None: - pads = pads.reshape([2, -1]) - pads = np.transpose(pads) - final_pad = int64_array([[0, 0], [0, 0], *pads]) - - # Extract dilations attribute - # In case if dilations is not specified it will be set in default (1) in infer function - dilations = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: int64_array(x)) - final_dilations = int64_array([1, 1, *dilations]) if 
dilations is not None else None - - # Extract dilations attribute - # In case if dilations is not specified it will be set in default (1) in infer function - strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: int64_array(x)) - final_strides = int64_array([1, 1, *strides]) if strides is not None else None - - kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None) - auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad) - group = onnx_attr(node, 'group', 'i', default=1, dst_type=lambda x: int64_array(x)) - - attrs = { - 'op': __class__.op, - 'auto_pad': auto_pad, - 'bias_addable': True, - 'bias_term': None, - 'pad': final_pad, - 'pad_spatial_shape': int64_array(pads) if pads is not None else None, - 'dilation': final_dilations, - 'output_spatial_shape': None, - 'output_shape': None, - 'stride': final_strides, - 'group': group, - 'output': None, - 'kernel_spatial': int64_array(kernel_shape) if kernel_shape is not None else None, - - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'kernel_spatial_idx': None, # Will be calculated in infer function (np.array([2, 3])) - - 'spatial_dims': None, # Will be calculated in infer function - 'channel_dims': int64_array([1]), - 'batch_dims': int64_array([0]), - 'layout': 'NCHW' - } - - # update the attributes of the node - Convolution.update_node_stat(node, attrs) - return cls.enabled - - -class ConvTransposeFrontExtractor(FrontExtractorOp): - op = 'ConvTranspose' - enabled = True - - @staticmethod - def get_pad(node, input_shape, kernel_shape): - # Reference: https://github.com/onnx/onnx/blob/master/docs/Operators.md#ConvTranspose - input_shape = node.in_node(0).shape - pad = np.zeros((len(input_shape), 2), dtype=np.int64) - total_padding = int64_array([node.stride[node.spatial_dims][x] * - (input_shape[node.spatial_dims][x] - 1) + - node.output_padding[node.spatial_dims][x] + - kernel_shape[node.kernel_spatial_idx][x] - - node.output_spatial_shape[x] for x in range(len(node.spatial_dims))]) - if node.has_valid('auto_pad') and node.auto_pad != 'same_upper': - pad[node.spatial_dims] = int64_array( - [[total_padding[x] / 2, total_padding[x] - (total_padding[x] // 2)] for x in - range(len(node.spatial_dims))]) - else: - pad[node.spatial_dims] = int64_array( - [[total_padding[x] - (total_padding[x] // 2), total_padding[x] / 2] for x in - range(len(node.spatial_dims))]) - return pad - - @classmethod - def extract(cls, node): - pads = onnx_attr(node, 'pads', 'ints', dst_type=int64_array) - auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad) - - if pads is not None: - if len(pads) % 2 != 0: - raise Error( - 'ConvTranspose node {} specifies pads = {} which has odd number of elements. The model is not correct.', - node.soft_get('name'), - pads - ) - pads = pads.reshape([2, -1]) - pads = np.transpose(pads) - - final_pads = int64_array([[0, 0], [0, 0], *pads]) if pads is not None else None - - dilations = onnx_attr(node, 'dilations', 'ints', default=None) - final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None - - strides = onnx_attr(node, 'strides', 'ints', default=None) - final_strides = int64_array([1, 1, *strides]) if strides is not None else None - - kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', dst_type=int64_array) - - if kernel_shape is None: - raise Error( - 'ConvTranspose node {} doesn\'t have explicitly defined kernel_shape. 
It is not supported.', - node.soft_get('name') - ) - - output_padding = onnx_attr(node, 'output_padding', 'ints', default=None) - final_output_padding = int64_array([0, 0, *output_padding]) if output_padding is not None else None - - output_shape = onnx_attr(node, 'output_shape', 'ints', default=None, dst_type=int64_array) - - attrs = { - 'type': 'Deconvolution', - 'op': 'Deconv2D', - 'auto_pad': auto_pad, - 'bias_addable': True, - 'bias_term': None, # will be deduced later; not really needed - 'pad': final_pads, - 'dilation': final_dilations, - 'output_spatial_shape': output_shape, - 'original_output_spatial_shape': output_shape, - 'output_shape': None, - 'output_padding': final_output_padding, - 'stride': final_strides, - 'group': onnx_attr(node, 'group', 'i', default=1), - 'output': None, - - 'spatial_dims': None, # Will be calculated in infer function - 'channel_dims': int64_array([1]), - 'batch_dims': int64_array([0]), - 'layout': 'NCHW', - - 'input_feature_channel': 0, - 'output_feature_channel': 1, - 'get_pad': ConvTransposeFrontExtractor.get_pad, - 'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel] * node.group, - } - - # update the attributes of the node - Convolution.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/crop_ext.py b/tools/mo/openvino/tools/mo/front/onnx/crop_ext.py deleted file mode 100644 index 89baee7499888c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/crop_ext.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.crop import Crop - - -class CropFrontExtractor(FrontExtractorOp): - op = 'Crop' - enabled = True - - @classmethod - def extract(cls, node): - # borders: leftBorder, topBorder, rightBorder, bottomBordes - borders = onnx_attr(node, 'border', 'ints', default=None, dst_type=int64_array) - scale = onnx_attr(node, 'scale', 'ints', default=None, dst_type=int64_array) - - # Crop reference: https://github.com/onnx/onnx/blob/master/docs/Operators.md#Crop - if len(borders) != 4: - log.error('ONNX Crop layer {} should take exactly 4 borders instead of {}'.format(node.name, len(borders))) - return False - - attrs = {'axis': int64_array([2, 3])} - if scale is not None: - attrs.update({ - 'dim': scale, - 'offset': int64_array([borders[1], borders[0]]) - }) - else: - attrs.update({ - 'crop_begin': int64_array([borders[1], borders[0]]), - 'crop_end': int64_array([borders[3], borders[2]]) - }) - - Crop.update_node_stat(node, attrs) - return CropFrontExtractor.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/cumsum_ext.py b/tools/mo/openvino/tools/mo/front/onnx/cumsum_ext.py deleted file mode 100644 index b43c848d3007a7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/cumsum_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.cumsum import CumSum -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class CumSumFrontExtractor(FrontExtractorOp): - op = 'CumSum' - enabled = True - - @classmethod - def extract(cls, node): - exclusive = onnx_attr(node, 
'exclusive', 'i', 0) - reverse = onnx_attr(node, 'reverse', 'i', 0) - CumSum.update_node_stat(node, {'exclusive': exclusive, 'reverse': reverse}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/deformable_conv_ext.py b/tools/mo/openvino/tools/mo/front/onnx/deformable_conv_ext.py deleted file mode 100644 index 1f0a765336aafc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/deformable_conv_ext.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_autopad -from openvino.tools.mo.ops.deformable_convolution import DeformableConvolution - - -class DeformableConvExtractor(FrontExtractorOp): - op = 'DeformableConv2D' - enabled = True - - @classmethod - def extract(cls, node): - # Extract pads attribute - # In case if pads is not specified it will be set in default (1) in infer function - pads = onnx_attr(node, 'pads', 'ints', default=None, dst_type=lambda x: int64_array(x)) - assert pads is None or len(pads) % 2 == 0 - final_pad = None - if pads is not None: - pads = pads.reshape([2, -1]) - pads = np.transpose(pads) - final_pad = int64_array([[0, 0], [0, 0], *pads]) - - # Extract dilations attribute - # In case if dilations is not specified it will be set in default (1) in infer function - dilations = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: int64_array(x)) - final_dilations = int64_array([1, 1, *dilations]) if dilations is not None else None - - # Extract dilations attribute - # In case if dilations is not specified it will be set in default (1) in infer function - strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: int64_array(x)) - final_strides = int64_array([1, 1, *strides]) if strides is not None else None - - kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None) - auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad) - group = onnx_attr(node, 'group', 'i', default=1, dst_type=lambda x: int64_array(x)) - deformable_groups = onnx_attr(node, 'deformable_groups', 'i', default=1) - - attrs = { - 'op': __class__.op, - 'auto_pad': auto_pad, - 'bias_addable': False, - 'bias_term': False, - 'pad': final_pad, - 'pad_spatial_shape': int64_array(pads) if pads is not None else None, - 'dilation': final_dilations, - 'output_spatial_shape': None, - 'output_shape': None, - 'stride': final_strides, - 'group': group, - 'deformable_group': deformable_groups, - 'output': None, - 'weights_index': 2, - 'kernel_spatial': int64_array(kernel_shape) if kernel_shape is not None else None, - - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'kernel_spatial_idx': None, # Will be calculated in infer function (np.array([2, 3])) - - 'spatial_dims': None, # Will be calculated in infer function - 'channel_dims': int64_array([1]), - 'batch_dims': int64_array([0]), - 'layout': 'NCHW' - } - - # update the attributes of the node - DeformableConvolution.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/depth_to_space_ext.py b/tools/mo/openvino/tools/mo/front/onnx/depth_to_space_ext.py deleted file mode 100644 index 21b817b1b3c9cf..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/depth_to_space_ext.py +++ /dev/null @@ 
-1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.ops.depth_to_space import DepthToSpaceOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class DepthToSpaceFrontExtractor(FrontExtractorOp): - op = 'DepthToSpace' - enabled = True - - @classmethod - def extract(cls, node): - # update the attributes of the node - node_name = node.soft_get('name', node.id) - block_size = onnx_attr(node, 'blocksize', 'i', default=None) - assert block_size is not None, \ - 'DepthToSpace should have "blocksize" attribute specified for node {}'.format(node_name) - onnx_mode = onnx_attr(node, 'mode', 's', default=b'DCR').decode() - assert onnx_mode in ['DCR', 'CRD'], 'Unrecognized mode provided for DepthToSpace node {}'.format(node_name) - if onnx_mode == 'DCR': - mode = 'blocks_first' - else: - mode = 'depth_first' - - DepthToSpaceOp.update_node_stat(node, {'block_size': block_size, 'mode': mode}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/dequantize_linear_ext.py b/tools/mo/openvino/tools/mo/front/onnx/dequantize_linear_ext.py deleted file mode 100644 index 4fc0ee74defa43..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/dequantize_linear_ext.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.ops.dequantize_linear import DequantizeLinear -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version - - -class DequantizeLinearFrontExtractor(FrontExtractorOp): - op = 'DequantizeLinear' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {} - if get_onnx_opset_version(node) >= 13: - axis = onnx_attr(node, 'axis', 'i', default=None) - attrs.update(axis=axis) - DequantizeLinear.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/detection_output.py b/tools/mo/openvino/tools/mo/front/onnx/detection_output.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/openvino/tools/mo/front/onnx/detection_output_ext.py b/tools/mo/openvino/tools/mo/front/onnx/detection_output_ext.py deleted file mode 100644 index 1f83f72d865da7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/detection_output_ext.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.DetectionOutput import DetectionOutput -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.utils.error import Error - - -class DetectionOutputFrontExtractor(FrontExtractorOp): - op = 'DetectionOutput' - enabled = True - - @classmethod - def extract(cls, node): - nms_threshold = onnx_attr(node, 'nms_threshold', 'f', default=0.0) - eta = onnx_attr(node, 'eta', 'f', default=0.0) - top_k = onnx_attr(node, 'top_k', 'i', default=-1) - - code_type_values = { - b"CORNER": "caffe.PriorBoxParameter.CORNER", - b"CENTER_SIZE": "caffe.PriorBoxParameter.CENTER_SIZE", - } - - code_type = onnx_attr(node, 'code_type', 's', default=code_type_values[b"CORNER"]) - try: - code_type = code_type_values[code_type] - except KeyError: - raise Error("Incorrect value of 
code_type parameter {}".format(code_type)) - - resize_mode_values = { - b"": "", - b"WARP": "caffe.ResizeParameter.WARP", - b"FIT_SMALL_SIZE": "caffe.ResizeParameter.FIT_SMALL_SIZE", - b"FIT_LARGE_SIZE_AND_PAD": "caffe.ResizeParameter.FIT_LARGE_SIZE_AND_PAD", - } - resize_mode = onnx_attr(node, 'resize_mode', 's', default=b"") - try: - resize_mode = resize_mode_values[resize_mode] - except KeyError: - raise Error("Incorrect value of resize_mode parameter {}".format(resize_mode)) - - pad_mode_values = { - b"": "", - b"CONSTANT": "caffe.ResizeParameter.CONSTANT", - b"MIRRORED": "caffe.ResizeParameter.MIRRORED", - b"REPEAT_NEAREST": "caffe.ResizeParameter.REPEAT_NEAREST" - } - pad_mode = onnx_attr(node, 'pad_mode', 's', default=b"") - try: - pad_mode = pad_mode_values[pad_mode] - except KeyError: - raise Error("Incorrect value of pad_mode parameter {}".format(pad_mode)) - - interp_mode_values = { - b"": "", - b"LINEAR": "caffe.ResizeParameter.LINEAR", - b"AREA": "caffe.ResizeParameter.AREA", - b"NEAREST": "caffe.ResizeParameter.NEAREST", - b"CUBIC": "caffe.ResizeParameter.CUBIC", - b"LANCZOS4": "caffe.ResizeParameter.LANCZOS4" - } - interp_mode = onnx_attr(node, 'interp_mode', 's', default=b"") - try: - interp_mode = interp_mode_values[interp_mode] - except KeyError: - raise Error("Incorrect value of interp_mode parameter {}".format(interp_mode)) - - attrs = { - 'num_classes': onnx_attr(node, 'num_classes', 'i', default=0), - 'share_location': onnx_attr(node, 'share_location', 'i', default=0), - 'background_label_id': onnx_attr(node, 'background_label_id', 'i', default=0), - 'code_type': code_type, - 'variance_encoded_in_target': onnx_attr(node, 'variance_encoded_in_target', 'i', default=0), - 'keep_top_k': onnx_attr(node, 'keep_top_k', 'i', default=0), - 'confidence_threshold': onnx_attr(node, 'confidence_threshold', 'f', default=0), - 'visualize_threshold': onnx_attr(node, 'visualize_threshold', 'f', default=0.6), - # nms_param - 'nms_threshold': nms_threshold, - 'top_k': top_k, - 'eta': eta, - # save_output_param.resize_param - 'prob': onnx_attr(node, 'prob', 'f', default=0), - 'resize_mode': resize_mode, - 'height': onnx_attr(node, 'height', 'i', default=0), - 'width': onnx_attr(node, 'width', 'i', default=0), - 'height_scale': onnx_attr(node, 'height_scale', 'i', default=0), - 'width_scale': onnx_attr(node, 'width_scale', 'i', default=0), - 'pad_mode': pad_mode, - 'pad_value': onnx_attr(node, 'pad_value', 's', default=""), - 'interp_mode': interp_mode, - 'input_width': onnx_attr(node, 'input_width', 'i', default=1), - 'input_height': onnx_attr(node, 'input_height', 'i', default=1), - 'normalized': onnx_attr(node, 'normalized', 'i', default=1), - } - - # update the attributes of the node - DetectionOutput.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/detection_output_onnx_ext.py b/tools/mo/openvino/tools/mo/front/onnx/detection_output_onnx_ext.py deleted file mode 100644 index 58885dd589937b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/detection_output_onnx_ext.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from math import log - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.detection_output_onnx import ExperimentalDetectronDetectionOutput - - -class 
ExperimentalDetectronDetectionOutputFrontExtractor(FrontExtractorOp): - op = 'ExperimentalDetectronDetectionOutput' - enabled = True - - @classmethod - def extract(cls, node): - attrs = dict(class_agnostic_box_regression=onnx_attr(node, 'class_agnostic_box_regression', 'i', 0), - max_detections_per_image=onnx_attr(node, 'max_detections_per_image', 'i', 100), - nms_threshold=onnx_attr(node, 'nms_threshold', 'f', 0.5), - num_classes=onnx_attr(node, 'num_classes', 'i', 81), - post_nms_count=onnx_attr(node, 'post_nms_count', 'i', 2000), - score_threshold=onnx_attr(node, 'score_threshold', 'f', 0.05), - max_delta_log_wh=onnx_attr(node, 'max_delta_log_wh', 'f', log(1000. / 16.)), - deltas_weights=float32_array(onnx_attr(node, 'deltas_weights', 'floats', [10., 10., 5., 5.])) - ) - ExperimentalDetectronDetectionOutput.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/dropout_ext.py b/tools/mo/openvino/tools/mo/front/onnx/dropout_ext.py deleted file mode 100644 index e0a4b209f84ef0..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/dropout_ext.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.identity import Identity -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.utils.error import Error - - -class DropoutFrontExtractor(FrontExtractorOp): - op = 'Dropout' - enabled = True - - @classmethod - def extract(cls, node): - # some Dropout flavors doesn't have is_test attribute; when it is missing, interpret it as 1 - is_test = onnx_attr(node, 'is_test', 'i', 1) - if len(node.out_nodes()) > 1: - raise Error('Dropout node {} has more than one consumer. Unsupported.', node.name) - if not is_test: - raise Error('Dropout node {} has is_test: 0. 
This means training mode which is not supported.', node.name) - Identity.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/einsum_ext.py b/tools/mo/openvino/tools/mo/front/onnx/einsum_ext.py deleted file mode 100644 index 86aca737dc6d68..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/einsum_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.einsum import Einsum -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class EinsumExtractor(FrontExtractorOp): - op = 'Einsum' - enabled = True - - @classmethod - def extract(cls, einsum_node): - einsum_name = einsum_node.soft_get('name', einsum_node.id) - equation = onnx_attr(einsum_node, 'equation', 's').decode(encoding="utf-8") - normalized_equation = Einsum.normalize_equation(einsum_name, equation) - Einsum.update_node_stat(einsum_node, {'equation': normalized_equation}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/elementwise_ext.py b/tools/mo/openvino/tools/mo/front/onnx/elementwise_ext.py deleted file mode 100644 index 00eaabdad524f1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/elementwise_ext.py +++ /dev/null @@ -1,217 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.ops.elementwise import Add, Sub, Mul, Div, Pow, Less, Equal, Greater, LogicalAnd, LogicalOr, LogicalXor, \ - Round, GreaterEqual, LessEqual -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.eltwise_n import EltwiseNAdd, EltwiseNMax, EltwiseNMin -from openvino.tools.mo.ops.power import AttributedPower - - -class AddFrontExtractor(FrontExtractorOp): - op = 'Add' - enabled = True - - @classmethod - def extract(cls, node: Node): - axis = onnx_attr(node, 'axis', 'i', default=None) - Add.update_node_stat(node, {'axis': axis}) - return cls.enabled - - -class SubFrontExtractor(FrontExtractorOp): - op = 'Sub' - enabled = True - - @classmethod - def extract(cls, node: Node): - axis = onnx_attr(node, 'axis', 'i', default=None) - Sub.update_node_stat(node, {'axis': axis}) - return cls.enabled - - -class MulFrontExtractor(FrontExtractorOp): - op = 'Mul' - enabled = True - - @classmethod - def extract(cls, node: Node): - axis = onnx_attr(node, 'axis', 'i', default=None) - Mul.update_node_stat(node, {'axis': axis}) - return cls.enabled - - -class DivFrontExtractor(FrontExtractorOp): - op = 'Div' - enabled = True - - @classmethod - def extract(cls, node: Node): - axis = onnx_attr(node, 'axis', 'i', default=None) - Div.update_node_stat(node, {'axis': axis}) - return cls.enabled - - -class SumFrontExtractor(FrontExtractorOp): - op = 'Sum' - enabled = True - - @classmethod - def extract(cls, node: Node): - axis = onnx_attr(node, 'axis', 'i', default=None) - EltwiseNAdd.update_node_stat(node, {'axis': axis}) - return cls.enabled - - -class PowFrontExtractor(FrontExtractorOp): - op = 'Pow' - enabled = True - - @classmethod - def extract(cls, node: Node): - Pow.update_node_stat(node) - return cls.enabled - - -class NegFrontExtractor(FrontExtractorOp): - op = 'Neg' - enabled = True - - @classmethod - def extract(cls, node: 
Node): - AttributedPower.update_node_stat(node, {'scale': -1}) - return cls.enabled - - -class SqrtExtractor(FrontExtractorOp): - op = 'Sqrt' - enabled = True - - @classmethod - def extract(cls, node): - AttributedPower.update_node_stat(node, {'power': 0.5}) - return cls.enabled - - -class ScaleFrontExtractor(FrontExtractorOp): - op = 'Scale' - enabled = True - - @classmethod - def extract(cls, node: Node): - scale = onnx_attr(node, 'scale', 'f', default=mo_array(1.0), dst_type=lambda x: mo_array(x)) - AttributedPower.update_node_stat(node, {'scale': scale}) - return cls.enabled - - -class MaxExtractor(FrontExtractorOp): - op = 'Max' - enabled = True - - @classmethod - def extract(cls, node: Node): - EltwiseNMax.update_node_stat(node) - return cls.enabled - - -class MinExtractor(FrontExtractorOp): - op = 'Min' - enabled = True - - @classmethod - def extract(cls, node: Node): - EltwiseNMin.update_node_stat(node) - return cls.enabled - - -class EqualExtractor(FrontExtractorOp): - op = 'Equal' - enabled = True - - @classmethod - def extract(cls, node): - Equal.update_node_stat(node) - return cls.enabled - - -class LessExtractor(FrontExtractorOp): - op = 'Less' - enabled = True - - @classmethod - def extract(cls, node): - Less.update_node_stat(node) - return cls.enabled - - -class GreaterExtractor(FrontExtractorOp): - op = 'Greater' - enabled = True - - @classmethod - def extract(cls, node): - Greater.update_node_stat(node) - return cls.enabled - - -class GreaterOrEqualExtractor(FrontExtractorOp): - op = 'GreaterOrEqual' - enabled = True - - @classmethod - def extract(cls, node): - GreaterEqual.update_node_stat(node) - return cls.enabled - - -class LessOrEqualExtractor(FrontExtractorOp): - op = 'LessOrEqual' - enabled = True - - @classmethod - def extract(cls, node): - LessEqual.update_node_stat(node) - return cls.enabled - - -class AndExtractor(FrontExtractorOp): - op = 'And' - enabled = True - - @classmethod - def extract(cls, node): - LogicalAnd.update_node_stat(node) - return cls.enabled - - -class OrExtractor(FrontExtractorOp): - op = 'Or' - enabled = True - - @classmethod - def extract(cls, node): - LogicalOr.update_node_stat(node) - return cls.enabled - - -class XorExtractor(FrontExtractorOp): - op = 'Xor' - enabled = True - - @classmethod - def extract(cls, node): - LogicalXor.update_node_stat(node) - return cls.enabled - - -class RoundFrontExtractor(FrontExtractorOp): - op = 'Round' - enabled = True - - @classmethod - def extract(cls, node: Node): - Round.update_node_stat(node, {'mode': 'half_to_even'}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/expand_ext.py b/tools/mo/openvino/tools/mo/front/onnx/expand_ext.py deleted file mode 100644 index b4d1fe8331d4bc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/expand_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.broadcast import Broadcast - - -class ExpandExtractor(FrontExtractorOp): - op = 'Expand' - enabled = True - - @classmethod - def extract(cls, node): - Broadcast.update_node_stat(node, {'mode': 'bidirectional'}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/extractor.py b/tools/mo/openvino/tools/mo/front/onnx/extractor.py deleted file mode 100644 index 80a64d834b949d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/extractor.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel 
Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node - - -def node_pb_arg(pb_extractor: callable): - return lambda node: pb_extractor(node.pb) - - -onnx_op_extractors = {} - - -def common_onnx_fields(node: Node): - return { - 'kind': 'op', - 'name': node.id, - # no reliable name for an onnx node, name can be empty, so we use that surrogate built as ID in the loader - 'op': node.op if node.has_valid('op') else node.pb.op_type, - } - - -def onnx_op_extractor(node: Node, lowered_keys_map: dict): - if not node.has_valid('pb'): - return True, node.graph.node[node.id] - - result = common_onnx_fields(node) - node.graph.node[node.id].update(result) - supported = False - op = result['op'].lower() - if op in lowered_keys_map: - op = lowered_keys_map[op] - assert op in onnx_op_extractors - attrs = onnx_op_extractors[op](node) - if attrs: - result.update(attrs) - supported = True - return supported, result diff --git a/tools/mo/openvino/tools/mo/front/onnx/extractors/__init__.py b/tools/mo/openvino/tools/mo/front/onnx/extractors/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/extractors/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/onnx/extractors/utils.py b/tools/mo/openvino/tools/mo/front/onnx/extractors/utils.py deleted file mode 100644 index 0fc890a399e89b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/extractors/utils.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error - - -def onnx_node_has_attr(node: Node, name: str): - attrs = [a for a in node.pb.attribute if a.name == name] - return len(attrs) != 0 - - -def onnx_attr(node: Node, name: str, field: str, default=None, dst_type=None): - """ Retrieves ONNX attribute with name `name` from ONNX protobuf `node.pb`. - The final value is casted to dst_type if attribute really exists. - The function returns `default` otherwise. - """ - attrs = [a for a in node.pb.attribute if a.name == name] - if len(attrs) == 0: - # there is no requested attribute in the protobuf message - return default - elif len(attrs) > 1: - raise Error('Found multiple entries for attribute name {} when at most one is expected. Protobuf message with ' - 'the issue: {}.', name, node.pb) - else: - res = getattr(attrs[0], field) - if dst_type is not None: - return dst_type(res) - else: - return res - - -def onnx_get_num_outputs(node: Node): - """ Retrieves number of outputs for ONNX operation. 
- """ - return len(node.pb.output) - - -def get_backend_pad(pads, spatial_dims, axis): - return [x[axis] for x in pads[spatial_dims]] - - -def get_onnx_autopad(auto_pad): - auto_pad = auto_pad.decode().lower() - if auto_pad == 'notset': - auto_pad = None - return auto_pad - - -def get_onnx_opset_version(node: Node): - return node.graph.graph.get('fw_opset_version', 0) - - -def get_onnx_datatype_as_numpy(value): - datatype_to_numpy = { - 1: np.float32, - 9: bool, - 11: np.double, - 10: np.float16, - 5: np.int16, - 6: np.int32, - 7: np.int64, - 3: np.int8, - 8: np.ubyte, - 4: np.uint16, - 12: np.uint32, - 13: np.uint64, - 2: np.uint8, - } - try: - return datatype_to_numpy[value] - except KeyError: - raise Error("Incorrect value {} for Datatype enum".format(value)) diff --git a/tools/mo/openvino/tools/mo/front/onnx/faster_rcnn.json b/tools/mo/openvino/tools/mo/front/onnx/faster_rcnn.json deleted file mode 100644 index c08e094c12cd88..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/faster_rcnn.json +++ /dev/null @@ -1,20 +0,0 @@ -[ - { - "custom_attributes": - { - "max_detections_per_image": 100, - "max_delta_log_wh": 4.135166645050049, - "score_threshold": 0.05, - "nms_threshold": 0.5, - "post_nms_count": 2000, - "input_fpn_heads": ["486", "454", "422", "390"], - "do_outputs": ["6371", "6373", "6375"], - "box_regressions_input_node": "2614", - "class_predicitons_node": "2615", - "ROIFeatureExtractor2_input": "2335", - "ROIFeatureExtractor2_output": "2592" - }, - "id": "ONNXMaskRCNNReplacement", - "match_kind": "general" - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/onnx/flattenONNX_to_reshape.py b/tools/mo/openvino/tools/mo/front/onnx/flattenONNX_to_reshape.py deleted file mode 100644 index 7aeae3ef6e999e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/flattenONNX_to_reshape.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ReduceOps import ReduceProd -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.utils.shape import node_to_get_shape_value_of_indices, new_shape_node_from_shape_nodes - - -class FlattenONNXToReshape(FrontReplacementSubgraph): - """ - ONNX Flatten operation flattens the input tensor into a 2D matrix by given axis: - - Input of shape [d_0, d_1, ... d_n] - Output of shape [d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn] - - Corner case with axis=0: output shape will be [1, d_0 X d_1 ... 
X dn] - """ - enabled = True - - def pattern(self): - return dict(nodes=[('flatten', dict(op='FlattenONNX'))], - edges=[]) - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['flatten'] - name = node.soft_get('name', node.id) - - assert node.has_valid('axis'), 'Flatten {} should have `axis` attribute extracted, but it\'s not'.format(name) - axis = node.axis - - reshape_node = Reshape(graph, {'name': node.id + '/Reshape'}).create_node() - - if axis == 0: - dim = Const(graph, {'value': int64_array([1, -1]), 'name': reshape_node.name + '/shape'}).create_node() - elif axis == 1: - dim = Const(graph, {'value': int64_array([0, -1]), 'name': reshape_node.name + '/shape'}).create_node() - else: - shape = Shape(graph, {'name': name + '/input_shape'}).create_node() - - idxs = list(range(axis)) if axis > 0 else list(range(axis, 0)) - - axis_shape_portion = node_to_get_shape_value_of_indices(shape, idxs) - first_dims = create_op_node_with_second_input(graph, ReduceProd, int64_array([0]), - {'name': name + '/first_dims', 'keep_dims': True}) - second_dims = Const(graph, {'value': int64_array([-1]), 'name': name + '/second_dims'}).create_node() - - node.in_port(0).get_source().connect(shape.in_port(0)) - axis_shape_portion.out_port(0).connect(first_dims.in_port(0)) - - order_of_dims = [first_dims, second_dims] if axis > 0 else [second_dims, first_dims] - - dim = new_shape_node_from_shape_nodes(order_of_dims) - - reshape_node.in_port(1).connect(dim.out_port(0)) - - node.out_port(0).get_connection().set_source(reshape_node.out_port(0)) - node.in_port(0).get_connection().set_destination(reshape_node.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/onnx/flatten_ext.py b/tools/mo/openvino/tools/mo/front/onnx/flatten_ext.py deleted file mode 100644 index 3d001e5a6f33c4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/flatten_ext.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.flatten import FlattenONNX - - -class FlattenFrontExtractor(FrontExtractorOp): - op = 'Flatten' - enabled = True - - @classmethod - def extract(cls, node): - axis = onnx_attr(node, 'axis', 'i', 1) - attrs = { - 'axis': axis - } - - FlattenONNX.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/fused_bn_ext.py b/tools/mo/openvino/tools/mo/front/onnx/fused_bn_ext.py deleted file mode 100644 index d34633e5ac1b9f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/fused_bn_ext.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.BatchNormInference import BatchNormInference -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class BatchNormalizationExtractor(FrontExtractorOp): - op = 'BatchNormalization' - enabled = True - - @classmethod - def extract(cls, node): - attr_dict = { - 'data_format': 'NCHW', - 'eps': onnx_attr(node, 'epsilon', 'f', 1e-5), - } - BatchNormInference.update_node_stat(node, attr_dict) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/gather_ext.py b/tools/mo/openvino/tools/mo/front/onnx/gather_ext.py deleted file mode 100644 index 8de970e1c6686f..00000000000000 --- 
a/tools/mo/openvino/tools/mo/front/onnx/gather_ext.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.ops.gather import AttributedGather -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class GatherFrontExtractor(FrontExtractorOp): - op = 'Gather' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'axis': int64_array(onnx_attr(node, 'axis', 'i', default=0)) - } - - AttributedGather.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/gatherelements_ext.py b/tools/mo/openvino/tools/mo/front/onnx/gatherelements_ext.py deleted file mode 100644 index 4640c83dfb6eb4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/gatherelements_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.gatherelements import GatherElements -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class GatherElementsFrontExtractor(FrontExtractorOp): - op = 'GatherElements' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'axis': onnx_attr(node, 'axis', 'i', default=0) - } - GatherElements.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/gathernd_ext.py b/tools/mo/openvino/tools/mo/front/onnx/gathernd_ext.py deleted file mode 100644 index 5752f1e8ed7631..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/gathernd_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.gathernd import GatherND -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class GatherNDFrontExtractor(FrontExtractorOp): - op = 'GatherND' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'batch_dims': onnx_attr(node, 'batch_dims', 'i', default=0) - } - GatherND.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/gemm_ext.py b/tools/mo/openvino/tools/mo/front/onnx/gemm_ext.py deleted file mode 100644 index f64de806f7e135..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/gemm_ext.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.ops.MatMul import GemmONNX -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class GemmFrontExtractor(FrontExtractorOp): - op = 'Gemm' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'alpha': onnx_attr(node, 'alpha', 'f', 1), - 'beta': onnx_attr(node, 'beta', 'f', 1), - 'transpose_a': onnx_attr(node, 'transA', 'i', 0), - 'transpose_b': onnx_attr(node, 'transB', 'i', 0), - 'broadcast_c': onnx_attr(node, 'broadcast', 'i', 1), - # TODO: there is no axis in onnx operators.md - 'axis': int64_array(onnx_attr(node, 'axis', 'i', default=0)) - } - GemmONNX.update_node_stat(node, attrs) - return 
cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/group_norm_ext.py b/tools/mo/openvino/tools/mo/front/onnx/group_norm_ext.py deleted file mode 100644 index 8f098d058c259d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/group_norm_ext.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.group_norm import GroupNorm - - -class ExperimentalDetectronGroupNorm(FrontExtractorOp): - op = 'ExperimentalDetectronGroupNorm' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=float), - 'num_groups': int64_array(onnx_attr(node, 'num_groups', 'i', default=1)), - } - GroupNorm.update_node_stat(node, attrs) - return cls.enabled - - -class GroupNormExtractor(FrontExtractorOp): - op = 'GroupNorm' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'eps': mo_array(onnx_attr(node, 'eps', 'f', default=1e-6), dtype=float), - 'num_groups': int64_array(onnx_attr(node, 'num_groups', 'i', default=1)), - } - GroupNorm.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/gru_ext.py b/tools/mo/openvino/tools/mo/front/onnx/gru_ext.py deleted file mode 100644 index 887c95b5274155..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/gru_ext.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array, int64_array -from openvino.tools.mo.ops.GRU import GRU -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class GRUFrontExtractor(FrontExtractorOp): - op = 'GRU' - enabled = True - - @classmethod - def extract(cls, node): - activation_alpha = onnx_attr(node, 'activation_alpha', 'floats', - default=None, dst_type=lambda x: float32_array(x)) - activation_beta = onnx_attr(node, 'activation_beta', 'floats', - default=None, dst_type=lambda x: float32_array(x)) - activations = onnx_attr(node, 'activations', 'strings', default=None, - dst_type=lambda x: list(map(lambda s: s.decode(encoding="utf-8").lower(), list(x)))) - clip = onnx_attr(node, 'clip', 'f', default=None) - linear_before_reset = onnx_attr(node, 'linear_before_reset', 'i', default=0) - - attrs = { - 'batch_dim': 1, - 'sequence_dim': 0, - 'blobs_wrb': True, - 'has_num_directions': True, - 'num_layers': 1, - 'format': 'onnx', - 'multilayers': False, - 'gate_order': [0, 1, 2], - - # ONNX - specific attrs - 'activation_alpha': activation_alpha, - 'activation_beta': activation_beta, - 'activations': activations, - 'clip': clip, - 'direction': onnx_attr(node, 'direction', 's', b'forward').decode().lower(), - 'hidden_size': int64_array(onnx_attr(node, 'hidden_size', 'i')), - 'linear_before_reset': linear_before_reset, - } - - GRU.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/hard_sigmoid_ext.py b/tools/mo/openvino/tools/mo/front/onnx/hard_sigmoid_ext.py deleted file mode 100644 index ba1c2a5b262a7a..00000000000000 --- 
a/tools/mo/openvino/tools/mo/front/onnx/hard_sigmoid_ext.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.ops.hard_sigmoid import HardSigmoid -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs - - -class HardSigmoidFrontExtractor(FrontReplacementOp): - op = 'HardSigmoid' - enabled = True - - def replace_op(self, graph: Graph, node: Node): - alpha = onnx_attr(node, 'alpha', 'f', default=0.2) - beta = onnx_attr(node, 'beta', 'f', default=0.5) - - hard_sigmoid = create_op_with_const_inputs(graph, HardSigmoid, {1: mo_array(alpha), 2: mo_array(beta)}, - {'name': node.name + '/HardSigmoid_'}) - - node.in_port(0).get_connection().set_destination(hard_sigmoid.in_port(0)) - return [hard_sigmoid.id] diff --git a/tools/mo/openvino/tools/mo/front/onnx/identity_ext.py b/tools/mo/openvino/tools/mo/front/onnx/identity_ext.py deleted file mode 100644 index 3c10c9b5f4e0cd..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/identity_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.identity import Identity - - -class IdentityFrontExtractor(FrontExtractorOp): - op = 'Identity' - enabled = True - - @classmethod - def extract(cls, node): - Identity.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/image_scaler_ext.py b/tools/mo/openvino/tools/mo/front/onnx/image_scaler_ext.py deleted file mode 100644 index d33c8cbb46750f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/image_scaler_ext.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class ImageScalerFrontExtractor(FrontExtractorOp): - op = 'ImageScaler' - enabled = True - - @classmethod - def extract(cls, node): - dst_type = lambda x: mo_array(x) - - scale = onnx_attr(node, 'scale', 'f', default=mo_array(1.0), dst_type=dst_type) - bias = onnx_attr(node, 'bias', 'floats', default=None, dst_type=dst_type) - - # Expand dims for bias in case if it is not scalar - if bias.ndim != 0: - broadcast_dims_cnt = 2 if node.graph.graph['layout'] == 'NCHW' else 0 - for idx in range(broadcast_dims_cnt): - bias = np.expand_dims(bias, axis=-1) - - node['scale'] = scale - node['bias'] = bias - - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/instance_normalization_ext.py b/tools/mo/openvino/tools/mo/front/onnx/instance_normalization_ext.py deleted file mode 100644 index ecd79e253b814f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/instance_normalization_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.instance_normalization import InstanceNormalization -from openvino.tools.mo.front.extractor import FrontExtractorOp -from 
openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class InstanceNormalizationExtractor(FrontExtractorOp): - op = 'InstanceNormalization' - enabled = True - - @classmethod - def extract(cls, node): - epsilon = onnx_attr(node, 'epsilon', 'f', default=float(1e-5)) - InstanceNormalization.update_node_stat(node, {'epsilon': epsilon}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/loader.py b/tools/mo/openvino/tools/mo/front/onnx/loader.py deleted file mode 100644 index 30ea56fc8efb83..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/loader.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import logging as log - -import onnx - -from openvino.tools.mo.graph.graph import fill_graph_with_nodes, Graph, Node -from openvino.tools.mo.utils.error import Error, FrameworkError - - -def load_onnx_model(file_name: str): - try: - onnx_model = onnx.load(file_name) - except Exception as e: - raise FrameworkError( - 'Cannot read the model file: "{}" is incorrect ONNX model file. Details: {}', - file_name, - str(e) - ) from e - - return onnx_model - - -def protobuf_attrs(pb): - return {'pb': pb} - - -def node_id(pb): - ''' The result of this function should be passed to unique_id to be used as a unuque ID for new node creation. ''' - if pb.name: - return str(pb.name) - elif len(pb.output): - # node may have multiple outputs, we choose the first one - return pb.output[0] - else: - return 'NoNamed' - - -def protobuf2nx(graph: Graph, pb): - """ - Convert proto message with ONNX model to equivalent NX representation. All nodes and edges are restored here as - ONNX model has op/data representation, that means that nodes are connected via tensor names. Name of tensors are - defined on demand in nodes, so we have a code similar to Caffe here. - - :param graph: the Graph object to load the graph into - :param pb: the ONNX file protobuf message - :return: None - """ - # maps a tensor name to a node produced it and the node port: str -> (node_id, node_port) - data_nodes_map = {} - - graph_pb = pb.graph - add_initializers_and_inputs_to_graph(graph, graph_pb, data_nodes_map) - - # Preserve inputs order - graph.inputs_order = [] - for inp in graph_pb.input: - name = str(inp.name) - graph.inputs_order.append(name) - - output_ids = [] - for outp in graph_pb.output: - name = str(outp.name) - if graph.has_node(name): - log.error('Name {} of output node already exists in graph. Ignoring this output. 
If the output is required,' - ' please rename it.'.format(name), extra={'is_warning': True}) - continue - else: - # add fake node on output - graph.add_node(name, kind='op', op='FakeOutput', pb=outp) - output_ids.append(name) - - # Preserve outputs order - graph.outputs_order = output_ids - - # Go through all nodes in the original model order (because data nodes are defined on-the-fly and order is - # important) - for node in graph_pb.node: - # create an NX node - fw_name = node_id(node) - id = graph.unique_id(fw_name) - graph.add_node(id, pb=node, kind='op') - if hasattr(graph, 'op_names_statistic') and hasattr(node, 'op_type'): - graph.op_names_statistic[node.op_type] += 1 - - # add incoming edges based on data_nodes_map - for dst_port, inp in enumerate(node.input): - # should add edge inp --> id - if inp not in data_nodes_map: - if inp == '': - # input is omitted; most likely it corresponds to an optional input for an operator - continue - else: - raise Error( - 'Reference to {} is not satisfied. A node refer not existing data tensor. ONNX model is not ' - 'consistent. Protobuf fragment: {}', inp, node) - src_id, src_port = data_nodes_map[inp] - - assert (graph.has_node(src_id)) - edge_attrs = { - 'out': src_port, - 'in': dst_port, - 'name': inp, - 'fw_tensor_debug_info': [(src_id, inp)], - 'in_attrs': ['in', 'name'], - 'out_attrs': ['out', 'name'], - 'data_attrs': ['fw_tensor_debug_info'] - } - graph.add_edge(src_id, id, **edge_attrs) - - # add outgoing edges to data_nodes_map - for src_port, out in enumerate(node.output): - if out in output_ids: - edge_attrs = { - 'out': src_port, - 'in': 0, - 'name': out, - 'fw_tensor_debug_info': [(fw_name, out)], - 'in_attrs': ['in', 'name'], - 'out_attrs': ['out', 'name'], - 'data_attrs': ['fw_tensor_debug_info'] - } - graph.add_edge(id, out, **edge_attrs) - if out in data_nodes_map: - log.debug("Detected reuse of blob {}.".format(out)) - data_nodes_map[out] = (id, src_port) - - graph.graph['tensor_mapping'] = data_nodes_map # save main graph tensor names mapping for Loop op parsing - - -def add_initializers_and_inputs_to_graph(graph: Graph, graph_pb, data_nodes_map: dict): - """ - The function adds nodes specified in the 'initializer' attribute of the pb and input nodes. 
- :param graph: the Graph to add nodes to - :param graph_pb: the graph protobuf message - :param data_nodes_map: the dictionary with mapping of tensor names to node id and port - :return: the list of Parameter nodes - """ - initializers = Graph() - fill_graph_with_nodes(initializers, graph_pb.initializer, get_id=lambda pb: pb.name, get_attrs=protobuf_attrs) - - parameters = [] - # first go through all inputs and separate constant from placeholders - for inp in graph_pb.input: - name = str(inp.name) - if graph.has_node(name): - raise Error('Name {} of input node already exists, input names are duplicated.', name) - elif initializers.has_node(name): - graph.add_node(name, kind='op', op='Const', pb=inp, pb_init=initializers.node[name]['pb']) - else: - graph.add_node(name, kind='op', op='Parameter', pb=inp) - parameters.append(Node(graph, name)) - - assert name not in data_nodes_map, 'Inconsistency between data_nodes_map and graph.nodes' - data_nodes_map[name] = (name, 0) - - # go over all initializers and make sure that all of them are added to the graph - for initializer in initializers.nodes(): - initializer_id = initializer - if not graph.has_node(initializer_id): - graph.add_node(initializer_id, kind='op', op='Const', pb=initializers.node[initializer]['pb'], - pb_init=initializers.node[initializer]['pb']) - data_nodes_map[initializer] = (initializer_id, 0) - return parameters diff --git a/tools/mo/openvino/tools/mo/front/onnx/logsoftmaxONNX_to_logsoftmax.py b/tools/mo/openvino/tools/mo/front/onnx/logsoftmaxONNX_to_logsoftmax.py deleted file mode 100644 index fcaaa945db3416..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/logsoftmaxONNX_to_logsoftmax.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.ops.flatten import FlattenONNX -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.log_softmax import LogSoftmax - - -class LogSoftmaxONNXFrontReplacer(FrontReplacementOp): - """ - Replace LogSoftmaxONNX operation with FlattenONNX -> LogSoftmax -> Reshape subgraph - """ - op = "LogSoftmaxONNX" - enabled = True - - def run_before(self): - from openvino.tools.mo.front.onnx.flattenONNX_to_reshape import FlattenONNXToReshape - return [FlattenONNXToReshape] - - def replace_op(self, graph: Graph, node: Node): - node_name = node.soft_get('name', node.id) - assert node.has_valid('axis'), 'The node "{}" does not have mandatory attribute "axis"'.format(node_name) - - flatten_node = FlattenONNX(graph, {'name': node_name + '/FlattenONNX_', 'axis': node.axis}).create_node() - shape_node = Shape(graph, {'name': node_name + '/ShapeOf_'}).create_node() - logsoftmax_node = LogSoftmax(graph, {'name': node_name + '/LogSoftmax_', 'axis': 1}).create_node() - reshape_node = Reshape(graph, {}).create_node() - - rename_nodes([(node, node_name + '/delete'), (reshape_node, node_name)]) - - shape_node.out_port(0).connect(reshape_node.in_port(1)) - logsoftmax_node.out_port(0).connect(reshape_node.in_port(0)) - flatten_node.out_port(0).connect(logsoftmax_node.in_port(0)) - - source = node.in_port(0).get_source() - - flatten_node.in_port(0).connect(source) - shape_node.in_port(0).connect(source) - - return [reshape_node.id] diff --git a/tools/mo/openvino/tools/mo/front/onnx/loop_ext.py 
b/tools/mo/openvino/tools/mo/front/onnx/loop_ext.py deleted file mode 100644 index b170d3c5ec6013..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/loop_ext.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import copy -import logging as log - -from openvino.tools.mo.ops.loop import Loop -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.front.common.register_custom_ops import check_for_duplicates -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.extractor import extract_node_attrs -from openvino.tools.mo.front.onnx.extractor import onnx_op_extractor, onnx_op_extractors -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.front.onnx.loader import node_id, add_initializers_and_inputs_to_graph -from openvino.tools.mo.graph.graph import Graph, Node, add_opoutput -from openvino.tools.mo.utils.error import Error - - -def create_edge_with_attrs(graph, src_name, src_internal_id, src_port, dst_id, dst_port): - # src_name - name of input for edge - # src_internal_id - input node dst_id. Can be the same as src_name or different if Parameter was created - assert (graph.has_node(src_internal_id)) - edge_attrs = { - 'out': src_port, - 'in': dst_port, - 'name': src_name, - 'fw_tensor_debug_info': [(src_internal_id, src_name)], - 'in_attrs': ['in', 'name'], - 'out_attrs': ['out', 'name'], - 'data_attrs': ['fw_tensor_debug_info'] - } - graph.add_edge(src_internal_id, dst_id, **edge_attrs) - - -def create_parameter_with_empty_attrs(graph, param_name): - graph.add_node(param_name, kind='op', op='Parameter', name=param_name, pb=None, shape=None) - parameter_node = Node(graph, param_name) - # need to manually update necessary attrs for the node because extractor will not be called - # for it because the node does not have .pb attribute - Parameter.update_node_stat(parameter_node, {}) - parameter_node['internal_layer_id'] = len(graph.nodes) - - return parameter_node - - -def create_cross_body_edge(body_graph, external_edges, additional_params, src_internal_id, dst_id, dst_port): - cur_graph = body_graph - counter = 0 - is_finished = False - transit_parameter = None - # go through all levels of nested graphs starting from the deepest - while not is_finished and 'parent_node' in cur_graph.graph: - parent_graph = cur_graph.graph['parent_node'].graph - external_edges.append([]) - additional_params.append({}) - assert 0 <= counter < len(additional_params) - assert 0 <= counter < len(external_edges) - # if parent graph contains input node, create edge from outer to inner graph - if src_internal_id in parent_graph.graph['tensor_mapping']: - log.debug('The edge between outer and inner graphs detected: {} -> {}'.format(src_internal_id, dst_id)) - # if parameter in inner graph already created, use it. 
Otherwise - create new one - if parent_graph.graph['tensor_mapping'][src_internal_id] not in additional_params[counter - 1]: - # possibly we create edge through several levels and have created transit parameter - if transit_parameter is None: - # create new Parameter body node and connect the body node with the outer graph using it - param_id = str(src_internal_id) - parameter_node = create_parameter_with_empty_attrs(cur_graph, param_id) - src_id, src_port = param_id, 0 - else: - parameter_node = transit_parameter - src_id, src_port = transit_parameter.id, 0 - external_edges[counter].append((parent_graph.graph['tensor_mapping'][src_internal_id], - parameter_node, src_internal_id)) - additional_params[counter][parent_graph.graph['tensor_mapping'][src_internal_id][0]] = parameter_node - else: - src_id, src_port = additional_params[counter - 1][parent_graph.graph['tensor_mapping'][src_internal_id][0]].id, 0 - is_finished = True - else: - # check that we are not in process of creating edge through several borders - # if we have transit node, it becomes destination of edge - # otherwise create new Parameter - if transit_parameter is None: - # create new Parameter in inner graph in hope that we will find node later - param_id = str(src_internal_id).split(':')[0] - parameter_node = create_parameter_with_empty_attrs(cur_graph, param_id) - else: - parameter_node = transit_parameter - param_id = transit_parameter.id - - # create transit parameter in outer graph in hope that real input will be found later - parent_param_id = str(src_internal_id).split(':')[0] + "_transit" - parent_parameter_node = create_parameter_with_empty_attrs(parent_graph, parent_param_id) - - external_edges[counter].append(((parent_param_id, 0), parameter_node, parent_param_id)) - src_id, src_port = param_id, 0 - additional_params[counter][parent_param_id + ":0"] = parameter_node - transit_parameter = parent_parameter_node - - if cur_graph.has_node(dst_id): - create_edge_with_attrs(cur_graph, src_internal_id, src_id, src_port, dst_id, dst_port) - - cur_graph = parent_graph - counter += 1 - - return is_finished - - -class LoopExtractor(FrontExtractorOp): - op = 'Loop' - enabled = True - - @classmethod - def extract(cls, loop_node): - Loop.update_node_stat(loop_node, {}) - - body_graph_proto = onnx_attr(loop_node, 'body', 'g', None) - main_graph = loop_node.graph - - # create a Graph object for the body and take graph attributes from the main graph - body_graph = Graph() - main_graph_attrs_copy = {} - for attr_key, attr_value in main_graph.graph.items(): - if attr_key not in ['tensor_mapping', 'parent_node']: - main_graph_attrs_copy[attr_key] = copy.deepcopy(attr_value) - body_graph.graph.update(main_graph_attrs_copy) - loop_node['body'] = body_graph - # save parent node for nested loops to know which node contains body (and which graph is on upper level) - body_graph.graph['parent_node'] = loop_node - - # maps a tensor name to a node produced it and the node port: str -> (node_id, node_port) - data_nodes_map = {} - body_graph.graph['tensor_mapping'] = data_nodes_map # save mapping for possible Loop inside the Loop - - body_parameters = add_initializers_and_inputs_to_graph(body_graph, body_graph_proto, data_nodes_map) - - external_edges = [] # (src_node, src_out_port), dest_body_parameter_node - # save additional edges information for graph on each level, the first one is the deepest - additional_params = [] # (src_node, src_out_port) -> parameter_node (for manually added Parameters) - # Go through all nodes in the original model 
order because data nodes are defined on-the-fly and order matters - for pb_node in body_graph_proto.node: - # create an NX node - id = body_graph.unique_id(node_id(pb_node)) - body_graph.add_node(id, pb=pb_node, kind='op') - if hasattr(body_graph, 'op_names_statistic') and hasattr(pb_node, 'op_type'): - body_graph.op_names_statistic[pb_node.op_type] += 1 - - # add incoming edges based on data_nodes_map - for dst_port, inp in enumerate(pb_node.input): - # should add edge src_internal_id --> dst_id - if inp not in data_nodes_map: - if inp == '': - # input is omitted; most likely it corresponds to an optional input for an operator - continue - else: - is_finished = create_cross_body_edge(body_graph, external_edges, additional_params, - inp, id, dst_port) - if not is_finished: - raise Error( - 'Reference to "{}" is not satisfied. A node refer not existing data tensor. ONNX ' - 'model is not consistent. Protobuf fragment: {}', inp, pb_node) - else: - src_id, src_port = data_nodes_map[inp] - create_edge_with_attrs(body_graph, inp, src_id, src_port, id, dst_port) - - # add outgoing edges to data_nodes_map - for src_port, out in enumerate(pb_node.output): - if out in data_nodes_map: - log.debug("Detected reuse of blob {}.".format(out)) - data_nodes_map[out] = (id, src_port) - - body_results = [] - for output in body_graph_proto.output: - tensor_name = str(output.name) - node_name, output_port = data_nodes_map[tensor_name] - assert body_graph.has_node(node_name), 'The body graph does not contain output with name "{}"'.format( - node_name) - body_results.append(Node(body_graph, add_opoutput(body_graph, node_name, output_port, False))) - - # add 'internal_layer_id' attribute which is a must have attribute for the loop body node - for idx, body_node in enumerate(body_graph.get_op_nodes()): - body_node['internal_layer_id'] = idx - - loop_carried_dependencies_count = len(body_graph_proto.input) - 2 - scan_outputs_count = len(body_graph_proto.output) - 1 - loop_carried_dependencies_count - - # Loop inputs: - # 0 - trip count - # 1 - execution condition - # 2 .. - loop carried dependencies - - # Loop outputs: - # 0 .. loop_carried_dependencies_count - 1 - loop carried dependencies - # loop_carried_dependencies_count .. - scan outputs - - # Body inputs: - # 0 - iteration number - # 1 - execution condition - # 2 .. - loop carried dependencies - - # Body outputs: - # 0 - execution condition - # 1 .. loop_carried_dependencies_count - loop carried dependencies - # loop_carried_dependencies_count + 1 .. 
- scan outputs - - # some of the inputs/outputs may not be connected but the normalization transformation will take care of it - # connection Loop body nodes with external input edges - next_loop_input_port_idx = sorted(loop_node.in_edges().keys())[-1] + 1 - cur_graph = body_graph - for external_edges_subg in external_edges: - if 'parent_node' not in cur_graph.graph: - continue - cur_loop_node = cur_graph.graph['parent_node'] - parent_graph = cur_loop_node.graph - for (src_node, src_port), body_node, tensor_name in external_edges_subg: - create_edge_with_attrs(parent_graph, tensor_name, src_node, src_port, - cur_loop_node.id, next_loop_input_port_idx) - - Loop.connect_body_input(cur_loop_node, next_loop_input_port_idx, body_node) - next_loop_input_port_idx += 1 - cur_graph = parent_graph - - # mark current iteration input Parameter node - Loop.mark_current_iteration_parameter_node(loop_node, body_parameters[0]) - - # connect initial value for "execution condition" input of the loop - Loop.connect_body_input(loop_node, 1, body_parameters[1]) - # add back edge with "execution condition" - Loop.add_back_edge(loop_node, body_parameters[1], body_results[0]) - # mark "execution condition" Result node - Loop.mark_execution_condition_result_node(loop_node, body_results[0]) - - # connect initial value for "loop carried" dependencies variables - for idx in range(loop_carried_dependencies_count): - Loop.connect_body_input(loop_node, idx + 2, body_parameters[idx + 2]) - # add back edge for "loop carried" dependencies variables - for idx in range(loop_carried_dependencies_count): - Loop.add_back_edge(loop_node, body_parameters[idx + 2], body_results[idx + 1]) - # connect final value for "loop carried" dependencies variables - for idx in range(loop_carried_dependencies_count): - Loop.connect_body_output(loop_node, idx, body_results[idx + 1]) - - # connect "scan outputs" and mark axis for concatenation - for idx in range(loop_carried_dependencies_count, loop_carried_dependencies_count + scan_outputs_count): - Loop.connect_body_output(loop_node, idx, body_results[idx + 1], axis=0) - - # run function to parse body nodes attributes similar to the main graph - extract_node_attrs(body_graph, lambda node: onnx_op_extractor(node, check_for_duplicates(onnx_op_extractors))) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/lp_normalization_ext.py b/tools/mo/openvino/tools/mo/front/onnx/lp_normalization_ext.py deleted file mode 100644 index aa2e4f85136dce..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/lp_normalization_ext.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.ops.normalize_l2 import NormalizeL2Op -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class LPNormalizeExtractor(FrontExtractorOp): - op = 'LpNormalization' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'p': onnx_attr(node, 'p', 'i', 2), - 'axis': onnx_attr(node, 'axis', 'i', -1), - 'eps_mode': 'add', # TODO check ONNX implementation - 'eps': 1e-6, # TODO check ONNX implementation - } - if attrs['p'] == 1: - log.debug('The node {} has unsupported attribute "p" = {}'.format(node.soft_get('name'), attrs['p'])) - return False - - NormalizeL2Op.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/lrn_ext.py 
b/tools/mo/openvino/tools/mo/front/onnx/lrn_ext.py deleted file mode 100644 index 0bb5ca278105ad..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/lrn_ext.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.lrn import AttributedLRN - - -class LRNFrontExtractor(FrontExtractorOp): - op = 'LRN' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'alpha': onnx_attr(node, 'alpha', 'f', 1e-4), - 'beta': onnx_attr(node, 'beta', 'f', 0.75), - 'bias': onnx_attr(node, 'bias', 'f', 1.0), - 'local_size': onnx_attr(node, 'size', 'i', None), - } - AttributedLRN.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/lstm_ext.py b/tools/mo/openvino/tools/mo/front/onnx/lstm_ext.py deleted file mode 100644 index 908ee1b81922a8..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/lstm_ext.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array, int64_array -from openvino.tools.mo.ops.LSTM import LSTM -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class LSTMFrontExtractor(FrontExtractorOp): - op = 'LSTM' - enabled = True - - @classmethod - def extract(cls, node): - activation_alpha = onnx_attr(node, 'activation_alpha', 'floats', - default=None, dst_type=lambda x: float32_array(x)) - activation_beta = onnx_attr(node, 'activation_beta', 'floats', - default=None, dst_type=lambda x: float32_array(x)) - activations = onnx_attr(node, 'activations', 'strings', default=None, - dst_type=lambda x: list(map(lambda s: s.decode(encoding="utf-8").lower(), list(x)))) - clip = onnx_attr(node, 'clip', 'f', default=None) - input_forget = onnx_attr(node, 'input_forget', 'i', default=0) - - attrs = { - 'batch_dim': 1, - 'sequence_dim': 0, - 'blobs_wrb': True, - 'has_num_directions': True, - 'num_layers': 1, - 'format': 'onnx', - 'multilayers': False, - 'gate_order': [2, 0, 3, 1], # iofc --> fico - - # ONNX attrs - 'activation_alpha': activation_alpha, - 'activation_beta': activation_beta, - 'activations': activations, - 'clip': clip, - 'direction': onnx_attr(node, 'direction', 's', b'forward').decode().lower(), - 'hidden_size': int64_array(onnx_attr(node, 'hidden_size', 'i')), - 'input_forget': input_forget, - } - - LSTM.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/mask_rcnn.json b/tools/mo/openvino/tools/mo/front/onnx/mask_rcnn.json deleted file mode 100644 index adc5b02d050f96..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/mask_rcnn.json +++ /dev/null @@ -1,21 +0,0 @@ -[ - { - "custom_attributes": - { - "max_detections_per_image": 100, - "max_delta_log_wh": 4.135166645050049, - "score_threshold": 0.05, - "nms_threshold": 0.5, - "post_nms_count": 2000, - "input_fpn_heads": ["486", "454", "422", "390"], - "do_outputs": ["6530", "6532", "6534"], - "box_regressions_input_node": "2773", - "class_predicitons_node": "2774", - "ROIFeatureExtractor1_output": "6795", - "ROIFeatureExtractor2_input": "2490", - "ROIFeatureExtractor2_output": "2751" - }, - "id": "ONNXMaskRCNNReplacement", - "match_kind": "general" - } -] \ No newline 
at end of file diff --git a/tools/mo/openvino/tools/mo/front/onnx/mask_rcnn_conversion.py b/tools/mo/openvino/tools/mo/front/onnx/mask_rcnn_conversion.py deleted file mode 100644 index 925d1ed3d4f76e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/mask_rcnn_conversion.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.onnx.softmaxONNX_to_softmax import SoftmaxONNXFrontReplacer -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.detection_output_onnx import ExperimentalDetectronDetectionOutput -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.ops.roifeatureextractor_onnx import ExperimentalDetectronROIFeatureExtractor -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.reshape import Reshape - - -class ONNXMaskRCNNTransformation(FrontReplacementFromConfigFileGeneral): - """ - This transformation performs 3 actions: - 1. Replaces a sub-graph calculating ROIAlign over FPN heads with a single ExperimentalDetectronROIFeatureExtractor - node. - 2. Replaces a sub-graph calculating post-processing of background/foreground with a single - ExperimentalDetectronDetectionOutput node. - 3. Replaces another sub-graph calculating ROIAlign over FPN heads with a single - ExperimentalDetectronROIFeatureExtractor node. These ROIAligns get boxes from the - ExperimentalDetectronDetectionOutput node. - """ - replacement_id = 'ONNXMaskRCNNReplacement' - - def run_before(self): - # the class_predicitons_node which is used in this transformation is of op SoftMaxONNX. 
But operations of op SoftMaxONNX - # will be replaced with a transformation SoftmaxONNXFrontReplacer - return [SoftmaxONNXFrontReplacer] - - def transform_graph(self, graph: Graph, replacement_descriptions: dict): - insert_ExperimentalDetectronROIFeatureExtractor2(graph, replacement_descriptions) - insert_do(graph, replacement_descriptions) - insert_ExperimentalDetectronROIFeatureExtractor1(graph, replacement_descriptions) - - -def insert_do(graph: Graph, replacement_descriptions: dict): - do_outputs = replacement_descriptions['do_outputs'] - prior_boxes_node = Node(graph, 'ROIFeatureExtractor_2') - num_classes = 81 - box_regressions_input_node = Node(graph, replacement_descriptions['box_regressions_input_node']) - box_regressions_node = create_op_node_with_second_input(graph, Reshape, int64_array([-1, 4 * num_classes]), - dict(name='box_regressions'), box_regressions_input_node) - - class_predicitons_node = Node(graph, replacement_descriptions['class_predicitons_node']) - im_info_node = Parameter(graph, {"name": 'im_info', 'shape': int64_array([1, 3])}).create_node() - - do_node = ExperimentalDetectronDetectionOutput(graph, {'name': 'DetectionOutput', - 'class_agnostic_box_regression': 0, - 'deltas_weights': mo_array([10.0, 10.0, 5.0, 5.0]), - 'max_delta_log_wh': - replacement_descriptions['max_delta_log_wh'], - 'nms_threshold': replacement_descriptions['nms_threshold'], - 'score_threshold': - replacement_descriptions['score_threshold'], - 'num_classes': num_classes, - 'max_detections_per_image': - replacement_descriptions['max_detections_per_image'], - 'post_nms_count': replacement_descriptions['post_nms_count'] - }).create_node() - prior_boxes_node.out_port(1).connect(do_node.in_port(0)) - box_regressions_node.out_port(0).connect(do_node.in_port(1)) - class_predicitons_node.out_port(0).connect(do_node.in_port(2)) - im_info_node.out_port(0).connect(do_node.in_port(3)) - - do_output_ports = [do_node.out_port(0), do_node.out_port(1), do_node.out_port(2)] - old_do_output_nodes = [Node(graph, node_id) for node_id in do_outputs] - for old_node, new_port in zip(old_do_output_nodes, do_output_ports): - old_node.out_port(0).get_connection().set_source(new_port) - # the consumer of the second output port of the ExperimentalDetectronDetectionOutput is the Mul node which second - # input is of type int64 so it is necessary to insert Cast to have data types match - do_node.out_port(1).get_connection().insert_node(Cast(graph, {'dst_type': np.int64}).create_node()) - - -def insert_ExperimentalDetectronROIFeatureExtractor1(graph: Graph, replacement_descriptions: dict): - if 'ROIFeatureExtractor1_output' not in replacement_descriptions: - # In case of Faster-RCNN this transformation is not needed and this attribute shouldn't be set - return - input_fpn_heads = replacement_descriptions['input_fpn_heads'] - old_output_node = Node(graph, replacement_descriptions['ROIFeatureExtractor1_output']) - input_fpn_head_nodes = [Node(graph, node_id) for node_id in input_fpn_heads] - fpn_roi_align = ExperimentalDetectronROIFeatureExtractor(graph, {'name': 'ROIFeatureExtractor_1', - 'output_size': 14, - 'pyramid_scales': int64_array( - [4, 8, 16, 32, 64]), - 'sampling_ratio': 2, - 'in_ports_count': 5}).create_node() - fpn_roi_align.in_port(0).connect(Node(graph, 'DetectionOutput').out_port(0)) - for ind, fpn_node in enumerate(input_fpn_head_nodes): - fpn_roi_align.in_port(ind + 1).connect(fpn_node.out_port(0)) - - old_output_node.out_port(0).get_connection().set_source(fpn_roi_align.out_port(0)) - - -def 
insert_ExperimentalDetectronROIFeatureExtractor2(graph: Graph, replacement_descriptions: dict): - input_fpn_heads = replacement_descriptions['input_fpn_heads'] - old_output_node = Node(graph, replacement_descriptions['ROIFeatureExtractor2_output']) - input_fpn_head_nodes = [Node(graph, node_id) for node_id in input_fpn_heads] - fpn_roi_align = ExperimentalDetectronROIFeatureExtractor(graph, {'name': 'ROIFeatureExtractor_2', - 'output_size': 7, - 'pyramid_scales': int64_array( - [4, 8, 16, 32, 64]), - 'sampling_ratio': 2, - 'in_ports_count': 5}).create_node() - fpn_roi_align.in_port(0).connect(Node(graph, replacement_descriptions['ROIFeatureExtractor2_input']).out_port(0)) - for ind, fpn_node in enumerate(input_fpn_head_nodes): - fpn_roi_align.in_port(ind + 1).connect(fpn_node.out_port(0)) - - old_output_node.out_port(0).get_connection().set_source(fpn_roi_align.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/onnx/matmul_ext.py b/tools/mo/openvino/tools/mo/front/onnx/matmul_ext.py deleted file mode 100644 index fd90f2c95d30b9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/matmul_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.MatMul import MatMul -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class MatMulFrontExtractor(FrontExtractorOp): - op = 'MatMul' - enabled = True - - @classmethod - def extract(cls, node): - MatMul.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/mean_variance_normalization_ext.py b/tools/mo/openvino/tools/mo/front/onnx/mean_variance_normalization_ext.py deleted file mode 100644 index 9a358de1228d75..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/mean_variance_normalization_ext.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.mvn import MVNOnnx -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class MeanVarianceNormalizationExtractor(FrontExtractorOp): - op = 'MeanVarianceNormalization' - enabled = True - - @classmethod - def extract(cls, node): - axes = onnx_attr(node, 'axes', 'ints', - default=int64_array([0, 2, 3]), - dst_type=lambda x: int64_array(x)) - - attrs = { - 'eps': 1e-9, - 'normalize_variance': 1, - 'axes': axes, - 'eps_mode': 'outside_sqrt', - } - - MVNOnnx.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/non_max_suppression_ext.py b/tools/mo/openvino/tools/mo/front/onnx/non_max_suppression_ext.py deleted file mode 100644 index 7dc01ee4dde3b3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/non_max_suppression_ext.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.non_max_suppression import NonMaxSuppression -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class NonMaxSuppressionExtractor(FrontExtractorOp): - op = 'NonMaxSuppression' - enabled = True - - @classmethod - def extract(cls, node): - encoding_map = {0: 'corner', 1: 'center'} - center_point_box = onnx_attr(node, 'center_point_box', 'i', default=0) - 
NonMaxSuppression.update_node_stat(node, {'sort_result_descending': 0, - 'output_type': np.int64, - 'box_encoding': encoding_map[center_point_box]}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/non_zero_ext.py b/tools/mo/openvino/tools/mo/front/onnx/non_zero_ext.py deleted file mode 100644 index b4001973d52836..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/non_zero_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.non_zero import NonZero -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class NonZeroExtractor(FrontExtractorOp): - op = 'NonZero' - enabled = True - - @classmethod - def extract(cls, node): - NonZero.update_node_stat(node, {'output_type': np.int64}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/normalize_ext.py b/tools/mo/openvino/tools/mo/front/onnx/normalize_ext.py deleted file mode 100644 index dc33bb71328982..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/normalize_ext.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.normalize import NormalizeOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class NormalizeFrontExtractor(FrontExtractorOp): - op = 'Normalize' - enabled = True - - @classmethod - def extract(cls, node): - across_spatial = onnx_attr(node, 'across_spatial', 'i', default=0) - channel_shared = onnx_attr(node, 'channel_shared', 'i', default=0) - eps = onnx_attr(node, 'eps', 'f', default=0) - - attrs = {'across_spatial': bool(across_spatial), - 'channel_shared': bool(channel_shared), - 'eps': eps, - 'layout': 'NCHW'} - - # update the attributes of the node - NormalizeOp.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/normalize_l2_normalize.py b/tools/mo/openvino/tools/mo/front/onnx/normalize_l2_normalize.py deleted file mode 100644 index 64f48f660fe00b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/normalize_l2_normalize.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class NormalizeL2Normalize(FrontReplacementPattern): - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for normalize_l2 in graph.get_op_nodes(op='NormalizeL2'): - if normalize_l2.in_port(1).disconnected(): - assert normalize_l2.has_valid('axis'), 'The NormalizeL2 node "{}" misses "axis" attribute.' 
\ - ''.format(normalize_l2.name) - axis_node = Const(graph, {'name': normalize_l2.id + '/Axis', - 'value': int64_array([normalize_l2.axis])}).create_node() - normalize_l2.in_port(1).connect(axis_node.out_port(0)) - del normalize_l2['axis'] - else: - log.debug('The NormalizeL2 node input "{}" is already normalized'.format(normalize_l2.name)) diff --git a/tools/mo/openvino/tools/mo/front/onnx/one_hot_ext.py b/tools/mo/openvino/tools/mo/front/onnx/one_hot_ext.py deleted file mode 100644 index 2f908328481ad4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/one_hot_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.one_hot import OneHot -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class OneHotExtractor(FrontExtractorOp): - op = 'OneHot' - enabled = True - - @classmethod - def extract(cls, node): - axis = onnx_attr(node, 'axis', 'i', default=-1) - OneHot.update_node_stat(node, {'axis': axis, 'split_values': True}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/one_hot_normalize.py b/tools/mo/openvino/tools/mo/front/onnx/one_hot_normalize.py deleted file mode 100644 index a430fe938261c3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/one_hot_normalize.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.split_normalizer import SqueezeAxis -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph - - -class OneHotNormalize(FrontReplacementSubgraph): - """ - The ONNX OneHot layer has input with values of shape [2] which contains off and on values. This transformation - splits this input into two and connects them back to the OneHot layer in reverse order because in OV layer the - on value goes to port 2 and off values goes to port 3. 
- """ - enabled = True - - def run_before(self): - return [SqueezeAxis] - - def pattern(self): - return dict(nodes=[('onehot', dict(op='OneHot', split_values=True))], - edges=[]) - - def replace_sub_graph(self, graph: Graph, match: dict): - onehot = match['onehot'] - name = onehot.soft_get('name', onehot.id) - - split = create_op_with_const_inputs(graph, Split, {1: np.int64(0)}, - {'name': name + '/Split', 'num_splits': 2, 'squeeze_axis': True}) - - onehot.in_port(2).get_source().connect(split.in_port(0)) - onehot.in_port(2).disconnect() - - onehot.in_port(3).connect(split.out_port(0)) - onehot.in_port(2).connect(split.out_port(1)) diff --git a/tools/mo/openvino/tools/mo/front/onnx/pad_converter.py b/tools/mo/openvino/tools/mo/front/onnx/pad_converter.py deleted file mode 100644 index c1e63fe3f78f09..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/pad_converter.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_node, Node -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.pad import Pad - - -class ONNXPadToPad(FrontReplacementOp): - """ - This transformation converts ONNXPad operation (ONNX semantic) to Pad operation (OpenVINO semantic). - Refer to the Op implementation for the operations semantics description. - """ - op = 'ONNXPad' - enabled = True - - def replace_op(self, graph: Graph, node: Node): - # save the original node name to use it in the new Pad op instance - original_name = node.soft_get('name', node.id) - rename_node(node, original_name + '/TBR') - - new_pad = Pad(graph, {'mode': node.soft_get('mode', None)}).create_node() - rename_node(new_pad, original_name) - - node.in_port(0).get_connection().set_destination(new_pad.in_port(0)) - - if node.soft_get('mode') == 'constant': - # the input with fill value is an optional third input in ONNX - if not node.in_port(2).disconnected(): - node.in_port(2).get_connection().set_destination(new_pad.in_port(3)) - else: - new_pad.in_port(3).connect(Const(graph, {'value': 0.0}).create_node().out_port(0)) - - # convert ONNX representation of the pads as [2 * N] to MO representation: [N] and [N] - split_pads = create_op_with_const_inputs(graph, Split, {1: int64_array(0)}, {'num_splits': 2}) - node.in_port(1).get_connection().set_destination(split_pads.in_port(0)) - split_pads.out_port(0).connect(new_pad.in_port(1)) - split_pads.out_port(1).connect(new_pad.in_port(2)) - - return [new_pad.id] diff --git a/tools/mo/openvino/tools/mo/front/onnx/pad_ext.py b/tools/mo/openvino/tools/mo/front/onnx/pad_ext.py deleted file mode 100644 index d6643f17b07ef9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/pad_ext.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version -from openvino.tools.mo.ops.pad import AttributedPad, ONNXPad - - -class PadFrontExtractor(FrontExtractorOp): - op = 'Pad' - enabled 
= True - - @classmethod - def extract(cls, node): - mode = onnx_attr(node, 'mode', 's', default='constant', dst_type=lambda x: x.decode()) - # Pytorch 1.3 while converting to opset 11, creates Pad from older opset. - # To be able to convert such models we have to check if pads attribute exists. - pads = onnx_attr(node, 'pads', 'ints', dst_type=int64_array) - if get_onnx_opset_version(node) < 11 or pads is not None: - value = onnx_attr(node, 'value', 'f', default=0.) - - assert pads is not None, 'pads is required attribute for Pad operation' - - # MO Pad op and ONNX Pad op have different format for pads values - # MO Pad has Dx2 where D is the total number of dimensions - # ONNX Pad pads flat layout, so need to reshape and transpose - - pads = np.transpose(pads.reshape([2, -1])) - - AttributedPad.update_node_stat(node, {'mode': mode, 'pads': pads, 'fill_value': value}) - else: - ONNXPad.update_node_stat(node, {'mode': mode}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/parameter_ext.py b/tools/mo/openvino/tools/mo/front/onnx/parameter_ext.py deleted file mode 100644 index 17feedc1f775d0..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/parameter_ext.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE - -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class PlaceholderFrontExtractor(FrontExtractorOp): - op = 'Parameter' - enabled = True - - @classmethod - def extract(cls, node): - t_type = node.pb.type.tensor_type - attrs = { - 'shape': shape_array([d.dim_value if (not hasattr(d, 'dim_param') or d.dim_param == '') and d.dim_value != 0 - else dynamic_dimension_value for d in t_type.shape.dim]), - 'data_type': TENSOR_TYPE_TO_NP_TYPE[t_type.elem_type] - } - Parameter.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/person_detection_crossroad.json b/tools/mo/openvino/tools/mo/front/onnx/person_detection_crossroad.json deleted file mode 100644 index 8fbd55564f6fbb..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/person_detection_crossroad.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "custom_attributes": - { - "fpn_heads": ["634", "635", "636", "637"], - "ROI_feature_extractor_inputs": ["2475", "2834", "3192"], - "ROI_feature_extractor_outputs": ["2614", "2972", "3330"] - }, - "id": "ONNXPersonDetectionCrossroadReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/onnx/person_detection_crossroad_conversion.py b/tools/mo/openvino/tools/mo/front/onnx/person_detection_crossroad_conversion.py deleted file mode 100644 index 6a9acfd0af69eb..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/person_detection_crossroad_conversion.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.roifeatureextractor_onnx import ExperimentalDetectronROIFeatureExtractor -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral -from openvino.tools.mo.graph.graph import Graph, Node, rename_node - - -class ONNXPersonDetectionCrossroadReplacement(FrontReplacementFromConfigFileGeneral): - """ - 
Insert ExperimentalDetectronROIFeatureExtractor layers instead of sub-graphs of the model. - """ - replacement_id = 'ONNXPersonDetectionCrossroadReplacement' - - def transform_graph(self, graph: Graph, replacement_descriptions: dict): - fpn_heads = replacement_descriptions['fpn_heads'] - for inp, out in zip(replacement_descriptions['ROI_feature_extractor_inputs'], - replacement_descriptions['ROI_feature_extractor_outputs']): - insert_experimental_layers(graph, fpn_heads, inp, out) - - -def insert_experimental_layers(graph: Graph, input_fpn_heads: list, inp: str, out: str): - old_output_node = Node(graph, out) - output_name = old_output_node.soft_get('name', old_output_node.id) - old_output_node_name = output_name + '/old' - rename_node(old_output_node, old_output_node_name) - - input_fpn_head_nodes = [Node(graph, node_id) for node_id in input_fpn_heads] - fpn_roi_align = ExperimentalDetectronROIFeatureExtractor(graph, {'name': output_name, - 'output_size': 7, - 'pyramid_scales': int64_array( - [4, 8, 16, 32, 64]), - 'sampling_ratio': 2, }).create_node() - rename_node(fpn_roi_align, output_name) - fpn_roi_align.in_port(0).connect(Node(graph, inp).out_port(0)) - for ind, fpn_node in enumerate(input_fpn_head_nodes): - fpn_roi_align.in_port(ind + 1).connect(fpn_node.out_port(0)) - - old_output_node.out_port(0).get_connection().set_source(fpn_roi_align.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/onnx/pooling_ext.py b/tools/mo/openvino/tools/mo/front/onnx/pooling_ext.py deleted file mode 100644 index cc272990d40b91..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/pooling_ext.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_autopad -from openvino.tools.mo.ops.pooling import Pooling -from openvino.tools.mo.utils.error import Error - - -class AveragePoolFrontExtractor(FrontExtractorOp): - op = 'AveragePool' - enabled = True - - @classmethod - def extract(cls, node): - attrs = common_onnx_pool_extractor(node) - - Pooling.update_node_stat(node, attrs) - return cls.enabled - - -class MaxPoolFrontExtractor(FrontExtractorOp): - op = 'MaxPool' - enabled = True - - @classmethod - def extract(cls, node): - attrs = common_onnx_pool_extractor(node) - - Pooling.update_node_stat(node, attrs) - return cls.enabled - - -class GlobalAveragePoolFrontExtractor(FrontExtractorOp): - op = 'GlobalAveragePool' - enabled = True - - @classmethod - def extract(cls, node): - attrs = common_onnx_pool_extractor(node) - attrs.update({'pooling_convention': 'full', - 'global_pool': True, - }) - - Pooling.update_node_stat(node, attrs) - return cls.enabled - - -class GlobalMaxPoolFrontExtractor(FrontExtractorOp): - op = 'GlobalMaxPool' - enabled = True - - @classmethod - def extract(cls, node): - attrs = common_onnx_pool_extractor(node) - attrs.update({'pooling_convention': 'full', - 'global_pool': True, - }) - - Pooling.update_node_stat(node, attrs) - return cls.enabled - - -def common_onnx_pool_extractor(node): - kernel_shape = onnx_attr(node, 'kernel_shape', 'ints', default=None, dst_type=lambda x: int64_array(x)) - final_kernel_shape = int64_array([1, 1, *[x for x in kernel_shape]]) if kernel_shape is not None else None - - pads = onnx_attr(node, 'pads', 'ints', 
default=None, dst_type=lambda x: int64_array(x)) - - if kernel_shape is not None and pads is not None and kernel_shape.size * 2 != pads.size: - log.warning('Node {} has pad = {} which is ill-formed -- it should have even amount of elements.'.format( - node.soft_get('name', node.id), pads)) - - # Try to convert slightly incorrect models with insufficient pad parameters - assert pads.size == kernel_shape.size - pads = np.concatenate([pads, pads]) - log.warning('Extended pads to {}'.format(pads)) - - final_pads = None - if pads is not None: - assert len(pads) % 2 == 0 - pads = pads.reshape([2, -1]) - pads = np.transpose(pads) - final_pads = int64_array([[0, 0], [0, 0], *[p for p in pads]]) - - # Extract strides attribute - # In case if strides is not specified it will be set in default (1) in infer function - strides = onnx_attr(node, 'strides', 'ints', default=None, dst_type=lambda x: int64_array(x)) - final_strides = int64_array([1, 1, *[x for x in strides]]) if strides is not None else None - - dilation = onnx_attr(node, 'dilations', 'ints', default=None, dst_type=lambda x: int64_array(x)) - final_dilation = int64_array([1, 1, *[x for x in dilation]]) if dilation is not None else None - - # exclude_pad = True only when count_include_pad == 0 - exclude_pad = onnx_attr(node, 'count_include_pad', 'i', default=0) == 0 - - global_pooling = False - if node.op in ['MaxPool', 'GlobalMaxPool']: - method = 'max' - elif node.op in ['AveragePool', 'GlobalAveragePool']: - method = 'avg' - else: - raise Error('Unsupported pooling op {}', node.op) - - # TODO check if it is a correct choice for ONNX - pooling_convention = 'valid' # for Caffe rounding type should be ceil - rt = 'floor' if onnx_attr(node, 'ceil_mode', 'i', default=0) == 0 else 'ceil' - - auto_pad = onnx_attr(node, 'auto_pad', 's', default=None, dst_type=get_onnx_autopad) - if auto_pad: - rt = 'ceil' - - attrs = { - 'op': node.op, - 'auto_pad': auto_pad, - 'window': final_kernel_shape, - 'stride': final_strides, - 'pad': final_pads, - 'pad_spatial_shape': int64_array(pads) if pads is not None else None, - 'pool_method': method, - 'exclude_pad': True if exclude_pad else False, - 'global_pool': global_pooling, - 'output_spatial_shape': None, - 'rounding_type': rt, - 'dilation': final_dilation, - - 'spatial_dims': None, - 'channel_dims': int64_array([1]), - 'batch_dims': int64_array([0]), - 'layout': 'NCHW', - - 'pooling_convention': pooling_convention - } - return attrs diff --git a/tools/mo/openvino/tools/mo/front/onnx/priorbox_clustered_ext.py b/tools/mo/openvino/tools/mo/front/onnx/priorbox_clustered_ext.py deleted file mode 100644 index 75ce3ed75f7da7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/priorbox_clustered_ext.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array -from openvino.tools.mo.ops.priorbox_clustered import PriorBoxClusteredOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class PriorBoxClusteredFrontExtractor(FrontExtractorOp): - op = 'PriorBoxClustered' - enabled = True - - @classmethod - def extract(cls, node): - variance = onnx_attr(node, 'variance', 'floats', default=[], dst_type=lambda x: float32_array(x)) - if len(variance) == 0: - variance = [0.1] - - update_attrs = { - 'width': onnx_attr(node, 'width', 'floats', dst_type=lambda x: float32_array(x)), - 'height': 
onnx_attr(node, 'height', 'floats', dst_type=lambda x: float32_array(x)), - 'flip': onnx_attr(node, 'flip', 'i', default=0), - 'clip': onnx_attr(node, 'clip', 'i', default=0), - 'variance': list(variance), - 'img_size': onnx_attr(node, 'img_size', 'i', default=0), - 'img_h': onnx_attr(node, 'img_h', 'i', default=0), - 'img_w': onnx_attr(node, 'img_w', 'i', default=0), - 'step': onnx_attr(node, 'step', 'f', default=0.0), - 'step_h': onnx_attr(node, 'step_h', 'f', default=0.0), - 'step_w': onnx_attr(node, 'step_w', 'f', default=0.0), - 'offset': onnx_attr(node, 'offset', 'f', default=0.0), - } - - # update the attributes of the node - PriorBoxClusteredOp.update_node_stat(node, update_attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/priorbox_ext.py b/tools/mo/openvino/tools/mo/front/onnx/priorbox_ext.py deleted file mode 100644 index 70eafd746799e7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/priorbox_ext.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array -from openvino.tools.mo.ops.priorbox import PriorBoxOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class PriorBoxFrontExtractor(FrontExtractorOp): - op = 'PriorBox' - enabled = True - - @classmethod - def extract(cls, node): - variance = onnx_attr(node, 'variance', 'floats', default=[], dst_type=lambda x: float32_array(x)) - if len(variance) == 0: - variance = [0.1] - - update_attrs = { - 'aspect_ratio': onnx_attr(node, 'aspect_ratio', 'floats', dst_type=lambda x: float32_array(x)), - 'min_size': onnx_attr(node, 'min_size', 'floats', dst_type=lambda x: float32_array(x)), - 'max_size': onnx_attr(node, 'max_size', 'floats', dst_type=lambda x: float32_array(x)), - 'flip': onnx_attr(node, 'flip', 'i', default=0), - 'clip': onnx_attr(node, 'clip', 'i', default=0), - 'variance': list(variance), - 'img_size': onnx_attr(node, 'img_size', 'i', default=0), - 'img_h': onnx_attr(node, 'img_h', 'i', default=0), - 'img_w': onnx_attr(node, 'img_w', 'i', default=0), - 'step': onnx_attr(node, 'step', 'f', default=0.0), - 'step_h': onnx_attr(node, 'step_h', 'f', default=0.0), - 'step_w': onnx_attr(node, 'step_w', 'f', default=0.0), - 'offset': onnx_attr(node, 'offset', 'f', default=0.0), - } - - # update the attributes of the node - PriorBoxOp.update_node_stat(node, update_attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/priorgridgenerator_ext.py b/tools/mo/openvino/tools/mo/front/onnx/priorgridgenerator_ext.py deleted file mode 100644 index b0efae9164bebc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/priorgridgenerator_ext.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.priorgridgenerator_onnx import ExperimentalDetectronPriorGridGenerator -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class ExperimentalDetectronPriorGridGeneratorFrontExtractor(FrontExtractorOp): - op = 'ExperimentalDetectronPriorGridGenerator' - enabled = True - - @classmethod - def extract(cls, node): - attrs = dict(h=onnx_attr(node, 'h', 'i', 0), - w=onnx_attr(node, 'w', 'i', 0), - stride_x=onnx_attr(node, 'stride_x', 'f', 0), - stride_y=onnx_attr(node, 'stride_y', 'f', 0), - 
flatten=onnx_attr(node, 'flatten', 'i', 1) - ) - ExperimentalDetectronPriorGridGenerator.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/proposal_ext.py b/tools/mo/openvino/tools/mo/front/onnx/proposal_ext.py deleted file mode 100644 index afb58c9be57987..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/proposal_ext.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.proposal_onnx import ExperimentalDetectronGenerateProposalsSingleImage -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class ExperimentalDetectronGenerateProposalsSingleImageFrontExtractor(FrontExtractorOp): - op = 'ExperimentalDetectronGenerateProposalsSingleImage' - enabled = True - - @classmethod - def extract(cls, node): - attrs = dict(min_size=onnx_attr(node, 'min_size', 'f', 0.0), - nms_threshold=onnx_attr(node, 'nms_threshold', 'f', 0.7), - post_nms_count=onnx_attr(node, 'post_nms_count', 'i', 1000), - pre_nms_count=onnx_attr(node, 'pre_nms_count', 'i', 1000) - ) - ExperimentalDetectronGenerateProposalsSingleImage.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/quantize_ext.py b/tools/mo/openvino/tools/mo/front/onnx/quantize_ext.py deleted file mode 100644 index fb97651f6766e5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/quantize_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.fakequantize import FakeQuantize -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class FakeQuantizeFrontExtractor(FrontExtractorOp): - op = 'FakeQuantize' - enabled = True - - @classmethod - def extract(cls, node): - levels = onnx_attr(node, 'levels', 'i') - FakeQuantize.update_node_stat(node, {'levels': levels}) - return FakeQuantizeFrontExtractor.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/quantize_linear_ext.py b/tools/mo/openvino/tools/mo/front/onnx/quantize_linear_ext.py deleted file mode 100644 index 60c73faccdfbb8..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/quantize_linear_ext.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.quantize_linear import QuantizeLinear -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version - - -class QuantizeLinearFrontExtractor(FrontExtractorOp): - op = 'QuantizeLinear' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {} - if get_onnx_opset_version(node) >= 13: - axis = onnx_attr(node, 'axis', 'i', default=None) - attrs.update(axis=axis) - QuantizeLinear.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/random_uniform_ext.py b/tools/mo/openvino/tools/mo/front/onnx/random_uniform_ext.py deleted file mode 100644 index 3ec1078d3177a2..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/random_uniform_ext.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.random_uniform import AttributedRandomUniform -from 
openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_datatype_as_numpy -from openvino.tools.mo.graph.graph import Node - - -class RandomUniformFrontExtractor(FrontExtractorOp): - op = 'RandomUniform' - enabled = True - - @classmethod - def extract(cls, node: Node): - shape = onnx_attr(node, 'shape', 'ints', default=None, dst_type=int64_array) - out_type = get_onnx_datatype_as_numpy(onnx_attr(node, 'dtype', 'i', default=1)) - seed = onnx_attr(node, 'seed', 'f', default=0.0) - min_val = onnx_attr(node, 'low', 'f', default=0.0) - max_val = onnx_attr(node, 'high', 'f', default=1.0) - AttributedRandomUniform.update_node_stat(node, {'shape': shape, - 'output_type': out_type, - 'seed': seed, - 'min_val': out_type(min_val), - 'max_val': out_type(max_val)}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/range_ext.py b/tools/mo/openvino/tools/mo/front/onnx/range_ext.py deleted file mode 100644 index b00c43aced17ba..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/range_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node - - -class RangeFrontExtractor(FrontExtractorOp): - op = 'Range' - enabled = True - - @classmethod - def extract(cls, node: Node): - # output_type attribute will be deduced during shape infer - Range.update_node_stat(node, {}) - return cls.enabled - diff --git a/tools/mo/openvino/tools/mo/front/onnx/reduce_ext.py b/tools/mo/openvino/tools/mo/front/onnx/reduce_ext.py deleted file mode 100644 index 0cf3c6f9b90b6c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/reduce_ext.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ReduceOps import ReduceL1, ReduceL2, ReduceMax, ReduceMean, ReduceMin, ReduceProd, ReduceSum -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.graph.graph import Node - - -def update_reduce_node_attrs_with(node: Node, c: callable): - axis = onnx_attr(node, 'axes', 'ints', default=None) - if axis is not None: - axis = int64_array(axis) - keep_dims = onnx_attr(node, 'keepdims', 'i', default=True) - c.update_node_stat(node, {'axis': axis, 'keep_dims': keep_dims}) - - -class ReduceL1Extractor(FrontExtractorOp): - op = 'ReduceL1' - enabled = True - - @classmethod - def extract(cls, node: Node): - update_reduce_node_attrs_with(node, ReduceL1) - return cls.enabled - - -class ReduceL2Extractor(FrontExtractorOp): - op = 'ReduceL2' - enabled = True - - @classmethod - def extract(cls, node: Node): - update_reduce_node_attrs_with(node, ReduceL2) - return cls.enabled - - -class ReduceMaxFrontExtractor(FrontExtractorOp): - op = 'ReduceMax' - enabled = True - - @classmethod - def extract(cls, node: Node): - update_reduce_node_attrs_with(node, ReduceMax) - return cls.enabled - - -class ReduceMeanFrontExtractor(FrontExtractorOp): - op = 'ReduceMean' - enabled = True - - @classmethod - def extract(cls, node: Node): - update_reduce_node_attrs_with(node, ReduceMean) - return 
cls.enabled - - -class ReduceMinFrontExtractor(FrontExtractorOp): - op = 'ReduceMin' - enabled = True - - @classmethod - def extract(cls, node: Node): - update_reduce_node_attrs_with(node, ReduceMin) - return cls.enabled - - -class ReduceProdFrontExtractor(FrontExtractorOp): - op = 'ReduceProd' - enabled = True - - @classmethod - def extract(cls, node: Node): - update_reduce_node_attrs_with(node, ReduceProd) - return cls.enabled - - -class ReduceSumFrontExtractor(FrontExtractorOp): - op = 'ReduceSum' - enabled = True - - @classmethod - def extract(cls, node: Node): - update_reduce_node_attrs_with(node, ReduceSum) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/register_custom_ops.py b/tools/mo/openvino/tools/mo/front/onnx/register_custom_ops.py deleted file mode 100644 index 1e2be280b06390..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/register_custom_ops.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementOp, FrontReplacementPattern, FrontReplacementSubgraph -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileSubGraph, FrontReplacementFromConfigFileOp, \ - FrontReplacementFromConfigFileGeneral - - -def get_front_classes(): - front_classes = [FrontExtractorOp, FrontReplacementOp, FrontReplacementPattern, FrontReplacementSubgraph, - FrontReplacementFromConfigFileSubGraph, FrontReplacementFromConfigFileOp, - FrontReplacementFromConfigFileGeneral] - return front_classes diff --git a/tools/mo/openvino/tools/mo/front/onnx/reshape_ext.py b/tools/mo/openvino/tools/mo/front/onnx/reshape_ext.py deleted file mode 100644 index 68bca8f8f33e12..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/reshape_ext.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.reshape import Reshape - -class ReshapeFrontExtractor(FrontExtractorOp): - op = 'Reshape' - enabled = True - - @classmethod - def extract(cls, node): - dim = onnx_attr(node, 'shape', 'ints', None) - if dim is not None: - dim = int64_array(dim) - Reshape.update_node_stat(node, {'dim': dim}) - else: - Reshape.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/resize_ext.py b/tools/mo/openvino/tools/mo/front/onnx/resize_ext.py deleted file mode 100644 index 189f7314d55e8e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/resize_ext.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ONNXResize10 import ONNXResize10 -from openvino.tools.mo.ops.ONNXResize11 import ONNXResize11Op -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version -from openvino.tools.mo.graph.graph import Node - - -class ResizeExtractor(FrontExtractorOp): - op = 'Resize' - enabled = True - - @classmethod - def extract(cls, node: Node): - onnx_opset_version = get_onnx_opset_version(node) - if onnx_opset_version is not None and onnx_opset_version >= 11: - mode = 
onnx_attr(node, 'mode', 's', default=b'nearest').decode() - transformation_mode = onnx_attr(node, - 'coordinate_transformation_mode', - 's', - default=b'half_pixel').decode() - nearest_mode = onnx_attr(node, 'nearest_mode', 's', default=b'round_prefer_floor').decode() - cubic_coeff_a = onnx_attr(node, 'cubic_coeff_a', 'f', default=-0.75) - attrs = { - 'mode': mode, 'coordinate_transformation_mode': transformation_mode, - 'nearest_mode': nearest_mode, 'cube_coeff': cubic_coeff_a - } - ONNXResize11Op.update_node_stat(node, attrs) - else: - mode = onnx_attr(node, 'mode', 's', default=b'nearest').decode() - ONNXResize10.update_node_stat(node, {'mode': mode}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/reverse_sequence_ext.py b/tools/mo/openvino/tools/mo/front/onnx/reverse_sequence_ext.py deleted file mode 100644 index 3e94521c68fddb..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/reverse_sequence_ext.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.reverse_sequence import ReverseSequence -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class ReverseSequenceExtractor(FrontExtractorOp): - op = 'ReverseSequence' - enabled = True - - @classmethod - def extract(cls, node): - batch_axis = onnx_attr(node, 'batch_axis', 'i', default=1) - time_axis = onnx_attr(node, 'time_axis', 'i', default=0) - - attrs = { - 'batch_axis': batch_axis, - 'seq_axis': time_axis, - } - ReverseSequence.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/rnn_ext.py b/tools/mo/openvino/tools/mo/front/onnx/rnn_ext.py deleted file mode 100644 index ea7b21f78bcb3c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/rnn_ext.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array, int64_array -from openvino.tools.mo.ops.RNN import RNN -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class RNNFrontExtractor(FrontExtractorOp): - op = 'RNN' - enabled = True - - @classmethod - def extract(cls, node): - direction = onnx_attr(node, 'direction', 's', b'forward').decode().lower() - - activation_alpha = onnx_attr(node, 'activation_alpha', 'floats', - default=None, dst_type=lambda x: float32_array(x)) - activation_beta = onnx_attr(node, 'activation_beta', 'floats', - default=None, dst_type=lambda x: float32_array(x)) - activations = onnx_attr(node, 'activations', 'strings', - default=['tanh', 'tanh'] if direction == 'bidirectional' else ['tanh'], - dst_type=lambda x: list(map(lambda s: s.decode(encoding="utf-8").lower(), list(x)))) - clip = onnx_attr(node, 'clip', 'f', default=None) - - # Since pytorch generates ONNX bidirectional RNN models with only one activation, duplicating activation - if direction == 'bidirectional' and len(activations) == 1: - activations.append(activations[0]) - - attrs = { - 'batch_dim': 1, - 'sequence_dim': 0, - 'blobs_wrb': True, - 'has_num_directions': True, - 'num_layers': 1, - 'format': 'onnx', - 'multilayers': False, - 'gate_order': [0], - - # ONNX attrs - 'activation_alpha': activation_alpha, - 'activation_beta': activation_beta, - 'activations': activations, - 'clip': clip, - 
'direction': direction, - 'hidden_size': int64_array(onnx_attr(node, 'hidden_size', 'i')), - } - - RNN.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/roialign_ext.py b/tools/mo/openvino/tools/mo/front/onnx/roialign_ext.py deleted file mode 100644 index 134f6a543cc8e5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/roialign_ext.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.roialign import ROIAlign -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version - - -class ROIAlignExtractor(FrontExtractorOp): - op = 'ROIAlign' - enabled = True - - @classmethod - def extract(cls, node): - mode = onnx_attr(node, 'mode', 's', default=b'avg').decode() - output_height = onnx_attr(node, 'output_height', 'i', default=1) - output_width = onnx_attr(node, 'output_width', 'i', default=1) - sampling_ratio = onnx_attr(node, 'sampling_ratio', 'i', default=0) - spatial_scale = onnx_attr(node, 'spatial_scale', 'f', default=1.0) - onnx_opset_version = get_onnx_opset_version(node) - if onnx_opset_version >= 16: - aligned_mode = onnx_attr(node, 'coordinate_transformation_mode', 's', default=b'half_pixel').decode() - if aligned_mode == "output_half_pixel": - aligned_mode = "asymmetric" - ROIAlign.update_node_stat(node, {'pooled_h': output_height, 'pooled_w': output_width, - 'sampling_ratio': sampling_ratio, 'spatial_scale': spatial_scale, - 'mode': mode, 'aligned_mode': aligned_mode}) - else: - ROIAlign.update_node_stat(node, {'pooled_h': output_height, 'pooled_w': output_width, - 'sampling_ratio': sampling_ratio, 'spatial_scale': spatial_scale, - 'mode': mode}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/roifeatureextractor_ext.py b/tools/mo/openvino/tools/mo/front/onnx/roifeatureextractor_ext.py deleted file mode 100644 index 35f52981852ae5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/roifeatureextractor_ext.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.ops.roifeatureextractor_onnx import ExperimentalDetectronROIFeatureExtractor -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class ExperimentalDetectronROIFeatureExtractorFrontExtractor(FrontExtractorOp): - op = 'ExperimentalDetectronROIFeatureExtractor' - enabled = True - - @classmethod - def extract(cls, node): - attrs = dict(output_size=onnx_attr(node, 'output_size', 'i', 7), - sampling_ratio=onnx_attr(node, 'sampling_ratio', 'i', 2), - aligned=onnx_attr(node, 'aligned', 'i', 0), - num_classes=onnx_attr(node, 'num_classes', 'i', 81), - post_nms_count=onnx_attr(node, 'post_nms_count', 'i', 2000), - score_threshold=onnx_attr(node, 'score_threshold', 'f', 0.05), - pyramid_scales=int64_array(onnx_attr(node, 'pyramid_scales', 'ints', [4, 8, 16, 32, 64])), - ) - - ExperimentalDetectronROIFeatureExtractor.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/scatter_ext.py b/tools/mo/openvino/tools/mo/front/onnx/scatter_ext.py deleted file mode 100644 index b6627202dcb6a5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/scatter_ext.py +++ /dev/null 
@@ -1,40 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.scatter import ScatterElementsUpdate -from openvino.tools.mo.ops.scatternd import ScatterNDUpdate -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class ScatterExtractor(FrontExtractorOp): - # deprecated ONNX operation - op = 'Scatter' - enabled = True - - @classmethod - def extract(cls, node): - axis = onnx_attr(node, 'axis', 'i', default=0) - ScatterElementsUpdate.update_node_stat(node, {'axis': axis}) - return cls.enabled - - -class ScatterElementsExtractor(FrontExtractorOp): - op = 'ScatterElements' - enabled = True - - @classmethod - def extract(cls, node): - axis = onnx_attr(node, 'axis', 'i', default=0) - ScatterElementsUpdate.update_node_stat(node, {'axis': axis}) - return cls.enabled - - -class ScatterNDExtractor(FrontExtractorOp): - op = 'ScatterND' - enabled = True - - @classmethod - def extract(cls, node): - ScatterNDUpdate.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/shape_ext.py b/tools/mo/openvino/tools/mo/front/onnx/shape_ext.py deleted file mode 100644 index bd6eea52017850..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/shape_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.shape import Shape - - -class ShapeFrontExtractor(FrontExtractorOp): - op = 'Shape' - enabled = True - - @classmethod - def extract(cls, node): - Shape.update_node_stat(node, {'output_type': np.int64}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/size_ext.py b/tools/mo/openvino/tools/mo/front/onnx/size_ext.py deleted file mode 100644 index 7d61482dc8bfc4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/size_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.size import Size -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SizeExtractor(FrontExtractorOp): - op = 'Size' - enabled = True - - @classmethod - def extract(cls, node): - Size.update_node_stat(node, {'output_type': np.int64}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/slice_ext.py b/tools/mo/openvino/tools/mo/front/onnx/slice_ext.py deleted file mode 100644 index 2710e6270f84fe..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/slice_ext.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import get_onnx_opset_version -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.slice import Slice, AttributedSlice -from openvino.tools.mo.utils.error import Error - - -class SliceFrontExtractor(FrontExtractorOp): - op = 'Slice' - enabled = True - - @classmethod - def extract(cls, node): - if get_onnx_opset_version(node) < 10: - starts = int64_array(onnx_attr(node, 'starts', 'ints', default=[])) - ends = int64_array(onnx_attr(node, 'ends', 'ints', 
default=[])) - axes = int64_array(onnx_attr(node, 'axes', 'ints', default=[])) - - if len(starts) == 0 or len(ends) == 0: - raise Error("starts or/and ends are not specified for the node {}".format(node.name)) - if len(axes) == 0: - axes = np.arange(len(starts), dtype=int) - - attrs = {'axes': axes, 'starts': starts, 'ends': ends} - AttributedSlice.update_node_stat(node, attrs) - else: # onnx_opset_version >= 10 - Slice.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/softmaxONNX_to_softmax.py b/tools/mo/openvino/tools/mo/front/onnx/softmaxONNX_to_softmax.py deleted file mode 100644 index 2b72cebbcc4a98..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/softmaxONNX_to_softmax.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.ops.flatten import FlattenONNX -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.softmax import Softmax - - -class SoftmaxONNXFrontReplacer(FrontReplacementOp): - """ - Replace SoftmaxONNX operation with FlattenONNX -> Softmax -> Reshape subgraph - """ - op = "SoftMaxONNX" - enabled = True - - def run_before(self): - from openvino.tools.mo.front.onnx.flattenONNX_to_reshape import FlattenONNXToReshape - return [FlattenONNXToReshape] - - def replace_op(self, graph: Graph, node: Node): - node_name = node.soft_get('name', node.id) - assert node.has_valid('axis'), 'The node "{}" does not have mandatory attribute "axis"'.format(node_name) - - flatten_node = FlattenONNX(graph, {'name': node_name + '/FlattenONNX_', 'axis': node.axis}).create_node() - shape_node = Shape(graph, {'name': node_name + '/ShapeOf_'}).create_node() - softmax_node = Softmax(graph, {'name': node_name + '/Softmax_', - 'axis': 1, - 'framework_node_name': node_name, - 'rename_condition': lambda n: len(n.graph.get_op_nodes(name=node_name)) == 0 - }).create_node() - reshape_node = Reshape(graph, {}).create_node() - - rename_nodes([(node, node_name + '/delete'), (reshape_node, node_name)]) - - flatten_node.out_port(0).connect(softmax_node.in_port(0)) - softmax_node.out_port(0).connect(reshape_node.in_port(0)) - shape_node.out_port(0).connect(reshape_node.in_port(1)) - - source = node.in_port(0).get_source() - - flatten_node.in_port(0).connect(source) - shape_node.in_port(0).connect(source) - - return [reshape_node.id] diff --git a/tools/mo/openvino/tools/mo/front/onnx/softmax_ext.py b/tools/mo/openvino/tools/mo/front/onnx/softmax_ext.py deleted file mode 100644 index 2da163a5489bac..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/softmax_ext.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.softmax import SoftmaxONNX -from openvino.tools.mo.ops.log_softmax import LogSoftmaxONNX - - -class SoftmaxExtractor(FrontExtractorOp): - op = 'Softmax' - enabled = True - - @classmethod - def extract(cls, node): - axis = onnx_attr(node, 'axis', 'i', default=1) - SoftmaxONNX.update_node_stat(node, {'axis': axis}) - return cls.enabled - - -class LogSoftmaxExtractor(FrontExtractorOp): - op = 'LogSoftmax' - enabled = True - - 
@classmethod - def extract(cls, node): - axis = onnx_attr(node, 'axis', 'i', default=1) - LogSoftmaxONNX.update_node_stat(node, {'axis': axis}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/softplus_ext.py b/tools/mo/openvino/tools/mo/front/onnx/softplus_ext.py deleted file mode 100644 index d1e41c2b91616a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/softplus_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import SoftPlus -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SoftPlusExtractor(FrontExtractorOp): - op = 'Softplus' - enabled = True - - @classmethod - def extract(cls, node): - SoftPlus.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/space_to_depth_ext.py b/tools/mo/openvino/tools/mo/front/onnx/space_to_depth_ext.py deleted file mode 100644 index 681b47eef327ea..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/space_to_depth_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.space_to_depth import SpaceToDepth -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class SpaceToDepthFrontExtractor(FrontExtractorOp): - op = 'SpaceToDepth' - enabled = True - - @classmethod - def extract(cls, node): - # update the attributes of the node - block_size = onnx_attr(node, 'blocksize', 'i', default=None) - SpaceToDepth.update_node_stat(node, {'block_size': block_size}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/split_ext.py b/tools/mo/openvino/tools/mo/front/onnx/split_ext.py deleted file mode 100644 index ec9ea965101d95..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/split_ext.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.split import AttributedVariadicSplit, AttributedSplit -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, onnx_get_num_outputs - - -class SplitFrontExtractor(FrontExtractorOp): - op = 'Split' - enabled = True - - @classmethod - def extract(cls, node): - axis = onnx_attr(node, 'axis', 'i', default=0, dst_type=np.int64) - size_splits = onnx_attr(node, 'split', 'ints', default=None, dst_type=int64_array) - if size_splits is None: - AttributedSplit.update_node_stat(node, { - 'axis': axis, - 'num_splits': onnx_get_num_outputs(node), - }) - else: - AttributedVariadicSplit.update_node_stat(node, { - 'axis': axis, - 'size_splits': size_splits, - }) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/squeeze_ext.py b/tools/mo/openvino/tools/mo/front/onnx/squeeze_ext.py deleted file mode 100644 index 020b9d14ad85bb..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/squeeze_ext.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from 
openvino.tools.mo.ops.squeeze import Squeeze - - -class SqueezeFrontExtractor(FrontExtractorOp): - op = 'Squeeze' - enabled = True - - @classmethod - def extract(cls, node): - axis = int64_array(onnx_attr(node, 'axes', 'ints', default=[])) - - attrs = { - 'squeeze_dims': axis if len(axis) != 0 else None - } - - # update the attributes of the node - Squeeze.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/top_k_ext.py b/tools/mo/openvino/tools/mo/front/onnx/top_k_ext.py deleted file mode 100644 index e0b7ef5b3bc42f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/top_k_ext.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.topk import TopK -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, onnx_node_has_attr - - -class TopKExtractor(FrontExtractorOp): - op = 'TopK' - enabled = True - - @classmethod - def extract(cls, node): - """ - TopK-1 (k as attribute, required) - TopK-10 (k as input, no sorting manipulations) - TopK-11 (k as input, sorting manipulations through `sorted` and `largest` attrs) - """ - attrs = { - 'axis': onnx_attr(node, 'axis', 'i', default=-1), - 'index_element_type': np.int64 - } - if onnx_node_has_attr(node, 'k'): - attrs['k'] = onnx_attr(node, 'k', 'i') - attrs['sort'] = 'value' if onnx_attr(node, 'sorted', 'i', default=1) else 'none' - attrs['mode'] = 'max' if onnx_attr(node, 'largest', 'i', default=1) else 'min' - - TopK.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/topkrois_ext.py b/tools/mo/openvino/tools/mo/front/onnx/topkrois_ext.py deleted file mode 100644 index 616ee4af9d1f73..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/topkrois_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.topkrois_onnx import ExperimentalDetectronTopKROIs -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class ExperimentalDetectronTopKROIsFrontExtractor(FrontExtractorOp): - op = 'ExperimentalDetectronTopKROIs' - enabled = True - - @classmethod - def extract(cls, node): - attrs = dict(max_rois=onnx_attr(node, 'max_rois', 'i', 1000)) - ExperimentalDetectronTopKROIs.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/transpose_ext.py b/tools/mo/openvino/tools/mo/front/onnx/transpose_ext.py deleted file mode 100644 index 061962c336e009..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/transpose_ext.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr - - -class TransposeFrontExtractor(FrontExtractorOp): - op = 'Transpose' - enabled = True - - @classmethod - def extract(cls, node): - # In case of undefined 'perm' attribute, Transpose operation in ONNX reverse the dimensions - order = onnx_attr(node, 'perm', 'ints', default=None) - attrs = { - 'order': int64_array(order) if order is not None 
else None, - 'reverse_order': order is None - } - Transpose.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/unsqueeze_ext.py b/tools/mo/openvino/tools/mo/front/onnx/unsqueeze_ext.py deleted file mode 100644 index 62bcccdc00a6b8..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/unsqueeze_ext.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr -from openvino.tools.mo.ops.expand_dims import ExpandDims - - -class UnsqueezeFrontExtractor(FrontExtractorOp): - """ - Convert Unsqueeze layer to ExpandDims because the ExpandDims layer has fixed attribute with dimensions to unsqueeze. - """ - op = 'Unsqueeze' - enabled = True - - @classmethod - def extract(cls, node): - axis = int64_array(onnx_attr(node, 'axes', 'ints', default=[])) - - ExpandDims.update_node_stat(node, {'expand_axis': axis}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/upsample_ext.py b/tools/mo/openvino/tools/mo/front/onnx/upsample_ext.py deleted file mode 100644 index 7ebe0c36bd4005..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/upsample_ext.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import math - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array -from openvino.tools.mo.ops.ONNXResize10 import ONNXResize10 -from openvino.tools.mo.ops.upsample import UpsampleOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.onnx.extractors.utils import onnx_attr, get_onnx_opset_version -from openvino.tools.mo.utils.error import Error - - -class UpsampleFrontExtractor(FrontExtractorOp): - op = 'Upsample' - enabled = True - - @classmethod - def extract(cls, node): - onnx_opset_version = get_onnx_opset_version(node) - if onnx_opset_version is not None and onnx_opset_version >= 9: - mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode()) - ONNXResize10.update_node_stat(node, {'mode': mode}) - else: - mode = onnx_attr(node, 'mode', 's', default='nearest', dst_type=lambda x: x.decode()) - scales = onnx_attr(node, 'scales', 'floats', dst_type=lambda x: float32_array(x)) - width_scale = onnx_attr(node, 'width_scale', 'f') - height_scale = onnx_attr(node, 'height_scale', 'f') - - supported_modes = ['nearest', 'linear'] - if mode not in supported_modes: - raise Error( - 'Error decoding Upsample node {}, mode = {} is not in the list of supported modes {}.', - node.name, - mode, - supported_modes - ) - - if scales is not None: - if scales.shape != (4,): - raise Error( - 'Upsample scales attribute is wrong for node {}. 
Only 4D scales are supported.', - node.name - ) - if math.fabs(scales[0] - 1) > 1e-5 or math.fabs(scales[1] - 1) > 1e-5: - raise Error( - 'Upsampling of batch and feature dimensions is not supported for node {}.', - node.name - ) - height_scale = scales[2] - width_scale = scales[3] - - if (width_scale is None or height_scale is None) and len(node.in_nodes()) != 2: - raise Error( - 'One/both of widths_scale = {} and height_scale = {} is not defined for Upsample node {}.', - width_scale, - height_scale, - node.name - ) - - UpsampleOp.update_node_stat(node, {'mode': mode, 'height_scale': height_scale, - 'width_scale': width_scale}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/onnx/where_ext.py b/tools/mo/openvino/tools/mo/front/onnx/where_ext.py deleted file mode 100644 index 247376456b9cbc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/onnx/where_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.select import Select -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class WhereExtractor(FrontExtractorOp): - op = 'Where' - enabled = True - - @classmethod - def extract(cls, node): - Select.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/output_cut.py b/tools/mo/openvino/tools/mo/front/output_cut.py deleted file mode 100644 index d1ef11fdce39e0..00000000000000 --- a/tools/mo/openvino/tools/mo/front/output_cut.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.extractor import add_output_ops -from openvino.tools.mo.graph.graph import Graph, get_edge_attribute_between_nodes, set_edge_attribute_between_nodes - - -class OutputCut(FrontReplacementPattern): - enabled = True - run_not_recursively = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.front.user_data_repack import UserDataRepack - return [UserDataRepack] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - add_output_ops(graph, graph.graph['packed_outputs'], inputs=graph.graph['user_shapes']) - - # For keeping tensor names information for output nodes fake outputs are added - # to graph during the model loading. In the following code fake outputs are removed - # and tensor names information is moved to output->Result edge. 
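The idea the comment above describes for OutputCut — erase the pass-through "fake output" node but keep the tensor-name metadata on the edge that now feeds the Result — can be sketched with plain networkx, which is the graph library MO builds on. The node names and the 'fw_tensor_debug_info' payload below are illustrative only, not MO's actual schema; this is a minimal analogy, not the pass itself.

import networkx as nx

# Toy graph: 'producer' feeds both a fake output node and the real Result.
# The producer->fake_out edge carries the framework tensor-name info.
g = nx.DiGraph()
g.add_edge('producer', 'fake_out', fw_tensor_debug_info=('conv1', 0))
g.add_edge('producer', 'result')

# Move the debug info onto the producer->result edge, then drop the fake node.
info = g.edges['producer', 'fake_out'].get('fw_tensor_debug_info')
if info is not None:
    g.edges['producer', 'result']['fw_tensor_debug_info'] = info
g.remove_node('fake_out')

print(list(g.edges(data=True)))  # the Result edge now owns the tensor-name info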
- for node in graph.get_op_nodes(needs_removal=True): - fw_info = None - in_node = None - out_nodes_ids = {} - for in_port_idx in node.in_edges(): - node_idx = node.in_edge(in_port_idx)['in'] - if node_idx in node.in_nodes(): - in_node = node.in_node(node_idx) - fw_info_value = get_edge_attribute_between_nodes(in_node, node, 'fw_tensor_debug_info') - if fw_info_value: - fw_info = fw_info_value - break - if fw_info is not None and in_node is not None: - for out_idx in in_node.out_nodes(): - out_node = in_node.out_node(out_idx) - out_nodes_ids[out_idx] = out_node.id - - graph.erase_node(node) - - if fw_info is not None and in_node is not None: - for out_idx in in_node.out_nodes(): - if node.id == out_nodes_ids[out_idx]: - set_edge_attribute_between_nodes(in_node, in_node.out_node(out_idx), - 'fw_tensor_debug_info', fw_info) diff --git a/tools/mo/openvino/tools/mo/front/override_batch.py b/tools/mo/openvino/tools/mo/front/override_batch.py deleted file mode 100644 index 9079c576dd6370..00000000000000 --- a/tools/mo/openvino/tools/mo/front/override_batch.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.infer import override_batch - - -class OverrideBatch(FrontReplacementPattern): - enabled = True - run_not_recursively = True - - def find_and_replace_pattern(self, graph: Graph): - override_batch(graph, graph.graph['cmd_params'].batch) diff --git a/tools/mo/openvino/tools/mo/front/pass_separator.py b/tools/mo/openvino/tools/mo/front/pass_separator.py deleted file mode 100644 index 69a5b174b6a028..00000000000000 --- a/tools/mo/openvino/tools/mo/front/pass_separator.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class FrontStart(FrontReplacementPattern): - enabled = True - - def run_after(self): - return [] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - pass - - -class FrontFinish(FrontReplacementPattern): - enabled = True - - def run_after(self): - return [] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - pass diff --git a/tools/mo/openvino/tools/mo/front/rank_decomposer.py b/tools/mo/openvino/tools/mo/front/rank_decomposer.py deleted file mode 100644 index 96c61c1961c929..00000000000000 --- a/tools/mo/openvino/tools/mo/front/rank_decomposer.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.squeeze import Squeeze - - -class RankDecomposer(FrontReplacementOp): - op = 'Rank' - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['op'] - name = node.soft_get('name', node.id) - - assert node.has_valid('output_type'), \ - 'Rank node should have `output_type` attribute, but it`s not for node 
{}'.format(name) - - shape_of = Shape(graph, {'name': name + '/shape_of', 'output_type': node.output_type}).create_node() - rank_1d = Shape(graph, {'name': name + '/rank_of', 'output_type': node.output_type}).create_node() - rank_0d = create_op_node_with_second_input( - graph, Squeeze, int64_array(0), {'name': name + '/0d_rank_of'}, rank_1d) - - shape_of.out_port(0).connect(rank_1d.in_port(0)) - node.out_port(0).get_connection().set_source(rank_0d.out_port(0)) - node.in_port(0).get_connection().set_destination(shape_of.in_port(0)) - - rename_nodes([(node, name + '/ToBeDeleted'), (rank_0d, name)]) diff --git a/tools/mo/openvino/tools/mo/front/reciprocal.py b/tools/mo/openvino/tools/mo/front/reciprocal.py deleted file mode 100644 index eb6918e3d51d4b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/reciprocal.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Pow -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.const import Const - - -class ReciprocalReplacer(FrontReplacementOp): - op = "Reciprocal" - enabled = True - - def replace_op(self, graph: Graph, node: Node): - const = Const(graph, dict(value=mo_array(-1.), name=node.name + '/reciprocal_pow_const_')).create_node() - reciprocal = Pow(graph, {'name': node.name + '/reciprocal_pow_'}).create_node() - node.in_port(0).get_connection().set_destination(reciprocal.in_port(0)) - const.out_port(0).connect(reciprocal.in_port(1)) - return [reciprocal.id] diff --git a/tools/mo/openvino/tools/mo/front/reduce_axis_normalizer.py b/tools/mo/openvino/tools/mo/front/reduce_axis_normalizer.py deleted file mode 100644 index 898f696ace01ed..00000000000000 --- a/tools/mo/openvino/tools/mo/front/reduce_axis_normalizer.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ReduceOps import reduce_map -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.ops.rank import Rank -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class ReduceAxisNormalizer(FrontReplacementSubgraph): - """ - Reduce operation requires information about axis, that is represented in original frameworks differently: as an - operation attribute or as a 1-st input port value. ReduceAxisNormalizer adds second input to Reduce operations with - axes to normalize if axes are specified as an attribute. 
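The attribute-versus-input duality that ReduceAxisNormalizer irons out is easy to see with plain NumPy. This is a sketch under the assumption that the reduce is a sum; nothing here uses MO's API.

import numpy as np

x = np.arange(12, dtype=np.float32).reshape(3, 4)

# Style 1: the axis travels as an attribute of the op.
attr_style = np.sum(x, axis=1)

# Style 2 (what the pass produces): the axis travels as a second, constant input.
axes_input = np.array([1], dtype=np.int64)
input_style = np.sum(x, axis=tuple(axes_input.tolist()))

assert np.array_equal(attr_style, input_style)  # same reduction either way

# When 'axis' is absent, the pass builds Range(0, Rank(x)), i.e. all dimensions:
all_axes = tuple(range(x.ndim))
assert np.sum(x, axis=all_axes) == np.sum(x)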
- """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('reduce', dict(kind='op', op=lambda op: op in reduce_map)) - ], - edges=[] - ) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - node = match['reduce'] - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - if len(connected_in_ports) == 1: - node_name = node.soft_get('name', node.id) - - # if the 'axis' is None then we still add a second input to the layer with a 1D array with 1 element equal - # to None. The infer function handles this case because the input shape is known at this stage only - if node.has_valid('axis'): - const = Const(graph, {'name': node_name + '/axis', 'value': node.axis}).create_node() - node.add_input_port(1, skip_if_exist=True) - const.out_port(0).connect(node.in_port(1)) - del graph.node[node.id]['axis'] - else: - # The default (if there is no 'axis') is to reduce over all the dimensions of the input tensor. - axes = create_op_with_const_inputs(graph, Range, {0: int64_array(0), 2: int64_array(1)}, - dict(name=node_name + '/axes')) - end_of_range = Rank(graph, dict(name=node_name + '/range_end')).create_node() - node.in_port(0).get_connection().get_source().connect(end_of_range.in_port(0)) - end_of_range.out_port(0).connect(axes.in_port(1)) - - node.add_input_port(1, skip_if_exist=True) - axes.out_port(0).connect(node.in_port(1)) diff --git a/tools/mo/openvino/tools/mo/front/reshape_dim_normalizer.py b/tools/mo/openvino/tools/mo/front/reshape_dim_normalizer.py deleted file mode 100644 index 7470289ea80202..00000000000000 --- a/tools/mo/openvino/tools/mo/front/reshape_dim_normalizer.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.pass_separator import FrontStart -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.utils.error import Error - - -class ReshapeDimNormalizer(FrontReplacementSubgraph): - """ - Reshape operation requires information about output dimensions, that is represented in original frameworks - differently: - - by layer parameter - - by 1-port input value - - This transformation reforms Reshape operations to store dim info in 1-port input. 
- """ - enabled = True - force_shape_inference = True - - def run_before(self): - return [FrontStart] - - def run_after(self): - from openvino.tools.mo.front.freeze_placeholder_value import FreezePlaceholderValue - return [FreezePlaceholderValue] - - def pattern(self): - return dict( - nodes=[ - ('reshape', dict(kind='op', op='Reshape')) - ], - edges=[] - ) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - node = match['reshape'] - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - if len(connected_in_ports) == 1: - if node.has('dim'): - const = Const(graph, {'value': node.dim}).create_node() - node.add_input_port(1, skip_if_exist=True) - const.out_port(0).connect(node.in_port(1)) - del node['dim'] - else: - raise Error('The `dim` attribute for node {} is not set'.format(node.op)) diff --git a/tools/mo/openvino/tools/mo/front/restore_ports.py b/tools/mo/openvino/tools/mo/front/restore_ports.py deleted file mode 100644 index 9c2c5db21ca6ff..00000000000000 --- a/tools/mo/openvino/tools/mo/front/restore_ports.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph - - -class RestorePorts(FrontReplacementSubgraph): - enabled = True - - def run_after(self): - from openvino.tools.mo.front.input_cut import InputCut - return [InputCut] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - for node_id, attrs in graph.nodes(data=True): - if '_in_ports' not in attrs: - attrs['_in_ports'] = set() - if '_out_ports' not in attrs: - attrs['_out_ports'] = set() - - for u, v, k, d in graph.edges(data=True, keys=True): - from_node_attrs = graph.node[u] - to_node_attrs = graph.node[v] - is_control_flow = 'control_flow_edge' in d and d['control_flow_edge'] is True - - in_port_id = d['in'] if not is_control_flow else 'control_flow_' + str(d['in']) - out_port_id = d['out'] if not is_control_flow else 'control_flow_' + str(d['out']) - - to_node_attrs['_in_ports'].update({in_port_id: {'control_flow': is_control_flow}}) - from_node_attrs['_out_ports'].update({out_port_id: {'control_flow': is_control_flow}}) - - graph.stage = 'front' diff --git a/tools/mo/openvino/tools/mo/front/scatter_normalizer.py b/tools/mo/openvino/tools/mo/front/scatter_normalizer.py deleted file mode 100644 index 9982247b862b83..00000000000000 --- a/tools/mo/openvino/tools/mo/front/scatter_normalizer.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class ScatterNormalizer(FrontReplacementPattern): - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(is_scatter=True): - name = node.soft_get('name', node.id) - input_ports_count = len([port for port in node.in_ports().values() if not port.disconnected()]) - has_axis = node.has_valid('axis') - - if has_axis: - assert input_ports_count == 3, \ - '{} node {} has unexpected number of input ports {}'.format(node.op, name, input_ports_count) - const = Const(graph, {'name': name + '/axis', 'value': np.int64(node.axis)}).create_node() - node.add_input_port(3, skip_if_exist=True) - 
node.in_port(3).connect(const.out_port(0)) - del node['axis'] - else: - assert input_ports_count == 4, \ - '{} node {} has unexpected number of input ports {}'.format(node.op, name, input_ports_count) diff --git a/tools/mo/openvino/tools/mo/front/softmax.py b/tools/mo/openvino/tools/mo/front/softmax.py deleted file mode 100644 index 8bc31506116ba4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/softmax.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.reduce_axis_normalizer import ReduceAxisNormalizer -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.softmax import Softmax - - -class SoftmaxFromKeras(FrontReplacementSubgraph): - """ - The transformation looks for the pattern that Keras produces for SoftMax layer. The transformation works if the - softmax is performed over one pre-defined axis. - """ - enabled = True - - def run_after(self): - return [ReduceAxisNormalizer] - - def pattern(self): - return dict( - nodes=[ - ('input', dict()), - ('reduce_max', dict(op='ReduceMax')), - ('reduce_indices_max', dict(op='Const', value=lambda x: x is not None and x.size != 0)), - ('sub', dict(op='Sub')), - ('exp', dict(op='Exp')), - ('reduce_sum', dict(op='ReduceSum')), - ('reduce_indices_sum', dict(op='Const', value=lambda x: x is not None and x.size != 0)), - ('div', dict(op='Div')), - ], - edges=[ - ('input', 'sub', {'in': 0}), - ('input', 'reduce_max', {'in': 0}), - ('reduce_indices_max', 'reduce_max', {'in': 1}), - ('reduce_max', 'sub', {'in': 1}), - ('sub', 'exp', {'in': 0}), - ('exp', 'div', {'in': 0}), - ('exp', 'reduce_sum', {'in': 0}), - ('reduce_indices_sum', 'reduce_sum', {'in': 1}), - ('reduce_sum', 'div', {'in': 1}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - - reduce_max_axis = match['reduce_indices_max'].value - reduce_sum_axis = match['reduce_indices_sum'].value - - if reduce_max_axis.ndim == 0: - reduce_max_axis = reduce_max_axis.reshape([1]) - - if reduce_sum_axis.ndim == 0: - reduce_sum_axis = reduce_sum_axis.reshape([1]) - - if len(reduce_max_axis) != 1: - log.info('The reductions indices contain more than 1 element. Cannot convert to Softmax.') - return - - if not np.array_equal(reduce_max_axis, reduce_sum_axis): - log.info('The reduce indices are not equal: {} vs {}. 
Cannot convert to Softmax' - ''.format(reduce_max_axis, reduce_sum_axis)) - return - - softmax = Softmax(graph, {'name': match['input'].name + '/Softmax', 'axis': reduce_sum_axis[0]}).create_node() - match['input'].out_port(0).connect(softmax.in_port(0)) - match['div'].out_port(0).get_connection().set_source(softmax.out_port(0)) - - log.debug('Successfully created SoftMax node') diff --git a/tools/mo/openvino/tools/mo/front/split_normalizer.py b/tools/mo/openvino/tools/mo/front/split_normalizer.py deleted file mode 100644 index 7c17d2f474893e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/split_normalizer.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.split import Split, VariadicSplit -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.utils.error import Error - - -class SqueezeAxis(FrontReplacementOp): - """ - Split-like operations from original frameworks split tensor by a certain `axis` dimension, removing - dimension over which splitting is performed. The "Split" layer of OV doesn't do that. - This replacer inserts Squeeze operation for each output of the Split nodes to remove the dimension. - - It is applicable to Unpack from TF operation and MxNet SliceChannel - """ - enabled = True - - def run_before(self): - return [AttributedSplitToSplit, AttributedVariadicSplitToVariadicSplit] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(squeeze_axis=True): - name = node.soft_get('name', node.id) - for out_port in node.out_ports().values(): - if node.has_valid('axis'): - squeeze_node = create_op_with_const_inputs(graph, Squeeze, {1: mo_array(node.axis)}, - {'name': name + '/Squeeze_'}) - out_port.get_connection().insert_node(squeeze_node) - elif node.is_in_port_connected(1): - squeeze_node = Squeeze(graph, {'name': name + '/Squeeze_'}).create_node() - out_port.get_connection().insert_node(squeeze_node) - node.in_port(1).get_connection().add_destination(squeeze_node.in_port(1)) - else: - raise Error('Unknown axis to squeeze for node {}'.format(name)) - - -class SplitInputsReconnect(FrontReplacementSubgraph): - """ - Reconnect input ports to fit IR specification - - The Split operation in original frameworks (e.g. 
TF) may have different semantics than IR specification states: - IE: 0 - input data to Split, 1 - axis of splitting - TF: 0 - axis of splitting, 1 - input data to Split - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='Split', input_port=1): - axis_src = node.in_port(0).get_source() - node.in_port(0).disconnect() - node.in_port(1).get_connection().set_destination(node.in_port(0)) - node.in_port(1).connect(axis_src) - del node['input_port'] - - -class AttributedSplitToSplit(FrontReplacementSubgraph): - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='AttributedSplit'): - name = node.soft_get('name', node.id) - - axis = node.soft_get('axis', None) - assert axis is not None, \ - 'AttributedSplit should have `axis` parameter set, but it`s not for node {}'.format(name) - - num_splits = node.soft_get('num_splits', None) - assert num_splits is not None, \ - 'AttributedSplit should have `num_splits` parameter set, but it`s not for node {}'.format(name) - - split = create_op_with_const_inputs(graph, Split, {1: np.int64(axis)}, - {'name': name + '/Split', 'num_splits': num_splits}) - - for idx, port in node.out_ports().items(): - port.get_connection().set_source(split.out_port(idx)) - node.in_port(0).get_connection().set_destination(split.in_port(0)) - graph.remove_node(node.id) - - -class AttributedVariadicSplitToVariadicSplit(FrontReplacementSubgraph): - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='AttributedVariadicSplit'): - name = node.soft_get('name', node.id) - - axis = node.soft_get('axis', None) - assert axis is not None, \ - 'AttributedVariadicSplit should have `axis` parameter set, but it`s not for node {}'.format(name) - - size_splits = node.soft_get('size_splits', None) - assert size_splits is not None, \ - 'AttributedVariadicSplit should have `size_splits` parameter set, but it`s not for node {}'.format(name) - - split = create_op_with_const_inputs(graph, VariadicSplit, {1: np.int64(axis), 2: size_splits}, - {'name': name + '/VariadicSplit', 'out_ports_count': len(size_splits)}) - - for idx, port in node.out_ports().items(): - port.get_connection().set_source(split.out_port(idx)) - - node.in_port(0).get_connection().set_destination(split.in_port(0)) - graph.remove_node(node.id) - - -class VariadicSplitInputsSwap(FrontReplacementSubgraph): - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='VariadicSplit', swap_axis_and_split_size_inputs=True): - axis_src = node.in_port(2).get_source() - node.in_port(2).disconnect() - node.in_port(1).get_connection().set_destination(node.in_port(2)) - node.in_port(1).connect(axis_src) diff --git a/tools/mo/openvino/tools/mo/front/sub.py b/tools/mo/openvino/tools/mo/front/sub.py deleted file mode 100644 index c8ee1e2a932b34..00000000000000 --- a/tools/mo/openvino/tools/mo/front/sub.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Mul, Add -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node, rename_node - - -class Sub(FrontReplacementPattern): - # This 
transformation is called directly from the 'openvino/tools/mo/middle/fusings.py' transformation - enabled = False - - @staticmethod - def sub_to_add_replacement(sub: Node): - # we execute this transformation for V10 IR later on middle phase despite graph_condition - # so we prevent Sub replacement on shape-calculating sub-graphs - if sub.in_port(0).data.get_value() is not None and sub.in_port(1).data.get_value() is not None: - return - - graph = sub.graph - name = sub.soft_get('name', sub.id) - - # keep Add name the same as Sub -- because of mathematical equality of output tensors - rename_node(node=sub, name=name + '/to_be_removed') - - # reconnect Sub in(out)puts to Add - add = Add(graph, {'name': name}).create_node() - rename_node(add, name) - - sub.in_port(0).get_connection().set_destination(add.in_port(0)) - sub.in_port(1).get_connection().set_destination(add.in_port(1)) - sub.out_port(0).get_connection().set_source(add.out_port(0)) - - # restore mathematical equivalence to Sub operation: Sub(A, B) = Add(A, Mul(B, -1)) - const_dtype = sub.soft_get('data_type', np.float32) - negate = create_op_with_const_inputs(graph, Mul, {1: mo_array(-1, dtype=const_dtype)}, {'name': name + '/neg_'}) - add.in_port(1).get_connection().insert_node(negate) - - def find_and_replace_pattern(self, graph: Graph): - for sub in graph.get_op_nodes(op='Sub'): - - # The attribute zero_point_sub indicates that the node can be used in ConvertQuantizeDequantize - # transformation (offline transformations). Pattern of such transformation expects Subtract node. - if sub.has_and_set('zero_point_sub'): - continue - self.sub_to_add_replacement(sub) diff --git a/tools/mo/openvino/tools/mo/front/subgraph_matcher.py b/tools/mo/openvino/tools/mo/front/subgraph_matcher.py deleted file mode 100644 index 44b81f54399442..00000000000000 --- a/tools/mo/openvino/tools/mo/front/subgraph_matcher.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import re - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.utils.custom_replacement_config import CustomReplacementDescriptor -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.graph import nodes_matching_name_pattern, sub_graph_between_nodes -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def find_object_by_pattern(names: list, pattern: str): - """ - :param names: list of names to find objects from. - :param pattern: regular expression for the name. - :return: list of matched objects. - """ - compiled_pattern = re.compile(pattern) - return [name for name in names if re.match(compiled_pattern, name)] - - -class SubgraphMatch(object): - """ - Class providing information about matched sub-graph. - """ - - def __init__(self, graph: Graph, replacement_desc: CustomReplacementDescriptor, matched_nodes: list, - inputs_order: list, outputs_order: list, prefix: str): - """ - Creates instance of a SubgraphMatch class from the provided configuration. - :param graph: networkx graph. - :param replacement_desc: CustomReplacementDescriptor object describing sub-graph. - :param matched_nodes: list of matched nodes. - :param inputs_order: nodes description in the format described in the FrontReplacementFromConfigFileSubGraph. - :param outputs_order: nodes description in the format described in the FrontReplacementFromConfigFileSubGraph. - :param prefix: optional prefix of the node names. Is not used in the sub-graph match by points. 
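The pattern/scope mechanics used throughout SubgraphMatch boil down to anchored regular-expression matching over node names. The standalone sketch below (plain `re`, toy node names) mirrors the behaviour of find_object_by_pattern and the scope-prefixed lookup without any MO types.

import re

def find_by_pattern(names, pattern):
    # re.match anchors at the start of the name, as find_object_by_pattern does
    compiled = re.compile(pattern)
    return [n for n in names if compiled.match(n)]

nodes = ['ssd/block1/conv', 'ssd/block1/relu', 'ssd/block2/conv']
scope = 'ssd/block1'                    # optional scope prefix of the match
pattern = scope + '/' + r'conv.*'       # scope-qualified node-name pattern

print(find_by_pattern(nodes, pattern))  # ['ssd/block1/conv']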
- """ - self._input_nodes_map = dict() - self._output_nodes_map = dict() - self._matched_nodes_names = matched_nodes - self.graph = graph - self.custom_replacement_desc = replacement_desc - self.scope = prefix - - for sub_graph_input_port, input_desc in enumerate(inputs_order): - for node_pattern, node_in_port in input_desc: - node = self.node_by_pattern(node_pattern) - if node is not None: - self._add_input_node(node.id, node_in_port, sub_graph_input_port) - - for sub_graph_output_port, (node_pattern, out_port) in enumerate(outputs_order): - node = self.node_by_pattern(node_pattern) - if node is not None: - self._add_output_node(node.id, out_port, sub_graph_output_port) - - def matched_nodes_names(self): - """ - Returns list of node names in the matched sub-graph. - :return: list of node names in the matched sub-graph. - """ - return self._matched_nodes_names - - def inputs_count(self): - """ - Returns number of inputs for the matched sub-graph. Only unique input tensors are considered, thus if the same - tensor is consumed by two or more input nodes of the sub-graph it is counted only once. - :return: Number or unique input tensors. - """ - return len(self._input_nodes_map.keys()) - - def outputs_count(self): - """ - Returns number of outputs for the matched sub-graph. Only unique output tensors are considered, thus if the same - tensor is consumed by two or more nodes outside of the sub-graph it is counted only once. - :return: Number or unique input tensors. - """ - return len(self._output_nodes_map.keys()) - - def input_nodes(self, port: int): - """ - Returns list of tuples where the first element is a Node of the sub-graph and the second is the input port for - that node. Each node of this list gets the same input tensor through the input port with number 'port' of the - sub-graph. - - For example, if the returned list requested for port 'portSG' is the following: [(NodeA, portA), (nodeB, portB)] - then the same tensor is passed to node 'NodeA' as input with number 'portA' and node 'nodeB' as input with - number 'portB' for the sub-graph input with number 'portSG'. - :param port: input port of the sub-graph. - :return: list describing nodes of the sub-graph getting tensor through the specified port. - """ - return self._input_nodes_map[port] - - def single_input_node(self, port: int): - """ - The function does the same as function 'input_nodes' but it relies on fact that there is just one node that - gets input tensor for sub-graph input with number 'port', so it return just tuple (Node, nodePort) or raises - exception if the amount of nodes is not equal to 1. - :param port: input port of the sub-graph. - :return: tuple describing node of the sub-graph getting tensor through the specified port. - """ - input_nodes = self.input_nodes(port) - if len(input_nodes) != 1: - raise Error('The amount of input nodes for port "{}" is not equal to 1. '.format(port) + - refer_to_faq_msg(33)) - return input_nodes[0] - - def output_node(self, port: int): - """ - Returns a tuple where the first element is a Node of the sub-graph and the second is the output port of that - node. Th node produces output tensor through the output port with number 'port' of the sub-graph. - :param port: output port of the sub-graph. - :return: tuple describing node of the sub-graph producing sub-graph output tensor through the specified port. 
- """ - return self._output_nodes_map[port] - - def node_by_pattern(self, pattern: str): - """ - Returns Node from the list of sub-graph nodes matching node name regular expression 'pattern'. If there are more - than one nodes matched then the function raises exception. - :param pattern: the regular expression for the node name. - :return: matched Node. - """ - if self.scope != '': - if self.scope[-1] == '/': - pattern = self.scope + pattern - else: - pattern = self.scope + '/' + pattern - found_names = find_object_by_pattern(self._matched_nodes_names, pattern) - if len(found_names) > 1: - raise Error('The amount of nodes matched pattern "{}" is more than 1. '.format(pattern) + - refer_to_faq_msg(78)) - if len(found_names) == 0: - return None - return Node(self.graph, found_names[0]) - - def _add_input_node(self, node_name: str, node_port: int, sub_graph_input_port: int): - self._input_nodes_map.setdefault(sub_graph_input_port, []).append((Node(self.graph, node_name), node_port)) - - def _add_output_node(self, node_name: str, node_port: int, sub_graph_output_port: int): - if sub_graph_output_port in self._output_nodes_map: - raise Error('Output node for port "{}" has already been specified. '.format(sub_graph_output_port) + - refer_to_faq_msg(34)) - self._output_nodes_map[sub_graph_output_port] = (Node(self.graph, node_name), node_port) - - -# TODO looks like this class is not needed. Can be implemented as pure functions. -class SubgraphMatcher(object): - def __init__(self, replacement_descriptor: CustomReplacementDescriptor): - self.replacement_desc = replacement_descriptor - - def _match_sub_graph_for_scope(self, graph: Graph, scope_pattern: str): - """ - :param graph: networkx graph to find sub-graph in. - :param scope_pattern: regular expression specifying sub-graph scope. - :return: an object describing matched sub-graph. - """ - inputs_order = self.replacement_desc.get_inputs_description() - outputs_order = self.replacement_desc.get_outputs_description() - - for list_nodes in inputs_order: - for node_name_pattern, port in list_nodes: - if len(find_object_by_pattern(graph.nodes(), '.*' + node_name_pattern)) == 0: - log.info('Node "{} does not exist in the graph". Failed to match sub-graph by scope "{}".'.format( - node_name_pattern, self.replacement_desc.id)) - return None - - matched_nodes = nodes_matching_name_pattern(graph, scope_pattern) - if len(matched_nodes) == 0: - log.info('There are no instances of the sub-graph by scope "{}"'.format(scope_pattern)) - return None - - return SubgraphMatch(graph, self.replacement_desc, matched_nodes, inputs_order, outputs_order, scope_pattern) - - def _match_sub_graph_for_points(self, graph: Graph): - """ - :param graph: networkx graph to find sub-graph in. - :return: an object describing matched sub-graph. - """ - start_points = self.replacement_desc.get_internal_input_nodes(graph) - end_points = self.replacement_desc.get_internal_output_nodes(graph) - # check that start and end points exist in the graph - for node_name in start_points + end_points: - if node_name not in graph.nodes(): - log.info('Node "{}" does not exist in the graph. 
Failed to match sub-graph by points "{}".'.format( - node_name, self.replacement_desc.id)) - return None - - matched_nodes = sub_graph_between_nodes(graph, start_points, end_points, include_control_flow=False) - return SubgraphMatch(graph, self.replacement_desc, matched_nodes, - self.replacement_desc.get_inputs_description(), - self.replacement_desc.get_outputs_description(), '') - - def matched_sub_graph_instances(self, graph: Graph): - """ - Generator to product all instances of matched sub-graphs. - :param graph: graph to find instances in. - :return: generator producing SubGraphMatch objects. - """ - if self.replacement_desc.match_kind == 'points': # instance is specified with lists of start/end nodes - match = self._match_sub_graph_for_points(graph) - if match is not None: - yield match - elif self.replacement_desc.match_kind == 'scope': # instance is specified with a node name pattern - for instance in self.replacement_desc.sub_graph_instances(): - match = self._match_sub_graph_for_scope(graph, instance) - if match is not None: - yield match - else: - raise Error('Unsupported match kind "{}". Match kinds "points" or "scope" are supported only. '.format( - self.replacement_desc.match_kind) + - refer_to_faq_msg(35)) diff --git a/tools/mo/openvino/tools/mo/front/tf/AutomlEfficientDet.py b/tools/mo/openvino/tools/mo/front/tf/AutomlEfficientDet.py deleted file mode 100644 index b02c54f1627d4e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/AutomlEfficientDet.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.Pack import Pack -from openvino.tools.mo.front.TransposeOrderNormalizer import TransposeOrderNormalizer -from openvino.tools.mo.front.eltwise_n import EltwiseNReplacement -from openvino.tools.mo.front.tf.pad_tf_to_pad import PadTFToPad -from openvino.tools.mo.ops.DetectionOutput import DetectionOutput -from openvino.tools.mo.ops.activation_ops import Sigmoid -from openvino.tools.mo.ops.priorbox_clustered import PriorBoxClusteredOp -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileGeneral -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.result import Result - - -class EfficientDet(FrontReplacementFromConfigFileGeneral): - replacement_id = 'AutomlEfficientDet' - run_not_recursively = True - - def run_before(self): - from openvino.tools.mo.front.ExpandDimsToUnsqueeze import ExpandDimsToUnsqueeze - return [ExpandDimsToUnsqueeze, Pack, TransposeOrderNormalizer, PadTFToPad, EltwiseNReplacement] - - class AnchorGenerator: - def __init__(self, min_level, aspect_ratios, num_scales, anchor_scale): - self.min_level = min_level - self.aspect_ratios = aspect_ratios - self.anchor_scale = anchor_scale - self.scales = [2 ** (float(s) / num_scales) for s in range(num_scales)] - - def get(self, layer_id): - widths = [] - heights = [] - for s in self.scales: - for a in self.aspect_ratios: - base_anchor_size = 2 ** (self.min_level + layer_id) * self.anchor_scale - heights.append(base_anchor_size * s * a[1]) - widths.append(base_anchor_size * s * a[0]) - return widths, heights - - def 
transform_graph(self, graph: Graph, replacement_descriptions: dict): - parameter_node = graph.get_op_nodes(op='Parameter')[0] - parameter_node['data_type'] = data_type_str_to_np(parameter_node.graph.graph['cmd_params'].data_type) - - # remove existing Result operations to remove unsupported sub-graph - graph.remove_nodes_from([node.id for node in graph.get_op_nodes(op='Result')] + ['detections']) - - # determine if the op which is a input/final result of mean value and scale applying to the input tensor - # then connect it to the input of the first convolution of the model, so we remove the image pre-processing - # which includes padding and resizing from the model - preprocessing_input_node_id = replacement_descriptions['preprocessing_input_node'] - assert preprocessing_input_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \ - 'should be a last node before image normalization and is specified' \ - ' in the json file.'.format(preprocessing_input_node_id) - preprocessing_input_node = Node(graph, preprocessing_input_node_id) - consumer_node = preprocessing_input_node.out_port(0).get_connection().get_destination().node - consumer_node.in_port(0).get_connection().set_source(parameter_node.out_port(0)) - - preprocessing_output_node_id = replacement_descriptions['preprocessing_output_node'] - assert preprocessing_output_node_id in graph.nodes, 'The node with name "{}" is not found in the graph. This ' \ - 'node should provide scaled image output and is specified' \ - ' in the json file.'.format(preprocessing_output_node_id) - preprocessing_output_node = Node(graph, preprocessing_output_node_id) - preprocessing_output_node.out_port(0).disconnect() - - convolution_nodes = [n for n in graph.pseudo_topological_sort() if n.soft_get('type') == 'Convolution'] - convolution_nodes[0].in_port(0).get_connection().set_source(preprocessing_output_node.out_port(0)) - - # create prior boxes (anchors) generator - aspect_ratios = replacement_descriptions['aspect_ratios'] - assert len(aspect_ratios) % 2 == 0 - aspect_ratios = list(zip(aspect_ratios[::2], aspect_ratios[1::2])) - priors_generator = self.AnchorGenerator(min_level=int(replacement_descriptions['min_level']), - aspect_ratios=aspect_ratios, - num_scales=int(replacement_descriptions['num_scales']), - anchor_scale=replacement_descriptions['anchor_scale']) - - prior_boxes = [] - for i in range(100): - inp_name = 'box_net/box-predict{}/BiasAdd'.format('_%d' % i if i else '') - if inp_name not in graph: - break - widths, heights = priors_generator.get(i) - prior_box_op = PriorBoxClusteredOp(graph, {'width': mo_array(widths), - 'height': mo_array(heights), - 'clip': 0, 'flip': 0, - 'variance': replacement_descriptions['variance'], - 'offset': 0.5}) - prior_boxes.append(prior_box_op.create_node([Node(graph, inp_name), parameter_node])) - - # concatenate prior box operations - concat_prior_boxes = Concat(graph, {'axis': -1}).create_node() - for idx, node in enumerate(prior_boxes): - concat_prior_boxes.add_input_port(idx) - concat_prior_boxes.in_port(idx).connect(node.out_port(0)) - - conf = Sigmoid(graph, dict(name='concat/sigmoid')).create_node([Node(graph, 'concat')]) - reshape_size_node = Const(graph, {'value': int64_array([0, -1])}).create_node([]) - logits = Reshape(graph, dict(name=conf.name + '/Flatten')).create_node([conf, reshape_size_node]) - deltas = Reshape(graph, dict(name='concat_1/Flatten')).create_node([Node(graph, 'concat_1'), reshape_size_node]) - - # revert convolution boxes prediction weights from yxYX to 
xyXY (convolutions share weights and bias) - weights = Node(graph, 'box_net/box-predict/pointwise_kernel') - weights.value = weights.value.reshape(-1, 4)[:, [1, 0, 3, 2]].reshape(weights.shape) - bias = Node(graph, 'box_net/box-predict/bias') - bias.value = bias.value.reshape(-1, 4)[:, [1, 0, 3, 2]].reshape(bias.shape) - - detection_output_node = DetectionOutput(graph, dict( - name='detections', - share_location=1, - background_label_id=int(replacement_descriptions['num_classes']) + 1, - nms_threshold=replacement_descriptions['nms_threshold'], - confidence_threshold=replacement_descriptions['confidence_threshold'], - top_k=100, - keep_top_k=100, - code_type='caffe.PriorBoxParameter.CENTER_SIZE', - )).create_node([deltas, logits, concat_prior_boxes]) - - output_op = Result(graph, dict(name='output')) - output_op.create_node([detection_output_node]) diff --git a/tools/mo/openvino/tools/mo/front/tf/BatchMatMul_ext.py b/tools/mo/openvino/tools/mo/front/tf/BatchMatMul_ext.py deleted file mode 100644 index e4b872a15f2250..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/BatchMatMul_ext.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.MatMul import MatMul -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class BatchMatMulExtractor(FrontExtractorOp): - op = 'BatchMatMul' - enabled = True - - @classmethod - def extract(cls, node): - attr = node.pb.attr - attrs = { - 'transpose_a': int(attr['adj_x'].b), - 'transpose_b': int(attr['adj_y'].b), - } - MatMul.update_node_stat(node, attrs) - return cls.enabled - - -class BatchMatMulV2Extractor(FrontExtractorOp): - op = 'BatchMatMulV2' - enabled = True - - @classmethod - def extract(cls, node): - attr = node.pb.attr - attrs = { - 'transpose_a': int(attr['adj_x'].b), - 'transpose_b': int(attr['adj_y'].b), - } - MatMul.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/BatchToSpaceNDToUpsample.py b/tools/mo/openvino/tools/mo/front/tf/BatchToSpaceNDToUpsample.py deleted file mode 100644 index 9d2a3fb5ac2568..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/BatchToSpaceNDToUpsample.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.upsample import UpsampleOp -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph, Node - - -class BatchToSpaceToUpsample(FrontReplacementSubgraph): - """ - The transformation looks for pattern that performs NX upscale of the input image specified in the NHWC layout. 
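For intuition only, the effect of the matched sub-graph, and of the Upsample node it is replaced with, is a nearest-neighbor NX upscale over the spatial axes of an NHWC tensor. The NumPy sketch below uses toy sizes, with the two scale factors standing in for block_shape[0] and block_shape[1].

import numpy as np

def nearest_upsample_nhwc(x, height_scale, width_scale):
    # repeat rows and columns, mirroring the 'nearest' mode of the inserted Upsample node
    x = np.repeat(x, height_scale, axis=1)   # H axis of NHWC
    x = np.repeat(x, width_scale, axis=2)    # W axis of NHWC
    return x

image = np.arange(4, dtype=np.float32).reshape(1, 2, 2, 1)   # toy NHWC input
print(nearest_upsample_nhwc(image, 2, 2).shape)              # (1, 4, 4, 1)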
- """ - enabled = True - - def run_before(self): - from openvino.tools.mo.front.tf.space_to_batch import BatchToSpaceNormalizer - return [BatchToSpaceNormalizer] - - @staticmethod - def pattern(**kwargs): - return dict( - nodes=[ - ('transpose', dict(op='Transpose')), - ('expand_dims', dict(op='Unsqueeze')), - ('tile', dict(op='Tile')), - ('batch_to_space_nd', dict(op='BatchToSpace')), - ('strided_slice', dict(op='StridedSlice')), - ('transpose_back', dict(op='Transpose')), - ], - edges=[ - ('transpose', 'expand_dims', {'out': 0}), - ('expand_dims', 'tile', {'out': 0}), - ('tile', 'batch_to_space_nd', {'out': 0}), - ('batch_to_space_nd', 'strided_slice', {'out': 0}), - ('strided_slice', 'transpose_back', {'out': 0}) - ] - ) - - @staticmethod - def replace_sub_graph(graph: Graph, match: dict, **kwargs): - def _input_node_value(node: Node, port_ind: int): - input_node = node.in_port(port_ind).get_source().node - return input_node.value if input_node.op == 'Const' else None - - transpose = match['transpose'] - transpose_order = _input_node_value(transpose, 1) - if transpose_order is None or not np.all(np.equal(transpose_order, int64_array([1, 2, 3, 0]))): - log.debug('The transpose order {} for node {} is not equal to [1, 2, 3, 0]. Cannot apply ' - 'BatchToSpaceToUpsample transformation.'.format(transpose_order, transpose.name)) - return - - expand_axis = match['expand_dims'] - expand_axis_value = _input_node_value(expand_axis, 1) - if expand_axis_value != 0: - log.debug('The expand axis {} for node {} is not equal to 0. Cannot apply BatchToSpaceToUpsample ' - 'transformation.'.format(expand_axis_value, expand_axis.name)) - return - - tile = match['tile'] - tile_value = _input_node_value(tile, 1) - if tile_value is None: - log.debug('The tile value is not defined for node {}. Cannot apply BatchToSpaceToUpsample ' - 'transformation.'.format(tile.name)) - return - - if len(np.where(tile_value != 1)) != 1: - log.debug('The number of tiles not equal to 1 not equal to 1. Cannot apply BatchToSpaceToUpsample ' - 'transformation.') - return - tile_batch = tile_value[0] - - batch_to_space_nd = match['batch_to_space_nd'] - block_shape = _input_node_value(batch_to_space_nd, 1) - if block_shape is None or tile_batch != np.prod(block_shape): - log.debug('The block shape {} for node {} is not defined or inconsistent with the tile size. Cannot apply ' - 'BatchToSpaceToUpsample transformation.'.format(block_shape, batch_to_space_nd.name)) - return - if len(block_shape) != 2: - log.debug('The block shape len is not equal to 2 for node {}. Cannot apply BatchToSpaceToUpsample ' - 'transformation.'.format(batch_to_space_nd.name)) - return - - crops = _input_node_value(batch_to_space_nd, 2) - if crops is None or np.count_nonzero(crops) != 0: - log.debug('Crops for node {} are non zero. Cannot apply BatchToSpaceToUpsample ' - 'transformation.'.format(batch_to_space_nd.name)) - return - - transpose_back = match['transpose_back'] - transpose_back_order = _input_node_value(transpose_back, 1) - if transpose_back_order is None or not np.all(np.equal(transpose_back_order, int64_array([3, 0, 1, 2]))): - log.debug('The transpose order {} for node {} is not equal to [3, 0, 1, 2]. 
Cannot apply ' - 'BatchToSpaceToUpsample transformation.'.format(transpose_back_order, transpose_back.name)) - return - - upsample_node = UpsampleOp(graph, {'height_scale': block_shape[0], 'width_scale': block_shape[1], - 'mode': 'nearest', - 'name': transpose.name + '/upsample'}).create_node() - - match['transpose'].in_port(0).get_connection().set_destination(upsample_node.in_port(0)) - match['transpose_back'].out_port(0).get_connection().set_source(upsample_node.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/tf/BlockLSTM.py b/tools/mo/openvino/tools/mo/front/tf/BlockLSTM.py deleted file mode 100644 index bcc81156b54c18..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/BlockLSTM.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.error import Error - - -class BlockLSTM(FrontReplacementPattern): - """ - We prepare TensorFlow BlockLSTM op to be replaced with LSTMSequence op that will be repacked to TensorIterator later - - TensorFlow BlockLSTM op description: - - Op parameters: - cell_clip: Value to clip the 'cs' value to. - use_peephole: Whether to use peephole weights. - forget_bias: The forget gate bias. - - Inputs: - 0: seq_len_max: Maximum time length actually used by this input. Outputs are padded with 0s beyond this length - 1: x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs) - 2: cs_prev: Value of the initial cell state - 3: h_prev: Initial output of cell (to be used for peephole) - 4: w: The weight matrix - 5: wci: The weight matrix for input gate peephole connection - 6: wcf: The weight matrix for forget gate peephole connection - 7: wco: The weight matrix for output gate peephole connection - 8: b: The bias vector - - Outputs: - 0: i: The input gate over the whole time sequence - 1: cs: The cell state before the tanh over the whole time sequence - 2: f: The forget gate over the whole time sequence - 3: o: The output gate over the whole time sequence - 4: ci: The cell input over the whole time sequence - 5: co: The cell after the tanh over the whole time sequence - 6: h: The output h vector over the whole time sequence - - Limitations: - - peephole connection, so we check `use_peephole`!=True and cut `wci`, `wco`, `wcf` off - - cell_clip parameter, so we check `cell_clip==-1`, which means we do not clip - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='BlockLSTM'): - if node.use_peephole: - raise Error("BlockLSTM operation is not supported with `use_peephole`==True. Node: {}" - "".format(node.soft_get('name'))) - - if node.cell_clip != -1: - raise Error("Clipping is not supported for BlockLSTM operation. 
`cell_clip`={!s} for node: {}" - "".format(node.cell_clip, node.soft_get('name'))) - - log.debug("Start BlockLSTM->LSTMSequence translation for node: {} with parameters:\n" - "`cell_clip`={!s}, `use_peephole`=={!s}, `forget_bias`={!s}\n" - "inputs: {},\noutputs:{}".format(node.soft_get('name'), node.cell_clip, node.use_peephole, - node.forget_bias, {p: i.id for p, i in node.in_nodes().items()}, - {p: o.id for p, o in node.out_nodes().items()})) - - log.debug("Cutting all inputs for peephole connection (5, 6, 7 input ports) off, as `use_peephole`=False") - log.debug("Cutting seq_len_max input off") - - # disconnect all peephole releated inputs and seq_len_max - for port_idx in [0, 5, 6, 7]: - if node.is_in_port_connected(port_idx): - node.in_port(port_idx).disconnect() - - assert node.is_in_port_connected(1), "Sequence input to the BlockLSTM is required (1 port). Node {}".format( - node.id) - assert node.is_in_port_connected(2), "Value of the initial cell state is required (2 port). Node {}".format( - node.id) - assert node.is_in_port_connected( - 3), "Initial output of cell is required input to BlockLSTM (3 port). Node {}".format(node.id) - assert node.is_in_port_connected( - 4), "The weight matrix is required input to BlockLSTM (4 port) . Node {}".format(node.id) - assert node.is_in_port_connected( - 8), "The bias vector is required input to BlockLSTM (8 port). Node {}".format(node.id) - - # reconnect inputs since OpenVINO LSTMSequence requires different order - # Reconnecting input edges of LSTMSequence: - # TF input edges: Description: MO input edges: - # 1 input 0 - # 4 weights 1 - # 8 biases 2 - # 3 h_prev: initial output of cell 3 - # 2 cs_prev: initial cell state 4 - - input_source = node.in_port(1).get_source() - weights_source = node.in_port(4).get_source() - biases_source = node.in_port(8).get_source() - h_prev_source = node.in_port(3).get_source() - cs_prev_source = node.in_port(2).get_source() - - node.in_port(0).get_connection().set_source(input_source) - node.in_port(1).get_connection().set_source(weights_source) - node.in_port(2).get_connection().set_source(biases_source) - node.in_port(3).get_connection().set_source(h_prev_source) - node.in_port(4).get_connection().set_source(cs_prev_source) - # disconnect original bias input that is no longer needed - if node.is_in_port_connected(8): - node.in_port(8).disconnect() - - # check that all outputs unsupported by OpenVINO LSTMSequence are absent - for output_port_idx in [0, 2, 3, 4, 5]: - if node.is_out_port_connected(output_port_idx): - raise Error("Output port {} of BlockLSTM node {} is not supported".format(node.id, output_port_idx)) - - # Reconnecting output edges of LSTMSequence: - # TF output edges: Description: MO output edges: - # 6 output h vector 0 - # 1 cell state before the tanh 1 - - # we need to move only 6-th output port to 0-th port - if node.is_out_port_connected(6): - node.add_output_port(0, skip_if_exist=True) - node.out_port(6).get_connection().set_source(node.out_port(0)) - node.out_port(6).disconnect() - node.delete_output_port(6, skip_if_absent=True) diff --git a/tools/mo/openvino/tools/mo/front/tf/BlockLSTM_ext.py b/tools/mo/openvino/tools/mo/front/tf/BlockLSTM_ext.py deleted file mode 100644 index 57783dec230c73..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/BlockLSTM_ext.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.BlockLSTM import BlockLSTM -from openvino.tools.mo.front.extractor import 
FrontExtractorOp - - -class BlockLSTMExtractor(FrontExtractorOp): - op = 'BlockLSTM' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'use_peephole': node.pb.attr['use_peephole'].b, - 'cell_clip': node.pb.attr['cell_clip'].f, - 'forget_bias': node.pb.attr['forget_bias'].f, - } - BlockLSTM.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/CTCGreedyDecoderReplacement.py b/tools/mo/openvino/tools/mo/front/tf/CTCGreedyDecoderReplacement.py deleted file mode 100644 index 8eeeb86a685485..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/CTCGreedyDecoderReplacement.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.ops.ctc_greedy_decoder_seq_len import CTCGreedyDecoderSeqLenOp -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph, FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.result import Result - - -def replace_ctc_greedy_decoder(graph: Graph, match: dict): - ctc_greedy_decoder_tf = match['decoder'] - cast = match['cast'] - sparse_to_dense = match['sparse_to_dense'] - sparse_to_dense_name = sparse_to_dense.soft_get('name', sparse_to_dense.id) - ctc_greedy_decoder_tf_name = ctc_greedy_decoder_tf.soft_get('name', ctc_greedy_decoder_tf.id) - - # For normalizing input channel needs to transpose input data from [T, N, C] to [N, T, C] - # which supported CTCGreedyDecoderSeqLen op. - ctc_data_permute = create_op_with_const_inputs(graph, Transpose, {1: int64_array([1, 0, 2])}, - {'name': ctc_greedy_decoder_tf_name + '/ctc_data_permute'}) - - assert ctc_greedy_decoder_tf.has_valid('merge_repeated'), \ - 'The CTCGreedyDecoderSeqLen node "{}" misses "merge_repeated" attribute'.format(ctc_greedy_decoder_tf_name) - - ctc_greedy_decoder_tf.in_port(0).get_source().connect(ctc_data_permute.in_port(0)) - merge_repeated_tf = ctc_greedy_decoder_tf.merge_repeated - ctc_greedy_decoder = CTCGreedyDecoderSeqLenOp(graph, {'name': sparse_to_dense_name, - 'merge_repeated': merge_repeated_tf}).create_node() - rename_nodes( - [(sparse_to_dense, sparse_to_dense_name + '/AbandonedName'), (ctc_greedy_decoder, sparse_to_dense_name)]) - ctc_greedy_decoder.in_port(0).connect(ctc_data_permute.out_port(0)) - ctc_greedy_decoder_tf.in_port(1).get_source().connect(ctc_greedy_decoder.in_port(1)) - - # Set output of the new sub-graph as a source for SparseToDense consumer - sparse_to_dense.out_port(0).get_connection().set_source(ctc_greedy_decoder.out_port(0)) - - # Remove no longer needed nodes - graph.remove_nodes_from([sparse_to_dense.id, cast.id, ctc_greedy_decoder_tf.id]) - - -class CTCGreedyDecoderReplacement(FrontReplacementSubgraph): - """ - TensorFlow CTCGreedyDecoder produces output in a sparse tensor that is not supported by OpenVINO, and - OpenVINO's CTCGreedyDecoderSeqLen has a different output that is in a dense format. So this transformation - intents to replace TF CTCGreedyDecoder+SparseToDense where SparseToDense third input get from input parameter - to CTCGreedyDecoderSeqLen which compatible with IE. 
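A minimal sketch of the layout change performed by the Transpose this transformation inserts: TF CTCGreedyDecoder consumes time-major [T, N, C] data, while CTCGreedyDecoderSeqLen expects batch-major [N, T, C]. The array below is synthetic and only illustrates the permutation order [1, 0, 2].

import numpy as np

T, N, C = 5, 2, 4                                    # toy sizes: time steps, batch, classes
logits_tnc = np.random.rand(T, N, C).astype(np.float32)

logits_ntc = np.transpose(logits_tnc, (1, 0, 2))     # same order as int64_array([1, 0, 2])
print(logits_ntc.shape)                              # (2, 5, 4)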
- """ - enabled = True - - @staticmethod - def pattern(**kwargs): - return dict( - nodes=[('decoder', dict(op='CTCGreedyDecoderSeqLen', output_sparse_format=True)), - ('cast', dict(op='Cast')), - ('sparse_to_dense', dict(op='SparseToDense')) - ], - edges=[('decoder', 'sparse_to_dense', {'out': 0}), - ('decoder', 'cast', {'out': 1}), - ('cast', 'sparse_to_dense', {'out': 0})] - ) - - def replace_sub_graph(self, graph: Graph, match: dict): - replace_ctc_greedy_decoder(graph, match) - - -class CTCGreedyDecoderWithSparseToDenseShapeReplacement(FrontReplacementSubgraph): - """ - TensorFlow CTCGreedyDecoder produces output in a sparse tensor that is not supported by OpenVINO, and - OpenVINO's CTCGreedyDecoderSeqLen has a different output that is in a dense format. So this transformation - intents to replace TF CTCGreedyDecoder+SparseToDense where SparseToDense third input get from CTCGreedyDecoder - second output to CTCGreedyDecoderSeqLen which compatible with IE. - """ - enabled = True - - @staticmethod - def pattern(**kwargs): - return dict( - nodes=[('decoder', dict(op='CTCGreedyDecoderSeqLen', output_sparse_format=True)), - ('cast', dict(op='Cast')), - ('sparse_to_dense', dict(op='SparseToDense')) - ], - edges=[('decoder', 'sparse_to_dense', {'out': 0}), - ('decoder', 'cast', {'out': 1}), - ('decoder', 'sparse_to_dense', {'out': 2}), - ('cast', 'sparse_to_dense', {'out': 0})] - ) - - def replace_sub_graph(self, graph: Graph, match: dict): - replace_ctc_greedy_decoder(graph, match) - - -class CTCGreedyDecoderSingleReplacement(FrontReplacementPattern): - """ - TensorFlow CTCGreedyDecoder produces output in a sparse tensor that is not supported by OpenVINO, and - OpenVINO's CTCGreedyDecoderSeqLen has a different output that is in a dense format. So this transformation - handles a single TF CTCGreedyDecoder and warns the user about another format of the output - """ - enabled = True - - def run_after(self): - return [CTCGreedyDecoderReplacement, CTCGreedyDecoderWithSparseToDenseShapeReplacement] - - def find_and_replace_pattern(self, graph: Graph): - for ctc_greedy_decoder_tf in graph.get_op_nodes(op='CTCGreedyDecoderSeqLen', output_sparse_format=True): - ctc_greedy_decoder_tf_name = ctc_greedy_decoder_tf.soft_get('name', ctc_greedy_decoder_tf.id) - - # TF CTCGreedyDecoder have 4 output tensors. If any of them connected to not Result operation then - # transformation in not applicable - for port_num in ctc_greedy_decoder_tf.out_ports(): - if not ctc_greedy_decoder_tf.out_port(port_num).disconnected()\ - and ctc_greedy_decoder_tf.out_port(port_num).get_destination().node.soft_get('op') != 'Result': - return - - # If the first and second output are not connected to Result operations - - # create Result operation and connect it to appropriate output - if ctc_greedy_decoder_tf.out_port(0).disconnected(): - first_result = Result(graph, - {'name': ctc_greedy_decoder_tf_name + '/decoded_classes'} - ).create_node() - ctc_greedy_decoder_tf.out_port(0).connect(first_result.in_port(0)) - - if ctc_greedy_decoder_tf.out_port(1).disconnected(): - second_result = Result(graph, - {'name': ctc_greedy_decoder_tf_name + '/seq_lengths_output'} - ).create_node() - ctc_greedy_decoder_tf.out_port(1).connect(second_result.in_port(0)) - - - # For normalizing input channel needs to transpose input data from [T, N, C] to [N, T, C] - # which supported CTCGreedyDecoderSeqLen op. - log.warning('Found TF CTCGreedyDecoder operation at the end of network. 
' - 'PLEASE NOTE, appropriate network output operation CTCGreedyDecoderSeqLen {} ' - 'will have dense format, not sparse format!'.format(ctc_greedy_decoder_tf_name)) - ctc_data_permute = create_op_with_const_inputs(graph, Transpose, {1: int64_array([1, 0, 2])}, - {'name': ctc_greedy_decoder_tf_name + '/ctc_data_permute'}) - - assert ctc_greedy_decoder_tf.has_valid('merge_repeated'), \ - 'The CTCGreedyDecoderSeqLen node "{}" misses "merge_repeated" attribute'.format( - ctc_greedy_decoder_tf_name) - - ctc_greedy_decoder_tf.in_port(0).get_source().connect(ctc_data_permute.in_port(0)) - ctc_greedy_decoder_tf.in_port(0).disconnect() - ctc_data_permute.out_port(0).connect(ctc_greedy_decoder_tf.in_port(0)) - - del ctc_greedy_decoder_tf['output_sparse_format'] - - for port_num in [2, 3]: # MO CTCGreedyDecoderSeqLen may have 2 outputs - if port_num in ctc_greedy_decoder_tf.out_ports(): - if not ctc_greedy_decoder_tf.out_port(port_num).disconnected(): - ctc_greedy_decoder_tf.out_port(port_num).disconnect() diff --git a/tools/mo/openvino/tools/mo/front/tf/CTCGreedyDecoder_ext.py b/tools/mo/openvino/tools/mo/front/tf/CTCGreedyDecoder_ext.py deleted file mode 100644 index 99819daa46e909..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/CTCGreedyDecoder_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ctc_greedy_decoder_seq_len import CTCGreedyDecoderSeqLenOp -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class CTCCGreedyDecoderFrontExtractor(FrontExtractorOp): - op = 'CTCGreedyDecoder' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'merge_repeated': bool(node.pb.attr['merge_repeated'].b), - 'output_sparse_format': True, # Special argument for TF CTCGreedyDecoder replacement transformations - } - CTCGreedyDecoderSeqLenOp.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/CTCLossReplacement.py b/tools/mo/openvino/tools/mo/front/tf/CTCLossReplacement.py deleted file mode 100644 index a6db2a3e1e690f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/CTCLossReplacement.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ctc_greedy_decoder_seq_len import CTCGreedyDecoderSeqLenOp -from openvino.tools.mo.ops.ctc_loss import CTCLoss -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes - - -class CTCLossReplacement(FrontReplacementSubgraph): - """ - The CTCLoss appears along with CTCGreedyDecoder operation in particular. Since the TensorFlow* CTCGreedyDecoder - outputs sparse tensor format, the OpenVINO CTCGreedyDecoderSeqLen has a different format and the CTCLoss is also affected - in terms of different format for its inputs. So the corresponding sub-graph with CTCGreedyDecoding and CTCLoss - must be transformed properly. 
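To make the sparse-versus-dense distinction concrete, the standalone sketch below densifies a TF-style sparse decode (indices, values, dense_shape) into the padded [N, T] layout that CTCGreedyDecoderSeqLen produces. The toy values and the -1 padding marker are assumptions for illustration only, not part of the transformation.

import numpy as np

indices = np.array([[0, 0], [0, 1], [0, 2], [1, 0]])   # (batch, time) positions of decoded symbols
values = np.array([7, 3, 3, 5])                        # decoded class ids
dense_shape = (2, 3)                                   # [N, T_max]

dense = np.full(dense_shape, -1, dtype=np.int64)       # -1 chosen here as the padding marker
dense[indices[:, 0], indices[:, 1]] = values
print(dense)
# [[ 7  3  3]
#  [ 5 -1 -1]]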
- """ - enabled = True - - def run_before(self): - from openvino.tools.mo.front.tf.CTCGreedyDecoderReplacement import CTCGreedyDecoderReplacement - return [CTCGreedyDecoderReplacement] - - def pattern(self): - return dict( - nodes=[ - ('transpose', dict(op='Transpose')), - ('ctc_greedy_decoder', dict(op='CTCGreedyDecoderSeqLen', output_sparse_format=True)), - ('cast', dict(op='Cast')), - ('sparse_to_dense', dict(op='SparseToDense')), - ('const', dict(op='Const')), - ('ctc_loss', dict(op='CTCLoss')), - ], - edges=[ - ('transpose', 'ctc_greedy_decoder', {'out': 0, 'in': 0}), - ('transpose', 'ctc_loss', {'out': 0, 'in': 0}), - ('ctc_greedy_decoder', 'sparse_to_dense', {'out': 0, 'in': 0}), - ('ctc_greedy_decoder', 'sparse_to_dense', {'out': 2, 'in': 1}), - ('ctc_greedy_decoder', 'sparse_to_dense', {'out': 1, 'in': 2}), - ('const', 'sparse_to_dense', {'out': 0, 'in': 3}), - ('ctc_greedy_decoder', 'cast', {'out': 1, 'in': 0}), - ('ctc_greedy_decoder', 'ctc_loss', {'out': 0, 'in': 1}), - ('cast', 'ctc_loss', {'out': 0, 'in': 2}) - ]) - - def replace_sub_graph(self, graph: Graph, match: dict): - transpose_tf = match['transpose'] - ctc_greedy_decoder_tf = match['ctc_greedy_decoder'] - cast_tf = match['cast'] - ctc_loss_tf = match['ctc_loss'] - sparse_to_dense_tf = match['sparse_to_dense'] - output_sparse_to_dense_name = sparse_to_dense_tf.soft_get('name', sparse_to_dense_tf.id) - ctc_data_permute = create_op_with_const_inputs(graph, Transpose, {1: int64_array([1, 0, 2])}, - {'name': ctc_greedy_decoder_tf.name + '/ctc_data_permute'}) - ctc_data_permute.in_port(0).connect(transpose_tf.out_port(0)) - - ctc_greedy_decoder_tf_name = ctc_greedy_decoder_tf.soft_get('name', ctc_greedy_decoder_tf.id) - assert ctc_greedy_decoder_tf.has_valid('merge_repeated'), \ - 'The CTCGreedyDecoderSeqLen node "{}" misses "merge_repeated" attribute'.format(ctc_greedy_decoder_tf_name) - merge_repeated_tf = ctc_greedy_decoder_tf.merge_repeated - ctc_greedy_decoder = CTCGreedyDecoderSeqLenOp(graph, {'name': output_sparse_to_dense_name, - 'merge_repeated': merge_repeated_tf}).create_node() - rename_nodes([(sparse_to_dense_tf, output_sparse_to_dense_name + '/AbandonedName'), - (ctc_greedy_decoder, output_sparse_to_dense_name)]) - ctc_greedy_decoder.in_port(0).connect(ctc_data_permute.out_port(0)) - ctc_greedy_decoder.in_port(1).connect(ctc_greedy_decoder_tf.in_port(1).get_connection().get_source()) - - # set output of the new sub-graph as a source for SparseToDense consumer - output_ctc_loss_name = ctc_loss_tf.soft_get('name', ctc_loss_tf.id) - assert ctc_loss_tf.has_valid('preprocess_collapse_repeated'), \ - 'The CTCLoss node "{}" misses "preprocess_collapse_repeated" attribute'.format(output_ctc_loss_name) - assert ctc_loss_tf.has_valid('ctc_merge_repeated'), \ - 'The CTCLoss node "{}" misses "ctc_merge_repeated" attribute'.format(output_ctc_loss_name) - assert ctc_loss_tf.has_valid('unique'), \ - 'The CTCLoss node "{}" misses "unique" attribute'.format(output_ctc_loss_name) - preprocess_collapse_repeated = ctc_loss_tf.preprocess_collapse_repeated - ctc_merge_repeated = ctc_loss_tf.ctc_merge_repeated - unique = ctc_loss_tf.unique - ctc_loss = CTCLoss(graph, {'name': output_ctc_loss_name, - 'preprocess_collapse_repeated': preprocess_collapse_repeated, - 'ctc_merge_repeated': ctc_merge_repeated, - 'unique': unique}).create_node() - rename_nodes([(ctc_loss_tf, output_ctc_loss_name + '/AbandonedName'), (ctc_loss, output_ctc_loss_name)]) - ctc_loss_tf.out_port(0).get_connection().set_source(ctc_loss.out_port(0)) - if 
ctc_loss_tf.logits_time_major: - ctc_loss.in_port(0).connect(ctc_data_permute.out_port(0)) - else: - ctc_loss.in_port(0).connect(transpose_tf.out_port(0)) - ctc_loss.in_port(1).connect(ctc_greedy_decoder_tf.in_port(1).get_connection().get_source()) - ctc_loss.in_port(2).connect(ctc_greedy_decoder.out_port(0)) - ctc_loss.in_port(3).connect(ctc_greedy_decoder.out_port(1)) - - # remove no longer needed nodes - graph.remove_nodes_from([sparse_to_dense_tf.id, cast_tf.id, ctc_loss_tf.id, ctc_greedy_decoder_tf.id]) diff --git a/tools/mo/openvino/tools/mo/front/tf/CTCLoss_ext.py b/tools/mo/openvino/tools/mo/front/tf/CTCLoss_ext.py deleted file mode 100644 index 526a1de26b3511..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/CTCLoss_ext.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ctc_loss import CTCLoss -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class CTCLossFrontExtractor(FrontExtractorOp): - op = 'CTCLoss' - enabled = True - - @classmethod - def extract(cls, node): - # For CTCLoss default value is [N, T] - logits_time_major = True - if 'logits_time_major' in node.pb.attr: - logits_time_major = node.pb.attr['logits_time_major'].b - - attrs = { - 'ctc_merge_repeated': node.pb.attr['ctc_merge_repeated'].b, - 'preprocess_collapse_repeated': node.pb.attr['preprocess_collapse_repeated'].b, - 'logits_time_major': logits_time_major, - # unique is always false for CTCLoss V1 - 'unique': False - } - - CTCLoss.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/Cast_ext.py b/tools/mo/openvino/tools/mo/front/tf/Cast_ext.py deleted file mode 100644 index ba6d6f568bdca3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/Cast_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.common import tf_data_type_decode - - -class CastFrontExtractor(FrontExtractorOp): - op = 'Cast' - enabled = True - - @classmethod - def extract(cls, node): - cast_dst_type = tf_data_type_decode[node.pb.attr['DstT'].type][0] - Cast.update_node_stat(node, {'dst_type': cast_dst_type}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/ClipByValueTFTransformation.py b/tools/mo/openvino/tools/mo/front/tf/ClipByValueTFTransformation.py deleted file mode 100644 index d683fdf24fccfb..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ClipByValueTFTransformation.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from openvino.tools.mo.ops.elementwise import Minimum, Maximum -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph, rename_nodes - - -class ClipByValueTFTransformation(FrontReplacementSubgraph): - """ - The transformation replaces the ClipByValueTF operation which works as Clamp but supports broadcasting of inputs - with Minimum and Maximum. 
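As a quick illustration of the decomposition, using NumPy in place of the Minimum and Maximum ops, clipping with broadcastable bounds is a minimum with the upper bound followed by a maximum with the lower bound; the values are toy data.

import numpy as np

x = np.array([[-3.0, 0.5, 4.0],
              [ 2.0, 7.0, -1.0]], dtype=np.float32)
clip_min = np.float32(0.0)                                # scalar lower bound broadcasts over x
clip_max = np.array([1.0, 2.0, 3.0], dtype=np.float32)    # per-column upper bound also broadcasts

clipped = np.maximum(np.minimum(x, clip_max), clip_min)   # Minimum first, then Maximum, as in the replacement
print(clipped)                                            # [[0.  0.5 3. ] [1.  2.  0. ]]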
- """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for cbv in graph.get_op_nodes(op='ClipByValueTF'): - cbv_name = cbv.soft_get('name', cbv.id) - minimum = Minimum(graph, {'name': cbv_name + '/CLipMinimum'}).create_node() - maximum = Maximum(graph, {'name': cbv_name + '/CLipMaximum'}).create_node() - minimum.in_port(0).connect(cbv.in_port(0).get_source()) - minimum.in_port(1).connect(cbv.in_port(2).get_source()) - maximum.in_port(0).connect(minimum.out_port(0)) - maximum.in_port(1).connect(cbv.in_port(1).get_source()) - cbv.out_port(0).get_connection().set_source(maximum.out_port(0)) - - rename_nodes([(cbv, cbv_name + '/TBR'), (maximum, cbv_name)]) - graph.remove_node(cbv.id) diff --git a/tools/mo/openvino/tools/mo/front/tf/ClipByValue_ext.py b/tools/mo/openvino/tools/mo/front/tf/ClipByValue_ext.py deleted file mode 100644 index df1ff155244405..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ClipByValue_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.ClipByValueTF import ClibByValueTF - - -class ClipByValueExtractor(FrontExtractorOp): - op = 'ClipByValue' - enabled = True - - @classmethod - def extract(cls, node): - ClibByValueTF.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/ComplexAbs.py b/tools/mo/openvino/tools/mo/front/tf/ComplexAbs.py deleted file mode 100644 index 96fa13a35b9e25..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ComplexAbs.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -from openvino.tools.mo.ops.elementwise import Pow -from openvino.tools.mo.ops.ReduceOps import ReduceSum -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np - - -class ComplexAbs(FrontReplacementSubgraph): - enabled = True - - def run_after(self): - from openvino.tools.mo.front.tf.ComplexAbsAfterComplex import ComplexAbsAfterComplex - return [ComplexAbsAfterComplex] - - def find_and_replace_pattern(self, graph: Graph): - for complex_abs in graph.get_op_nodes(op='ComplexAbs'): - complex_abs_name = complex_abs.soft_get('name', complex_abs.id) - power_type = data_type_str_to_np(graph.graph['cmd_params'].data_type) - - squared = create_op_with_const_inputs(graph, Pow, {1: power_type(2.0)}, - {'name': complex_abs_name + '/squared_parts'}) - complex_abs.in_port(0).get_connection().set_destination(squared.in_port(0)) - sum = create_op_with_const_inputs(graph, ReduceSum, {1: int64_array(-1)}, - {'name': complex_abs_name + '/squared_abs'}, - squared) - sqrt = create_op_with_const_inputs(graph, Pow, {1: power_type(0.5)}, {}, sum) - - complex_abs.out_port(0).get_connection().set_source(sqrt.out_port(0)) - - rename_nodes([(complex_abs, complex_abs_name + '/to_be_removed'), (sqrt, complex_abs_name)]) diff --git a/tools/mo/openvino/tools/mo/front/tf/ComplexAbsAfterComplex.py b/tools/mo/openvino/tools/mo/front/tf/ComplexAbsAfterComplex.py deleted file mode 100644 index ec8c4faa5df61b..00000000000000 --- 
a/tools/mo/openvino/tools/mo/front/tf/ComplexAbsAfterComplex.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np -from openvino.tools.mo.ops.elementwise import Add, Pow - - -class ComplexAbsAfterComplex(FrontReplacementSubgraph): - """ - This transformation converts a sub-graph - - SomeOp1 SomeOp2 - | | - ------------ - | - Complex - | - ComplexAbs - - into the sub-graph - - SomeOp1 SomeOp2 - | | - Constant[2]--Pow Pow--Constant[2] - | | - ------------- - Add - | - Pow--Constant[0.5] - """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('complex', dict(op='Complex')), - ('abs', dict(op='ComplexAbs')), - ], - edges=[ - ('complex', 'abs', {'in': 0}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - cmp = match['complex'] - complex_abs = match['abs'] - complex_abs_name = complex_abs.soft_get('name', complex_abs.id) - - power_type = data_type_str_to_np(graph.graph['cmd_params'].data_type) - - pow0 = create_op_with_const_inputs(graph, Pow, {1: power_type(2.0)}, - {'name': complex_abs_name + '/real_part_squared'}) - pow1 = create_op_with_const_inputs(graph, Pow, {1: power_type(2.0)}, - {'name': complex_abs_name + '/imag_part_squared'}) - - cmp.in_port(0).get_connection().set_destination(pow0.in_port(0)) - cmp.in_port(1).get_connection().set_destination(pow1.in_port(0)) - - add = Add(graph, {'name': complex_abs_name + '/squared_abs'}).create_node([pow0, pow1]) - sqrt = create_op_with_const_inputs(graph, Pow, {1: power_type(0.5)}, {}) - add.out_port(0).connect(sqrt.in_port(0)) - - complex_abs.out_port(0).get_connection().set_source(sqrt.out_port(0)) - - rename_nodes([(complex_abs, complex_abs_name + '/to_be_removed'), (sqrt, complex_abs_name)]) diff --git a/tools/mo/openvino/tools/mo/front/tf/CorrectPaddingsForPadAfterComplex.py b/tools/mo/openvino/tools/mo/front/tf/CorrectPaddingsForPadAfterComplex.py deleted file mode 100644 index f5d82e2e682ac9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/CorrectPaddingsForPadAfterComplex.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.concat import Concat - - -class CorrectPaddingsForPadAfterComplex(FrontReplacementSubgraph): - """ - There are TF models with the TF operation Complex that has two real tensors as arguments and returns the complex - tensor with real and imaginary parts given as arguments in port 0 and 1 respectively. 
- - Although TF has a native support of complex numbers, OpenVINO doesn't have such support and emulates a complex - tensor with the shape [N_0, ..., N_{r - 1}] as a real tensor of the shape [N_0, ..., N_{r - 1}, 2] interpreting - any complex number as a tuple of the form - (real part, imaginary part) - That is, the emulated complex tensor has the rank r + 1, not r as in the TF model. - - Hence, when we convert a subgraph of the form - - Complex - | - | - Pad - - we should correct pads_begin and pads_end adding zero at the end of pads_begin and pads_end. - - The transformation performs such corrections. - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.front.tf.pad_tf_to_pad import PadTFToPad - return [PadTFToPad] - - def pattern(self): - return dict( - nodes=[ - ('complex', dict(op='Complex')), - ('pad', dict(op='Pad')), - ], - edges=[ - ('complex', 'pad', {'in': 0}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - pad_node = match['pad'] - pads_begin_node = pad_node.in_port(1).get_source().node - pads_end_node = pad_node.in_port(2).get_source().node - - pads_begin_node_name = pads_begin_node.soft_get('name', pads_begin_node.id) - pads_end_node_name = pads_end_node.soft_get('name', pads_end_node.id) - - concat_for_pads_begin = create_op_with_const_inputs(graph, Concat, - {1: int64_array([0])}, - { - 'name': pads_begin_node_name + '/additional', - 'in_ports_count': 2, - 'axis': 0, - }) - concat_for_pads_end = create_op_with_const_inputs(graph, Concat, - {1: int64_array([0])}, - { - 'name': pads_end_node_name + '/additional', - 'in_ports_count': 2, - 'axis': 0, - }) - pad_node.in_port(1).get_connection().insert_node(concat_for_pads_begin) - pad_node.in_port(2).get_connection().insert_node(concat_for_pads_end) diff --git a/tools/mo/openvino/tools/mo/front/tf/CropAndResizeReplacement.py b/tools/mo/openvino/tools/mo/front/tf/CropAndResizeReplacement.py deleted file mode 100644 index 8f4176a539df38..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/CropAndResizeReplacement.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import add_convolution_to_swap_xy_coordinates, create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class CropAndResizeReplacement(FrontReplacementOp): - """ - The CropAndResize operation from TF gets separate input with boxes coordinates and image batch indices. But - ROIPooling operation in the OpenVINO receives them as a single concatenated input. This replacer - concatenates two inputs into a new one. 
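A rough NumPy sketch of the data rearrangement described above, with toy values: the batch indices become the first column and the TF YXYX box coordinates are reordered to XYXY, giving one [batch_idx, x1, y1, x2, y2] row per box (the real transformation builds this with Unsqueeze, Cast, Concat and a coordinate-swapping convolution).

import numpy as np

boxes_yxyx = np.array([[0.1, 0.2, 0.5, 0.6],
                       [0.0, 0.0, 1.0, 1.0]], dtype=np.float32)   # [y1, x1, y2, x2] per box
batch_indices = np.array([0, 1], dtype=np.float32)

rois = np.concatenate([batch_indices[:, None],                    # Unsqueeze + Cast analogue
                       boxes_yxyx[:, [1, 0, 3, 2]]],              # swap YXYX -> XYXY
                      axis=1)
print(rois)   # each row: [batch_idx, x1, y1, x2, y2]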
- """ - op = "CropAndResize" - enabled = True - - def nodes_to_remove(self, graph: Graph, match: dict): - # do not remove matched node - return [] - - def replace_op(self, graph: Graph, node: Node): - if node.has_and_set('inputs_preprocessed'): - log.debug('Node "{}" has already been preprocessed'.format(node.soft_get('name'))) - return [] - # reshape tensor with batch indices to 2d - unsqueeze_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([1]), - {'name': node.name + '/Unsqueeze'}, node.in_node(2)) - - convert_node = Cast(graph, {'name': unsqueeze_node.name + '/ToFloat', - 'dst_type': data_type_str_to_np(graph.graph['cmd_params'].data_type)}).create_node() - - convert_node.in_port(0).connect(unsqueeze_node.out_port(0)) - - concat_op = Concat(graph, {'axis': 1, 'name': node.name + '/concat_batch_indices_and_boxes', - 'in_ports_count': 2}) - concat_node = concat_op.create_node([convert_node, node.in_node(1)]) - - # do not remove edge with crop_size because it is needed in the partial infer - graph.remove_edge(node.in_node(1).id, node.id) - - # input to the CropAndResize contains boxes coordinates in YXYX layout. But OV layer ROIPooling expects - # coordinates in the XYXY layout, so convolution is added here to swap coordinates - swapped_box_coordinates_node = add_convolution_to_swap_xy_coordinates(graph, concat_node, 5) - - # reshape locations tensor to 2D so it could be passed to Eltwise which will be converted to ScaleShift - reshape_2d_node = create_op_node_with_second_input(graph, Reshape, int64_array([-1, 5]), - dict(name=swapped_box_coordinates_node.id + '/reshape_2d_'), - swapped_box_coordinates_node) - graph.create_edge(reshape_2d_node, node, 0, 1) - - # do not replace any output edge - return [] diff --git a/tools/mo/openvino/tools/mo/front/tf/FakeQuantWithMinMaxVars_ext.py b/tools/mo/openvino/tools/mo/front/tf/FakeQuantWithMinMaxVars_ext.py deleted file mode 100644 index 979f851de6cf41..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/FakeQuantWithMinMaxVars_ext.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.op import Op - - -class FakeQuantWithMinMaxVarsExtractor(FrontExtractorOp): - op = 'FakeQuantWithMinMaxVars' - enabled = True - - @classmethod - def extract(cls, node): - narrow_range = node.pb.attr['narrow_range'].b - num_bits = node.pb.attr['num_bits'].i - levels = 2 ** num_bits - int(narrow_range) - - # we prepare this operation to be converted to FakeQuantize op, - # but input reconnection is needed, so we don't set infer function and type attribute - Op.update_node_stat(node, {'op': 'FakeQuantWithMinMaxVars', 'levels': levels, - 'narrow_range': narrow_range, 'num_bits': num_bits}) - - return cls.enabled - - -class FakeQuantWithMinMaxVarsPerChannelExtractor(FrontExtractorOp): - op = 'FakeQuantWithMinMaxVarsPerChannel' - enabled = True - - @classmethod - def extract(cls, node): - narrow_range = node.pb.attr['narrow_range'].b - num_bits = node.pb.attr['num_bits'].i - levels = 2 ** num_bits - int(narrow_range) - - # we prepare this operation to be converted to FakeQuantize op, - # but input reconnection is needed, so we don't set infer function and type attribute - Op.update_node_stat(node, {'op': 'FakeQuantWithMinMaxVars', 'levels': levels, - 'narrow_range': narrow_range, 'num_bits': num_bits}) - - return cls.enabled diff --git 
a/tools/mo/openvino/tools/mo/front/tf/FlattenToReshape.py b/tools/mo/openvino/tools/mo/front/tf/FlattenToReshape.py deleted file mode 100644 index c502c0c958822a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/FlattenToReshape.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.Pack import Pack -from openvino.tools.mo.front.tf.nearest_neighbor_upsampling import NearestNeighborUpsampling -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -def is_value_is_constant(val: np.ndarray, const: [int, float]): - if val.ndim > 1: - return False - if val.ndim == 1 and len(val) > 1: - return False - return val.item() == const - - -class FlattenToReshapeableReshape(FrontReplacementSubgraph): - """ - The TensorFlow implementation of the Flatten operation is not reshape-able because the batch size is hardcoded - during the constant propagation. This transform sets the 'dim' attribute for the Reshape to [0, -1]. - """ - enabled = True - - def run_after(self): - return [NearestNeighborUpsampling] - - def run_before(self): - return [Pack] - - def pattern(self): - return dict( - nodes=[ - ('shape', dict(op='ShapeOf')), - ('strided_slice', dict(op='StridedSlice')), - ('pack', dict(op='Pack')), - ('const', dict(op='Const')), - ('reshape', dict(op='Reshape')), - ], - edges=[ - ('shape', 'strided_slice', {'in': 0}), - ('strided_slice', 'pack', {'in': 0}), - ('const', 'pack', {'in': 1}), - ('pack', 'reshape', {'in': 1}), - ]) - - @staticmethod - def replace_sub_graph(graph: Graph, match: dict): - strided_slice_node = match['strided_slice'] - const_node = match['const'] - reshape_node = match['reshape'] - pack_node = match['pack'] - - if not const_node.has_valid('value') or not is_value_is_constant(const_node.value, -1): - log.debug('The pattern does not correspond to flatten. The second reshape dimension is not -1. It is {}'. - format(const_node.soft_get('value'))) - return - if len(pack_node.in_nodes()) != 2: - log.debug('The pattern does not correspond to flatten. The "Pack" operation produces tensor with 3 items ' - 'but should produce just 2.') - return - - expected_values = [0, 1, 1] # expected values to a StridedSlice to get the batch size - for ind in range(3): - if not strided_slice_node.in_node(ind + 1).has_valid('value') or \ - not is_value_is_constant(strided_slice_node.in_node(ind + 1).value, expected_values[ind]): - log.debug('The pattern does not correspond to flatten because of the input with index {}. 
The value is ' - '"{}".'.format(ind, strided_slice_node.soft_get('value'))) - return - - reshape_node.in_port(1).disconnect() - reshape_const_node = Const(graph, {'value': int64_array([0, -1]), - 'name': reshape_node.soft_get('name', reshape_node.id) + '/shape'}).create_node() - reshape_node.in_port(1).connect(reshape_const_node.out_port(0)) - reshape_node['special_zero'] = True - log.debug('The node "{}" is actually a Flatten node'.format(reshape_node.soft_get('name'))) diff --git a/tools/mo/openvino/tools/mo/front/tf/GNMT_DynamicSequenceLengths.py b/tools/mo/openvino/tools/mo/front/tf/GNMT_DynamicSequenceLengths.py deleted file mode 100644 index 41891f923b247d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/GNMT_DynamicSequenceLengths.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.middle.TensorIteratorCondition import looking_for_op_in_list -from openvino.tools.mo.ops.elementwise import Mul -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class GNMT_sequence_lengths(FrontReplacementPattern): - """ - This pass matching GNMT-like condition (like in DynamicDecoderConditionMatcher) with path for sequence lengths - calculation: - Seq_len_data -> Max -> Cast -> Mul -> Round -> Cast. - - After matching this pattern: - 1. This replacer looking for encoder sequence lengths node (using information about encoder condition structure) - 2. Create node for multiplying Encoder sequence lengths by 2 (as it works in GNMT). - 3. Connect Encoder sequence lengths value multiplied by 2 with decoder TensorArrays as size. 
- """ - enabled = True - - @staticmethod - def pattern(): - log.debug('+++++++++++++++ GNMT Sequence Lengths ConditionMatching ++++++++++++++++') - return dict( - nodes=[ - ('loop_cond', dict(kind='op', op='LoopCond')), - ('logical_not', dict(kind='op', op='LogicalNot')), - - ('all', dict(kind='op', op='ReduceAnd')), - - ('Merge_16', dict(kind='op', op='Merge')), - - ('NextIteration_16', dict(kind='op', op='NextIteration')), - - ('Switch', dict(kind='op', op='Switch')), - - ('Identity', dict(kind='op', op='Identity')), - - ('Switch_1', dict(kind='op', op='Switch')), - - ('Identity_1', dict(kind='op', op='Identity')), - - ('add', dict(kind='op', op='Add')), - - ('Less_enter', dict(kind='op', op='Enter')), - - ('And', dict(kind='op', op='LogicalAnd')), - - ('Less', dict(kind='op', op='Less')), - ('TensorArrayWrite', dict(kind='op', op='TensorArrayWriteV3')), - ('TensorArrayWrite_1', dict(kind='op', op='TensorArrayWriteV3')), - - ('Max', dict(kind='op', op='ReduceMax')), - ('ToFloat', dict(kind='op', op='Cast')), - ('Mul', dict(kind='op', op='Mul')), - ('Round', dict(kind='op', op='Round')), - ('ToInt', dict(kind='op', op='Cast')), - ], - edges=[ - ('NextIteration_16', 'Merge_16'), - ('Merge_16', 'all'), - - ('all', 'logical_not'), - - ('Less_enter','Less'), - - ('Less', 'And'), - - ('logical_not', 'And'), - ('And', 'loop_cond'), - - ('loop_cond', 'Switch'), - ('Switch', 'Identity'), - ('Identity', 'add'), - - ('loop_cond', 'Switch_1'), - ('Switch_1', 'Identity_1'), - - ('Identity_1', 'TensorArrayWrite'), - ('Identity_1', 'TensorArrayWrite_1'), - - ('Max', 'ToFloat'), - ('ToFloat', 'Mul'), - ('Mul', 'Round'), - ('Round', 'ToInt'), - ('ToInt', 'Less_enter'), - ], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - log.debug('================== GNMTBeforeConditionFind ==================') - input_sequence_lengths = match['Max'].in_port(0).get_source().node - encoder_sequence_lengths = looking_for_op_in_list([port.node for port in input_sequence_lengths.out_port(0).get_destinations()], - 'Identity') - - # Looking for Sequence_length node in encoder looks like: - # Sequence_length -> CheckSeqLen -> Max -> Maximum -> Minimum - - check_seq_len = looking_for_op_in_list([port.node for port in encoder_sequence_lengths.out_port(0).get_destinations()], - 'Identity') - max = looking_for_op_in_list([port.node for port in check_seq_len.out_port(0).get_destinations()], 'ReduceMax') - maximum = max.out_port(0).get_destinations()[0].node - assert maximum.op == 'Maximum' - minimum = maximum.out_port(0).get_destinations()[0].node - assert minimum.op == 'Minimum' - - tensor_seq_len = looking_for_op_in_list([minimum.in_port(port).get_source().node for port in minimum.in_ports()], 'StridedSlice') - - # Create node for multiplying seq_len by 2 - const = Const(graph, {'name': 'FakeSeqLenMultiplyer', 'value': mo_array(2)}).create_node() - mul_op = Mul(graph, {'name': 'FakeSeqLen'}).create_node() - - const.out_port(0).get_connection().set_destination(mul_op.in_port(1)) - tensor_seq_len.out_port(0).get_connection().add_destination(mul_op.in_port(0)) - - # Connect seq_len * 2 to TensorArray from GNMT loop - ta_writes = [port.node for port in match['Identity_1'].out_port(0).get_destinations() if port.node.op == 'TensorArrayWriteV3'] - - for ta_write in ta_writes: - ta = ta_write.in_port(0).get_source().node.in_port(0).get_source().node - - ta.in_port(0).disconnect() - ta.in_port(0).get_connection().set_source(mul_op.out_port(0)) - - if not graph.graph['cmd_params'].static_shape: - log.error( - 
"Model can not be translated in a reshape-able way.\n" - "Model Optimizer key static_shape was turned on to prevent related errors.\n" - "There will be no success changing input shapes of the model with the help of " - "OpenVINO reshape method", extra={'is_warning': True}) - graph.graph['cmd_params'].static_shape = True diff --git a/tools/mo/openvino/tools/mo/front/tf/GRUBlockCellReplacement.py b/tools/mo/openvino/tools/mo/front/tf/GRUBlockCellReplacement.py deleted file mode 100644 index 5f5a8867506678..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/GRUBlockCellReplacement.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.GRUCell import GRUCell -from openvino.tools.mo.ops.split import AttributedSplit -from openvino.tools.mo.ops.transpose import Transpose - - -class GRUBlockCellToGRUCell(FrontReplacementPattern): - """ - This transformation converts TF GRUBlockCell to mo.ops.GRUCell - by alignment of weights and bias inputs. - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for tf_gru_block_cell in graph.get_op_nodes(op='GRUBlockCell'): - original_name = tf_gru_block_cell.soft_get('name', tf_gru_block_cell.id) - new_gru_cell = GRUCell(graph, {}).create_node() - rename_nodes([(tf_gru_block_cell, original_name + '/to_be_removed'), (new_gru_cell, original_name)]) - - # Connect X data port - tf_gru_block_cell.in_port(0).get_connection().set_destination(new_gru_cell.in_port(0)) - # Connect hidden state port - tf_gru_block_cell.in_port(1).get_connection().set_destination(new_gru_cell.in_port(1)) - - # W (Weights) - # z - update, r - reset, h - hidden - # Convert gate order W_rz, W_h -> W_zrh - split_rz_w = AttributedSplit(graph, {'name': original_name + '/Split_W_rz', 'axis': 1, 'num_splits': 2}).create_node() - - # Split W_rz to W_r and W_z - tf_gru_block_cell.in_port(2).get_connection().set_destination(split_rz_w.in_port(0)) - - concat_zrh_w = Concat(graph, {'name': original_name + '/Concat_W_zrh', 'in_ports_count': 3, - 'axis': 1}).create_node() - - # Swap and concat gates: W_rz -> W_zr - split_rz_w.out_port(0).connect(concat_zrh_w.in_port(1)) - split_rz_w.out_port(1).connect(concat_zrh_w.in_port(0)) - - # Conncat W_h gate: W_zr -> W_zrh - tf_gru_block_cell.in_port(3).get_connection().set_destination(concat_zrh_w.in_port(2)) - - # B (Bias) - # z - update, r - reset, h - hidden - # Convert gate order B_rz, B_h -> B_zrh - split_rz_b = AttributedSplit(graph, {'name': original_name + '/Split_B_rz', 'axis': 0, 'num_splits': 2}).create_node() - - # Split B_rz to B_r and B_z - tf_gru_block_cell.in_port(4).get_connection().set_destination(split_rz_b.in_port(0)) - - concat_zrh_b = Concat(graph, {'name': original_name + '/Concat_B_zrh', 'in_ports_count': 3, - 'axis': 0}).create_node() - - # Swap and concat gates: B_rz -> B_zr - split_rz_b.out_port(0).connect(concat_zrh_b.in_port(1)) - split_rz_b.out_port(1).connect(concat_zrh_b.in_port(0)) - - # Concat B_h gate: B_zr -> B_zrh - tf_gru_block_cell.in_port(5).get_connection().set_destination(concat_zrh_b.in_port(2)) - - # Transpose W Shape [input_size + hidden_size, 3 * 
hidden_size] to [3 * hidden_size, input_size + hidden_size] - permute_order = int64_array([1, 0]) - transpose_w = create_op_node_with_second_input(graph, Transpose, permute_order, - dict(name=original_name + '/Transpose_W'), concat_zrh_w) - - transpose_w.out_port(0).connect(new_gru_cell.in_port(2)) - concat_zrh_b.out_port(0).connect(new_gru_cell.in_port(3)) - - tf_gru_block_cell.out_port(3).get_connection().set_source(new_gru_cell.out_port(0)) - graph.remove_nodes_from([tf_gru_block_cell.id]) diff --git a/tools/mo/openvino/tools/mo/front/tf/GRUBlockCell_ext.py b/tools/mo/openvino/tools/mo/front/tf/GRUBlockCell_ext.py deleted file mode 100644 index dd69fc9576d6a7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/GRUBlockCell_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.GRUBlockCell import GRUBlockCell - - -class GRUBlockCellExtractor(FrontExtractorOp): - op = 'GRUBlockCell' - enabled = True - - @classmethod - def extract(cls, node): - GRUBlockCell.update_node_stat(node, {'format': 'tf'}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/GatherTree_ext.py b/tools/mo/openvino/tools/mo/front/tf/GatherTree_ext.py deleted file mode 100644 index 976dbce4797d67..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/GatherTree_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.GatherTree import GatherTree -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class GatherTreeFrontExtractor(FrontExtractorOp): - op = 'GatherTree' - enabled = True - - @classmethod - def extract(cls, node): - GatherTree.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/IteratorGetNextCut.py b/tools/mo/openvino/tools/mo/front/tf/IteratorGetNextCut.py deleted file mode 100644 index 9f0429cd27bdac..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/IteratorGetNextCut.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from collections import defaultdict - -from openvino.tools.mo.front.extractor import add_input_ops -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.convert_data_type import SUPPORTED_DATA_TYPES, np_data_type_to_precision -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern - - -class IteratorGetNextCut(FrontReplacementPattern): - """ - Cuts OneShotIterator -> IteratorGetNext pattern - in order to enable Out Of the Box (OOB) usage. - Pass is run only if user didn't specify any inputs names and shapes. 
- """ - enabled = True - graph_condition = [lambda graph: graph.graph['cmd_params'].input is None] - - def run_before(self): - from openvino.tools.mo.front.output_cut import OutputCut - from openvino.tools.mo.front.input_cut import InputCut - return [OutputCut, InputCut] - - def run_after(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - iter_get_next_shapes = defaultdict(list) - for iter_get_next in graph.get_op_nodes(op='IteratorGetNext'): - iter_get_next_name = iter_get_next.soft_get('name', iter_get_next.id) - for port_idx, port in iter_get_next.out_ports().items(): - if port.disconnected(): - continue - - if not np_data_type_to_precision(iter_get_next.types[port_idx]) in SUPPORTED_DATA_TYPES: - raise Error("In IteratorGetNext node '{}' data type '{}' is not supported".format( - iter_get_next_name, iter_get_next.types[port_idx])) - - iter_get_next_shapes[iter_get_next_name].append(dict( - shape=iter_get_next.shapes[port_idx], - out=port_idx, - data_type=iter_get_next.types[port_idx] - )) - - add_input_ops(graph, iter_get_next_shapes, True) diff --git a/tools/mo/openvino/tools/mo/front/tf/IteratorGetNext_ext.py b/tools/mo/openvino/tools/mo/front/tf/IteratorGetNext_ext.py deleted file mode 100644 index eb359ddaa192c1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/IteratorGetNext_ext.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape -from openvino.tools.mo.ops.op import Op - - -class IteratorGetNextExtractor(FrontExtractorOp): - op = 'IteratorGetNext' - enabled = True - - @classmethod - def extract(cls, node): - shapes = node.pb.attr['output_shapes'].list.shape - tf_types = node.pb.attr['output_types'].list.type - extracted_types = [] - for t in tf_types: - extracted_types.append(tf_dtype_extractor(t)) - result_shapes = [] - for shape_pb in shapes: - result_shapes.append(tf_tensor_shape(shape_pb)) - Op.update_node_stat(node, {'shapes': result_shapes, 'types': extracted_types, 'out_ports_count': len(result_shapes)}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/LookupTableInsert_ext.py b/tools/mo/openvino/tools/mo/front/tf/LookupTableInsert_ext.py deleted file mode 100644 index 007125689fb337..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/LookupTableInsert_ext.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.LookupTableInsert import LookupTableInsert -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class LookupTableInsertFrontExtractor(FrontExtractorOp): - op = 'LookupTableInsert' - enabled = True - - @classmethod - def extract(cls, node): - LookupTableInsert.update_node_stat(node, {}) - return cls.enabled - - -class LookupTableInsertV2FrontExtractor(FrontExtractorOp): - op = 'LookupTableInsertV2' - enabled = True - - @classmethod - def extract(cls, node): - LookupTableInsert.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/LoopCond_ext.py b/tools/mo/openvino/tools/mo/front/tf/LoopCond_ext.py deleted file mode 100644 index 4ac722f9683e57..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/LoopCond_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from 
openvino.tools.mo.front.common.partial_infer.elemental import single_output_infer -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class LoopCondFrontExtractor(FrontExtractorOp): - op = 'LoopCond' - enabled = True - - @classmethod - def extract(cls, node): - node['infer'] = lambda node: single_output_infer( - node, - lambda node: node.in_node(0).shape, - lambda node: node.in_node(0).value - ) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/MapFNTransformation.py b/tools/mo/openvino/tools/mo/front/tf/MapFNTransformation.py deleted file mode 100644 index f3f0df9ec07504..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/MapFNTransformation.py +++ /dev/null @@ -1,396 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.WhileNormalize import WhileNormalize -from openvino.tools.mo.front.tf.custom_subgraph_call import skip_nodes_by_condition -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.middle.pattern_match import find_pattern_matches, inverse_dict -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.loop import Loop -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -def find_subgraph_match_to_pattern(graph: Graph, body_pattern: dict): - """ - Finds sub-graph matches corresponding to the pattern in the graph - :param graph: a graph where to search for matched sub-graph - :param body_pattern: a pattern - :return: a list of sub-graph matches - """ - matches = [] - for match in find_pattern_matches(graph, **body_pattern): - match = inverse_dict(match) - for k in match: - match[k] = Node(graph, match[k]) - matches.append(match) - - return matches - - -class MapFNInputSlicing(FrontReplacementSubgraph): - """ - The transformation handles input slicing in a While loop created by the TensorFlow 2 Map Function primitive - (see tf.map_fn). It avoids TensorListFromTensor and TensorListGetItem operations and replaces the original - sub-graph by adding the axis attribute in the Loop node for slicing inputs. - The transformation is also applicable to TensorFlow 2 Keras Simple RNN, GRU, and LSTM layers. 
- """ - enabled = True - - def run_before(self): - return [WhileNormalize] - - @staticmethod - def get_body_pattern(): - return dict( - nodes=[('tensor_list', dict(op='Parameter')), - ('current_iteration', dict(op='Parameter')), - ('slicing', dict(op='TensorListGetItem')), - ('const_increment', dict(op='Const')), - ('increment_iteration', dict(op='Add')), - ('increment_iteration_identity', dict(op='Identity')), - ('increment_iteration_result', dict(op='Result'))], - edges=[('tensor_list', 'slicing', {'in': 0}), - ('current_iteration', 'slicing', {'in': 1}), - ('const_increment', 'increment_iteration', {'in': 1}), - ('current_iteration', 'increment_iteration', {'in': 0}), - ('increment_iteration', 'increment_iteration_identity', {'in': 0}), - ('increment_iteration_identity', 'increment_iteration_result', {'in': 0})] - ) - - @staticmethod - def get_body_pattern_without_identity(): - return dict( - nodes=[('tensor_list', dict(op='Parameter')), - ('current_iteration', dict(op='Parameter')), - ('slicing', dict(op='TensorListGetItem')), - ('const_increment', dict(op='Const')), - ('increment_iteration', dict(op='Add')), - ('increment_iteration_result', dict(op='Result'))], - edges=[('tensor_list', 'slicing', {'in': 0}), - ('current_iteration', 'slicing', {'in': 1}), - ('const_increment', 'increment_iteration', {'in': 1}), - ('current_iteration', 'increment_iteration', {'in': 0}), - ('increment_iteration', 'increment_iteration_result', {'in': 0})] - ) - - @staticmethod - def transform_map_fn_input_slicing(external_match: dict, internal_match: dict): - """ - Transforms TensorFlow 2 input slicing into use of axis attribute for input port of Loop node - :param external_match: a match used for handling a part of the main graph responsible for input slicing - :param internal_match: a match used for handling a part of the body graph responsible for input slicing - """ - loop_node = external_match['while'] - unstack_node = external_match['unstack'] - body_graph = loop_node['body'] - - tensor_list_get_item_node = internal_match['slicing'] - unstack_placeholder = internal_match['tensor_list'] - tensor_list_get_item_node_name = tensor_list_get_item_node.soft_get('name', tensor_list_get_item_node.id) - - # 1. process the body graph to avoid unsupported operations: TensorListGetItem and TensorListSetItem - # replace TensorListGetItem with Squeeze node and iterate through slices using axis for input port - squeeze_list_element = create_op_with_const_inputs(body_graph, Squeeze, {1: int64_array(0)}, - {'name': 'TensorListGetItemSqueeze'}) - tensor_list_get_item_node.in_port(0).get_connection().set_destination(squeeze_list_element.in_port(0)) - tensor_list_get_item_node.out_port(0).get_connection().set_source(squeeze_list_element.out_port(0)) - rename_nodes([(tensor_list_get_item_node, tensor_list_get_item_node_name + '/AbandonedName'), - (squeeze_list_element, tensor_list_get_item_node_name)]) - unstack_placeholder_layer_id = unstack_placeholder.internal_layer_id - Loop.update_port_map_value_ext(loop_node.input_port_map, 'internal_layer_id', unstack_placeholder_layer_id, - 'axis', 0) - - # 2. 
process locality of Loop node in the main graph to avoid unsupported operations: - # TensorListFromTensor, TensorListReserve, and TensorListStack - # remove TensorListFromTensor and pass a tensor to Loop as is - unstack_node.out_port(0).get_connection().set_source(unstack_node.in_port(0).get_connection().get_source()) - - def find_and_replace_pattern(self, graph: Graph): - for loop_node in graph.get_op_nodes(op='Loop'): - loop_name = loop_node.soft_get('name', loop_node.id) - body_graph = loop_node['body'] - body_pattern = MapFNInputSlicing.get_body_pattern() - body_pattern_without_identity = MapFNInputSlicing.get_body_pattern_without_identity() - internal_matches = find_subgraph_match_to_pattern(body_graph, body_pattern) - internal_matches += find_subgraph_match_to_pattern(body_graph, body_pattern_without_identity) - - for internal_match in internal_matches: - # check if TensorListGetItem from the body graph is connected with TensorListFromTensor - # from the main graph. If yes, the transformation detects input slicing by this port - # and can use Loop axis attribute - unstack_node = Loop.get_external_nodes_by_internal_id(loop_node, - internal_match['tensor_list'].internal_layer_id) - unstack_node = unstack_node[0] if (len(unstack_node) == 1 - and unstack_node[0].op == 'TensorListFromTensor') else None - if unstack_node is None: - log.info("A sub-graph around the loop node {} does not match " - "TensorFlow 2 MapFN pattern for input slicing".format(loop_name)) - continue - - external_match = {'while': loop_node, - 'unstack': unstack_node} - # check that back edges connect correct Parameter and Result nodes in the body - # check connections between body input ports and external inputs ports of Loop node - if Loop.back_edge_exists(loop_node.back_edges, - internal_match['increment_iteration_result'].internal_layer_id, - internal_match['current_iteration'].internal_layer_id): - MapFNInputSlicing.transform_map_fn_input_slicing(external_match, internal_match) - - -class MapFNOutputConcatenation(FrontReplacementSubgraph): - """ - The transformation handles inputs slicing in While loop created by TensorFlow 2 Map Function primitive - (see tf.map_fn). It avoids TensorListReserve, TensorListStack, and TensorListSetItem operations and replaces - the original sub-graph by adding axis attribute in Loop node for concatenation of intermediate output results. - The transformation is also applicable to TensorFlow 2 Keras Simple RNN, GRU, and LSTM layers. 
- """ - enabled = True - - def run_before(self): - return [WhileNormalize] - - @staticmethod - def get_body_pattern(): - return dict( - nodes=[('container', dict(op='Parameter')), - ('current_iteration', dict(op='Parameter')), - ('const_increment', dict(op='Const')), - ('increment_iteration', dict(op='Add')), - ('increment_iteration_identity', dict(op='Identity')), - ('increment_iteration_result', dict(op='Result')), - ('concatenation', dict(op='TensorListSetItem')), - ('concatenation_identity', dict(op='Identity')), - ('concatenation_result', dict(op='Result')), - ], - edges=[('const_increment', 'increment_iteration', {'in': 1}), - ('current_iteration', 'increment_iteration', {'in': 0}), - ('container', 'concatenation', {'in': 0}), - ('current_iteration', 'concatenation', {'in': 1}), - ('concatenation', 'concatenation_identity', {'in': 0}), - ('concatenation_identity', 'concatenation_result', {'in': 0}), - ('increment_iteration', 'increment_iteration_identity', {'in': 0}), - ('increment_iteration_identity', 'increment_iteration_result', {'in': 0})] - ) - - @staticmethod - def get_body_pattern_without_identity(): - return dict( - nodes=[('container', dict(op='Parameter')), - ('current_iteration', dict(op='Parameter')), - ('const_increment', dict(op='Const')), - ('increment_iteration', dict(op='Add')), - ('increment_iteration_result', dict(op='Result')), - ('concatenation', dict(op='TensorListSetItem')), - ('concatenation_result', dict(op='Result')) - ], - edges=[('const_increment', 'increment_iteration', {'in': 1}), - ('current_iteration', 'increment_iteration', {'in': 0}), - ('container', 'concatenation', {'in': 0}), - ('current_iteration', 'concatenation', {'in': 1}), - ('concatenation', 'concatenation_result', {'in': 0}), - ('increment_iteration', 'increment_iteration_result', {'in': 0}) - ] - ) - - @staticmethod - def transform_map_fn_output_concatenation(external_match: dict, internal_match: dict): - """ - Transforms TensorFlow 2 output concatenation into use of axis attribute for output port of Loop node - :param external_match: a match used for handling a part of the main graph responsible for output concatenation - :param internal_match: a match used for handling a part of the body graph responsible for output concatenation - """ - loop_node = external_match['while'] - stack_node = external_match['stack'] - list_reserve_node = external_match['reserve'] - body_graph = loop_node['body'] - - tensor_list_set_item_node = internal_match['concatenation'] - tensor_list_set_item_node_name = tensor_list_set_item_node.soft_get('name', tensor_list_set_item_node.id) - list_result_node = internal_match['concatenation_result'] - - # replace TensorListSetItem with Unsqueeze and use axis attribute for corresponding Result node - # to concatenate results from different iterations - unsqueeze_list_element = create_op_with_const_inputs(body_graph, Unsqueeze, {1: int64_array(0)}, - {'name': 'TensorListSetItemUnsqueeze'}) - tensor_list_set_item_node.in_port(2).get_connection().set_destination(unsqueeze_list_element.in_port(0)) - tensor_list_set_item_node.out_port(0).get_connection().set_source(unsqueeze_list_element.out_port(0)) - rename_nodes([(tensor_list_set_item_node, tensor_list_set_item_node_name + '/AbandonedName'), - (unsqueeze_list_element, tensor_list_set_item_node_name)]) - list_result_node_layer_id = list_result_node.internal_layer_id - Loop.update_port_map_value_ext(loop_node.output_port_map, 'internal_layer_id', list_result_node_layer_id, - 'axis', 0) - - # remove TensorListStack to 
by-pass the node since the result from the Loop node is already concatenated - stack_node.out_port(0).get_connection().set_source(stack_node.in_port(0).get_connection().get_source()) - - # disconnect ListReserve node because it is no longer needed for Loop - list_reserve_node.out_port(0).disconnect() - - # connect a number of iterations with trip count that can be received from the second input of ListReserve - # create a constant network with True value for execution_condition so that OV can ignore execution condition - # and perform trip_counts iterations. This approach with known trip count value allows to avoid dynamism. - loop_node.in_port(1).disconnect() - list_reserve_node.in_port(1).get_source().connect(loop_node.in_port(1)) - for record in loop_node.output_port_map: - if 'purpose' in record and record['purpose'] == 'execution_condition': - exec_cond_layer_id = record['internal_layer_id'] - exec_cond_node = Loop.get_body_node_by_internal_id(loop_node, exec_cond_layer_id) - const_true = Const(body_graph, {'value': mo_array(True, dtype=bool)}).create_node() - exec_cond_node.in_port(0).get_connection().set_source(const_true.out_port(0)) - - # remove back edge - for record in loop_node.back_edges: - if 'from_layer' in record and record['from_layer'] == list_result_node_layer_id: - loop_node.back_edges.remove(record) - - def find_and_replace_pattern(self, graph: Graph): - for loop_node in graph.get_op_nodes(op='Loop'): - loop_name = loop_node.soft_get('name', loop_node.id) - body_graph = loop_node['body'] - body_pattern = MapFNOutputConcatenation.get_body_pattern() - body_pattern_without_identity = MapFNOutputConcatenation.get_body_pattern_without_identity() - internal_matches = find_subgraph_match_to_pattern(body_graph, body_pattern) - internal_matches += find_subgraph_match_to_pattern(body_graph, body_pattern_without_identity) - - for internal_match in internal_matches: - # check if TensorListReserve from the main graph is connected with Parameter node from the body graph - # that is assigned for storing intermediate output results of While Loop. 
If yes, the transformation - # detects intermediate outputs concatenation by this port and can use Loop axis attribute - reserve_node = Loop.get_external_nodes_by_internal_id(loop_node, - internal_match['container'].internal_layer_id) - reserve_node = reserve_node[0] if (len(reserve_node) == 1 and - reserve_node[0].op == 'TensorListReserve') else None - if reserve_node is None: - log.info("A sub-graph around the loop node {} does not match " - "TensorFlow 2 MapFN pattern for intermediate outputs concatenation".format(loop_name)) - continue - stack_node = Loop.get_external_nodes_by_internal_id( - loop_node, internal_match['concatenation_result'].internal_layer_id) - stack_node = stack_node[0] if len(stack_node) == 1 else None - - if stack_node is None: - log.info("A sub-graph around the loop node {} does not match " - "TensorFlow 2 MapFN pattern for intermediate outputs concatenation".format(loop_name)) - continue - - # skip StopGradient node if it exists between While loop output port and TensorListStack operation - stack_node = skip_nodes_by_condition(stack_node, lambda x: x.has_and_set('identity'), True) - stack_node = stack_node if stack_node.op == 'TensorListStack' else None - if stack_node is None: - log.info("A sub-graph around the loop node {} does not match " - "TensorFlow 2 MapFN pattern for intermediate outputs concatenation".format(loop_name)) - continue - - external_match = {'while': loop_node, - 'reserve': reserve_node, - 'stack': stack_node} - # check that back edges connect Parameter node (or container with intermediate output results) - # and concatenation result produced by TensorListSetItem node - if Loop.back_edge_exists(loop_node.back_edges, internal_match['concatenation_result'].internal_layer_id, - internal_match['container'].internal_layer_id) and \ - Loop.back_edge_exists(loop_node.back_edges, - internal_match['increment_iteration_result'].internal_layer_id, - internal_match['current_iteration'].internal_layer_id): - MapFNOutputConcatenation.transform_map_fn_output_concatenation(external_match, internal_match) - - -class TensorListOutputConcatenation(FrontReplacementSubgraph): - """ - The transformation handles inputs slicing in While loop. It avoids TensorListPushBack, and EmptyTensorList - operations and replaces the original sub-graph by adding axis attribute in Loop node for concatenation of - intermediate output results. 
- """ - enabled = True - - def run_before(self): - return [WhileNormalize] - - @staticmethod - def get_body_pattern(): - return dict( - nodes=[('container', dict(op='Parameter')), - ('concatenation', dict(op='TensorListPushBack')), - ('concatenation_result', dict(op='Result')) - ], - edges=[ - ('container', 'concatenation', {'in': 0}), - ('concatenation', 'concatenation_result', {'in': 0}), - ] - ) - - @staticmethod - def transform_tensor_list_output_concatenation(external_match: dict, internal_match: dict): - """ - Transforms TensorFlow 2 output concatenation into use of axis attribute for output port of Loop node - :param external_match: a match used for handling a part of the main graph responsible for output concatenation - :param internal_match: a match used for handling a part of the body graph responsible for output concatenation - """ - loop_node = external_match['while'] - empty_tensor_list_node = external_match['reserve'] - body_graph = loop_node['body'] - - tensor_list_push_back_node = internal_match['concatenation'] - tensor_list_push_back_node_name = tensor_list_push_back_node.soft_get('name', tensor_list_push_back_node.id) - list_result_node = internal_match['concatenation_result'] - - # replace TensorListPushBack with Unsqueeze and use axis attribute for corresponding Result node - # to concatenate results from different iterations - unsqueeze_list_element = create_op_with_const_inputs(body_graph, Unsqueeze, {1: int64_array(0)}, - {'name': tensor_list_push_back_node_name + - '/TensorListPushBackUnsqueeze'}) - tensor_list_push_back_node.in_port(1).get_connection().set_destination(unsqueeze_list_element.in_port(0)) - tensor_list_push_back_node.out_port(0).get_connection().set_source(unsqueeze_list_element.out_port(0)) - rename_nodes([(tensor_list_push_back_node, tensor_list_push_back_node_name + '/AbandonedName'), - (unsqueeze_list_element, tensor_list_push_back_node_name)]) - list_result_node_layer_id = list_result_node.internal_layer_id - Loop.update_port_map_value_ext(loop_node.output_port_map, 'internal_layer_id', list_result_node_layer_id, - 'axis', 0) - - # disconnect EmptyTensorList node because it is no longer needed for Loop - empty_tensor_list_node.out_port(0).disconnect() - - loop_node.in_port(1).disconnect() - empty_tensor_list_node.in_port(1).get_source().connect(loop_node.in_port(1)) - - # remove back edge - for record in loop_node.back_edges: - if 'from_layer' in record and record['from_layer'] == list_result_node_layer_id: - loop_node.back_edges.remove(record) - - def find_and_replace_pattern(self, graph: Graph): - for loop_node in graph.get_op_nodes(op='Loop'): - loop_name = loop_node.soft_get('name', loop_node.id) - body_graph = loop_node['body'] - body_pattern = TensorListOutputConcatenation.get_body_pattern() - internal_matches = find_subgraph_match_to_pattern(body_graph, body_pattern) - - for internal_match in internal_matches: - # check if EmptyTensorList from the main graph is connected with Parameter node from the body graph - # that is assigned for storing intermediate output results of While Loop. 
If yes, the transformation - # detects intermediate outputs concatenation by this port and can use Loop axis attribute - reserve_node = Loop.get_external_nodes_by_internal_id(loop_node, - internal_match['container'].internal_layer_id) - reserve_node = reserve_node[0] if (len(reserve_node) == 1 and - reserve_node[0].op == 'EmptyTensorList') else None - if reserve_node is None: - log.info("A sub-graph around the loop node {} does not match " - "TensorFlow 2 EmptyTensorList->TensorListPushBack pattern for intermediate " - "outputs concatenation".format(loop_name)) - continue - - external_match = {'while': loop_node, - 'reserve': reserve_node} - # check that back edges connect Parameter node (or container with intermediate output results) - # and concatenation result produced by TensorListPushBack node - if Loop.back_edge_exists(loop_node.back_edges, internal_match['concatenation_result'].internal_layer_id, - internal_match['container'].internal_layer_id): - TensorListOutputConcatenation.transform_tensor_list_output_concatenation(external_match, - internal_match) diff --git a/tools/mo/openvino/tools/mo/front/tf/NonConstBeginStridedSliceReplacement.py b/tools/mo/openvino/tools/mo/front/tf/NonConstBeginStridedSliceReplacement.py deleted file mode 100644 index 1b5c7cd10e8daf..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/NonConstBeginStridedSliceReplacement.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class NonConstBeginStridedSliceReplacement(FrontReplacementSubgraph): - r""" - The transformation handles StridedSlice operation with dynamic begin and end values - when slicing performs along just one dimension with a dynamic index. - For example, StridedSlice with begin=(0,idx,0), end=(0,idx+1,0), - and begin_mask=end_mask=shrink_mask=(0, 1, 0) can be replaced with Squeeze(axis=1;Gather(axis=1; Unsqueeze(idx))). 
- The transformation attempts to match to following sub-graph: - - Input ----> StridedSlice(begin_mask, end_mask, and shrink_mask where only element for axis equal to 1) --> OTHER OPS - /\ /\ /\ - | | | - ---------> Pack(Begin) Pack(End) Const(Step) = (1,..,1) - | /\ /\ /\ - | | | | - | | | Const(All others) - | Const(All others) | - Index ---------------------------> Add - /\ - Const(SliceSize)=1------------------| - - And the original sub-graph is transformed as follows: - - Input --------> Gather(axis) ---> Squeeze(axis) ---> OTHER OPS - /\ - | - Index -----> Unsqueeze(axis=1) - - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.front.Pack import Pack - return [Pack] - - @staticmethod - def pattern(**kwargs): - return dict( - nodes=[('begin', dict(op='Pack')), - ('end', dict(op='Pack')), - ('step', dict(op='Const')), - ('strided_slice', dict(op='StridedSlice')), - ], - edges=[('begin', 'strided_slice', {'in': 1}), - ('end', 'strided_slice', {'in': 2}), - ('step', 'strided_slice', {'in': 3})]) - - def replace_sub_graph(self, graph: Graph, match: dict): - strided_slice_node = match['strided_slice'] - begin_node = match['begin'] - end_node = match['end'] - step_node = match['step'] - - # retrieve attribute values - begin_mask = strided_slice_node.soft_get('begin_mask') - end_mask = strided_slice_node.soft_get('end_mask') - shrink_mask = strided_slice_node.soft_get('shrink_axis_mask', int64_array([0])) - - # check applicability of this transformation to the given sub-graph: - # 1. check that slicing is performed along just one axis - if np.sum(begin_mask) != 1 or np.sum(end_mask) != 1 or np.sum(shrink_mask) != 1: - return - # 2. check that shrink axis is equal to slicing axis - if not np.array_equal(np.argwhere(begin_mask == 1), np.argwhere(end_mask == 1)) or \ - not np.array_equal(np.argwhere(begin_mask == 1), np.argwhere(shrink_mask == 1)): - return - sliced_axis = np.argwhere(begin_mask == 1)[0][0] - # 3. check constant nodes for begin and end correspond to non-slicing axes - for idx_port, in_port in begin_node.in_ports().items(): - if idx_port != sliced_axis and in_port.get_source().node.soft_get('type') != 'Const' or \ - idx_port == sliced_axis and in_port.get_source().node.soft_get('type') == 'Const': - return - for idx_port, in_port in end_node.in_ports().items(): - if idx_port != sliced_axis and in_port.get_source().node.soft_get('type') != 'Const' or \ - idx_port == sliced_axis and in_port.get_source().node.soft_get('type') == 'Const': - return - # 4. check that offset of begin and end values for slicing axis is constant - add_node = end_node.in_port(sliced_axis).get_source().node - slice_start_index_node = begin_node.in_port(sliced_axis).get_source().node - if add_node.soft_get('type') != 'Add': - return - - if add_node.in_port(1).get_source().node.soft_get('type') == 'Const': - slice_size_node = add_node.in_port(1).get_source().node - if add_node.in_port(0).get_source().node.id != slice_start_index_node.id: - return - elif add_node.in_port(0).get_source().node.soft_get('type') == 'Const': - slice_size_node = add_node.in_port(0).get_source().node - if add_node.in_port(1).get_source().node.id != slice_start_index_node.id: - return - else: - return - slice_size = slice_size_node.value - step_value = step_node.value[sliced_axis] - - # 5. 
check that step_value equal to 1 and step_value equal to 1 - # TODO: support other cases when slice_size not equal to 1 and step_value not equal to 1 - if slice_size != 1 or step_value != 1: - return - - # unsqueeze a scalar by which to slice input tensor - strided_slice_name = strided_slice_node.soft_get('name', strided_slice_node.id) - unsqueeze_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array(0)}, - {'name': strided_slice_name + '/Unsqueeze'}) - add_node.in_port(0).get_connection().add_destination(unsqueeze_node.in_port(0)) - - # replace StridedSlice with Gather operation that supports dynamic indices for slicing - gather_node = create_op_with_const_inputs(graph, Gather, {2: int64_array(sliced_axis)}, - {'name': strided_slice_name + '/Gather'}) - strided_slice_node.in_port(0).get_connection().set_destination(gather_node.in_port(0)) - unsqueeze_node.out_port(0).connect(gather_node.in_port(1)) - - # squeeze Gather output since sliced axis is shrinked - squeeze_node = create_op_with_const_inputs(graph, Squeeze, {1: int64_array(sliced_axis)}, - {'name': strided_slice_name + '/Squeeze'}) - squeeze_node.in_port(0).connect(gather_node.out_port(0)) - rename_nodes( - [(strided_slice_node, strided_slice_name + '/AbandonedName'), (squeeze_node, strided_slice_name)]) - - # preserve a name of original StridedSlice node - strided_slice_node.out_port(0).get_connection().set_source(squeeze_node.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/tf/ObjectDetectionAPI.py b/tools/mo/openvino/tools/mo/front/tf/ObjectDetectionAPI.py deleted file mode 100644 index 3c94f408884218..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ObjectDetectionAPI.py +++ /dev/null @@ -1,1865 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -""" -The file contains necessary transformations to convert models created with a TensorFlow Object Detection framework from -the https://github.com/tensorflow/models/blob/master/research/object_detection/ repository. There is a dedicated -OpenVINO document describing overall procedure of conversion these models with the Model Optimizer: -https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_prepare_model_convert_model_tf_specific_Convert_Object_Detection_API_Models.html - -Conversion of most of the TF OD API models requires execution of several transformations defined in this file. The list -of transformations to be executed for a particular model type (meta-architecture) is defined in the transformation -configuration JSON file located in the "openvino/tools/mo/front/tf/" directory. A file should be specified using the -"--transformations_config" command line parameter. An additional parameter -"--tensorflow_object_detection_api_pipeline_config" should be specified with the path to the pipeline.config used for -the model training. - -Refer to the code comments of a particular transformation for the explanation of its purpose and low-level -implementation details. 
-""" -import collections -import logging as log -from math import sqrt - -import numpy as np - -from openvino.tools.mo.front.Pack import Pack -from openvino.tools.mo.front.TransposeOrderNormalizer import TransposeOrderNormalizer -from openvino.tools.mo.front.split_normalizer import SqueezeAxis -from openvino.tools.mo.front.tf.CropAndResizeReplacement import CropAndResizeReplacement -from openvino.tools.mo.front.FakeQuantWithMinMaxVars import FakeQuantWithMinMaxVarsToQuantize -from openvino.tools.mo.front.tf.MapFNTransformation import MapFNInputSlicing, MapFNOutputConcatenation,\ - TensorListOutputConcatenation -from openvino.tools.mo.front.tf.TFSliceToSlice import TFSliceToSliceReplacer -from openvino.tools.mo.front.tf.pad_tf_to_pad import PadTFToPad -from openvino.tools.mo.middle.InsertLayoutPropagationTransposes import mark_as_correct_data_layout, \ - mark_input_as_in_correct_layout, mark_output_as_in_correct_layout -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.DetectionOutput import DetectionOutput -from openvino.tools.mo.ops.ReduceOps import ReduceMean -from openvino.tools.mo.ops.activation_ops import Sigmoid -from openvino.tools.mo.ops.elementwise import Mul, Sub, Add, Div -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.ops.priorbox_clustered import PriorBoxClusteredOp -from openvino.tools.mo.ops.proposal import ProposalOp -from openvino.tools.mo.ops.psroipooling import PSROIPoolingOp -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.layout import get_batch_dim, get_height_dim, get_width_dim -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension, mo_array, dynamic_dimension_value -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.extractor import output_user_data_repack, add_output_ops -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.front.tf.custom_subgraph_call import skip_nodes_by_condition -from openvino.tools.mo.front.tf.graph_utils import add_activation_function_after_node, add_convolution_to_swap_xy_coordinates, \ - add_fake_background_loc, create_op_node_with_second_input, create_op_with_const_inputs -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileSubGraph, FrontReplacementFromConfigFileGeneral -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.clamp import AttributedClamp -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.crop import Crop -from openvino.tools.mo.ops.op import PermuteAttrs -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.result import Result -from openvino.tools.mo.ops.roipooling import ROIPooling -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.softmax import Softmax -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.ops.tile import Tile -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.graph import backward_bfs_for_operation, bfs_search, clear_tensor_names_info, sub_graph_between_nodes -from openvino.tools.mo.utils.pipeline_config import PipelineConfig -from openvino.tools.mo.utils.shape import node_to_get_shape_value_of_indices - -missing_param_error = 'To convert the model specify 
path to the pipeline configuration file which was used to ' \ - 'generate the model. Please use "--tensorflow_object_detection_api_pipeline_config" option:\n' \ - '--tensorflow_object_detection_api_pipeline_config ""\nIf you have ' \ - 'downloaded the model file from the Object Detection Model zoo repository then this file is ' \ - 'located in the archive with frozen model and called "pipeline.config".\nIf you did not use ' \ - 'this command line parameter before that means that you are using currently deprecated ' \ - 'TensorFlow* Object Detection API models conversion mechanism.' - - -def _value_or_raise(match: SubgraphMatch, pipeline_config: PipelineConfig, key: str): - """ - Returns value from the 'custom_attributes' of the 'match' object or pipeline_config associated with a key 'key'. - If the value doesn't exist then raise error. - :param match: SubgraphMatch object containing 'custom_attributes'. - :param pipeline_config: PipelineConfig object with parsed values. - :param key: key to search for. - :return: the requested value. - """ - if match and key in match.custom_replacement_desc.custom_attributes: - return match.custom_replacement_desc.custom_attributes[key] - value = pipeline_config.get_param(key) - if value is None: - raise Error('The sub-graph replacer "[REPLACEMENT_ID]" was not able to find the value for key "{}" in the ' - 'pipeline configuration file specified with the --tensorflow_object_detection_api_pipeline_config ' - 'command line parameter. Update the sub-graph replacement configuration file specified with the ' - '--transformations_config command line parameter by adding key "{}" with required ' - 'value to the "custom_attributes" dictionary of the "[REPLACEMENT_ID]" replacer.'.format(key, key)) - return value - - -def _find_ssd_head_node(graph: Graph, ssd_head_index: int, head_type: str): - """ - Finds the SSD head node with index 'ssd_head_index' in the topology. The parameter 'head_type' specifies what type - of the head is requested: with box predictions or class predictions. - :param graph: graph with the topology. - :param ssd_head_index: index of the SSD head. - :param head_type: either 'box' or 'class' string specifying type of the SSD head node. - :return: the requested Node or None if node is not found. - """ - if head_type == 'box': - possible_node_names = ['BoxPredictor_%d/BoxEncodingPredictor/BiasAdd' % ssd_head_index, - 'WeightSharedConvolutionalBoxPredictor/BoxPredictor/BiasAdd' if ssd_head_index == 0 else - 'WeightSharedConvolutionalBoxPredictor_%d/BoxPredictor/BiasAdd' % ssd_head_index] - elif head_type == 'class': - possible_node_names = ['BoxPredictor_%d/ClassPredictor/BiasAdd' % ssd_head_index, - 'WeightSharedConvolutionalBoxPredictor/ClassPredictor/BiasAdd' if ssd_head_index == 0 - else 'WeightSharedConvolutionalBoxPredictor_%d/ClassPredictor/BiasAdd' % ssd_head_index] - else: - raise Error('SSD heads can be of type "box" and "class" only.') - - head_node = None - for head_node_name in possible_node_names: - if head_node_name in graph.nodes(): - assert (head_node is None) # only one of the possible node names should exist in the graph - head_node = Node(graph, head_node_name) - return head_node - - -def _variance_from_pipeline_config(pipeline_config: PipelineConfig): - """ - Generates a numpy array with variances values from the pipeline_config object. The order of the elements is the - following: variance x, variance y, variance box width, variance box height. - :param pipeline_config: pipeline_config object to get variances from. 
- :return: the numpy array with variances. - """ - return 1.0 / mo_array([pipeline_config.get_param('frcnn_variance_x'), - pipeline_config.get_param('frcnn_variance_y'), - pipeline_config.get_param('frcnn_variance_width'), - pipeline_config.get_param('frcnn_variance_height')]) - - -def _skip_node_of_type(node: Node, node_ops_to_skip: list): - """ - Skips nodes of specified ops starting from node 'node'. - :param node: node to start skipping Identity nodes. - :return: node of the op - """ - # skip the Identity node - while len(node.out_edges()) == 1 and node.op in node_ops_to_skip: - node = node.out_node() - return node - - -def _relax_reshape_nodes(graph: Graph, pipeline_config: PipelineConfig): - """ - Finds the 'Reshape' operations following the SSD head nodes which have hard-coded output dimensions and replaces - them with new ones with one of the dimensions sizes equal to -1. This function is used to make TF OD API SSD models - reshape-able. - :param graph: graph with the topology. - :param pipeline_config: PipelineConfig object with parsed values. - :return: None - """ - num_classes = pipeline_config.get_param('num_classes') - num_layers = pipeline_config.get_param('ssd_anchor_generator_num_layers') - if num_layers is None: - num_layers = pipeline_config.get_param('multiscale_anchor_generator_max_level') - \ - pipeline_config.get_param('multiscale_anchor_generator_min_level') + 1 - for ssd_head_ind in range(num_layers): - input_node = _find_ssd_head_node(graph, ssd_head_ind, 'box') - assert (input_node is not None) - old_reshape_node = _skip_node_of_type(input_node.out_node(), - ['Identity', 'FakeQuantWithMinMaxVars', 'FakeQuantize']) - assert old_reshape_node.op == 'Reshape' - reshape_size_node = Const(graph, {'value': int64_array([0, -1, 1, 4])}).create_node([]) - new_reshape_op = Reshape(graph, {'name': input_node.id + '/Reshape'}) - new_reshape_node = new_reshape_op.create_node([input_node, reshape_size_node]) - old_reshape_node.replace_node(new_reshape_node) - - # fix hard-coded value for the number of items in tensor produced by the convolution to make topology reshapable - input_node = _find_ssd_head_node(graph, ssd_head_ind, 'class') - assert (input_node is not None) - old_reshape_node = _skip_node_of_type(input_node.out_node(), - ['Identity', 'FakeQuantWithMinMaxVars', 'FakeQuantize']) - - assert old_reshape_node.op == 'Reshape' - reshape_size_node_2 = Const(graph, {'value': int64_array([0, -1, num_classes + 1])}).create_node([]) - new_reshape_op_2 = Reshape(graph, {'name': input_node.id + '/Reshape'}) - new_reshape_node_2 = new_reshape_op_2.create_node([input_node, reshape_size_node_2]) - old_reshape_node.replace_node(new_reshape_node_2) - - -def _create_prior_boxes_node(graph: Graph, pipeline_config: PipelineConfig): - """ - The function creates one or several PriorBoxClustered nodes based on information from the pipeline configuration - files. The PriorBoxClustered nodes get input data from SSD 'heads' and from the placeholder node (just to get - input image size). - :param graph: graph with the topology. - :param pipeline_config: PipelineConfig object with parsed values. - :return: node generating prior boxes. 
- """ - min_scale = pipeline_config.get_param('ssd_anchor_generator_min_scale') - max_scale = pipeline_config.get_param('ssd_anchor_generator_max_scale') - num_layers = pipeline_config.get_param('ssd_anchor_generator_num_layers') - aspect_ratios = pipeline_config.get_param('ssd_anchor_generator_aspect_ratios') - if not isinstance(aspect_ratios, list): - aspect_ratios = [aspect_ratios] - - # prior boxes have to be generated using the image size used for training - image_height = pipeline_config.get_param('resizer_image_height') - image_width = pipeline_config.get_param('resizer_image_width') - min_im_shape = min(image_height, image_width) - _base_anchor_height = pipeline_config.get_param('ssd_anchor_generator_base_anchor_height') - _base_anchor_width = pipeline_config.get_param('ssd_anchor_generator_base_anchor_width') - base_anchor_size = [min_im_shape / image_height * _base_anchor_height, - min_im_shape / image_width * _base_anchor_width] - reduce_boxes_in_lowest_layer = True - if pipeline_config.get_param('ssd_anchor_generator_reduce_lowest') is not None: - reduce_boxes_in_lowest_layer = pipeline_config.get_param('ssd_anchor_generator_reduce_lowest') - - if pipeline_config.get_param('ssd_anchor_generator_scales') is not None: - scales = pipeline_config.get_param('ssd_anchor_generator_scales') + [1.0] - else: - scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1) for i in range(num_layers)] + [1.0] - - prior_box_nodes = [] - for ssd_head_ind in range(num_layers): - ssd_head_node = _find_ssd_head_node(graph, ssd_head_ind, 'box') - assert (ssd_head_node is not None) - - if ssd_head_ind == 0 and reduce_boxes_in_lowest_layer: - widths = [0.1, min_scale * sqrt(2.0), min_scale * sqrt(0.5)] - heights = [0.1, min_scale / sqrt(2.0), min_scale / sqrt(0.5)] - else: - widths = [scales[ssd_head_ind] * sqrt(ar) for ar in aspect_ratios] - heights = [scales[ssd_head_ind] / sqrt(ar) for ar in aspect_ratios] - - interpolated_scale_ar = pipeline_config.get_param('ssd_anchor_generator_interpolated_scale_aspect_ratio') - if interpolated_scale_ar > 0.0: - widths += [sqrt(scales[ssd_head_ind] * scales[ssd_head_ind + 1]) * interpolated_scale_ar] - heights += [sqrt(scales[ssd_head_ind] * scales[ssd_head_ind + 1]) / interpolated_scale_ar] - widths = [w * image_width * base_anchor_size[1] for w in widths] - heights = [h * image_height * base_anchor_size[0] for h in heights] - - variance = _variance_from_pipeline_config(pipeline_config) - prior_box_op = PriorBoxClusteredOp(graph, {'width': mo_array(widths), 'height': mo_array(heights), - 'clip': 0, 'flip': 0, 'variance': variance, 'offset': 0.5, - }) - # connect the PriorBoxClustered node with the "Cast" node of the Placeholder node because the pass that removes - # Cast operations is executed in the middle phase and it will fail when there are several consumers of the - # Placeholder - input_node_name = 'image_tensor' if 'image_tensor' in graph.nodes else 'input_tensor' - prior_box_node = prior_box_op.create_node([ssd_head_node, Node(graph, input_node_name).out_node(0)], - {'name': 'PriorBoxClustered_{}'.format(ssd_head_ind)}) - prior_box_nodes.append(prior_box_node) - if len(prior_box_nodes) == 1: - return prior_box_nodes[0] - else: - concat_prior_boxes_op = Concat(graph, {'axis': -1, 'in_ports_count': len(prior_box_nodes)}) - return concat_prior_boxes_op.create_node(prior_box_nodes, {'name': 'ConcatPriorBoxesClustered'}) - - -def _create_multiscale_prior_boxes_node(graph: Graph, pipeline_config: PipelineConfig): - """ - The function creates one or 
several PriorBoxClustered nodes based on information from the pipeline configuration - files. The PriorBoxClustered nodes get input data from SSD 'heads' and from the placeholder node (just to get - input image size). - :param graph: graph with the topology. - :param pipeline_config: PipelineConfig object with parsed values. - :return: node generating prior boxes. - """ - min_level = pipeline_config.get_param('multiscale_anchor_generator_min_level') - max_level = pipeline_config.get_param('multiscale_anchor_generator_max_level') - anchor_scale = pipeline_config.get_param('multiscale_anchor_generator_anchor_scale') - aspect_ratios = pipeline_config.get_param('multiscale_anchor_generator_aspect_ratios') - scales_per_octave = pipeline_config.get_param('multiscale_anchor_generator_scales_per_octave') - - prior_box_nodes = [] - scales = [2 ** (float(scale) / scales_per_octave) for scale in range(scales_per_octave)] - for level in range(min_level, max_level + 1): - base_anchor_size = 2 ** level * anchor_scale - - ssd_head_ind = level - min_level - ssd_head_node = _find_ssd_head_node(graph, ssd_head_ind, 'box') - assert (ssd_head_node is not None) - - widths = [base_anchor_size * scale * sqrt(ar) for ar in aspect_ratios for scale in scales] - heights = [base_anchor_size * scale / sqrt(ar) for ar in aspect_ratios for scale in scales] - - variance = _variance_from_pipeline_config(pipeline_config) - prior_box_op = PriorBoxClusteredOp(graph, {'width': mo_array(widths), 'height': mo_array(heights), - 'clip': 0, 'flip': 0, 'variance': variance, - 'offset': 0.5, - }) - # connect the PriorBoxClustered node with the "Cast" node of the Placeholder node because the pass that removes - # Cast operations is executed in the middle phase and it will fail when there are several consumers of the - # Placeholder - prior_box_node = prior_box_op.create_node([ssd_head_node, Node(graph, 'image_tensor').out_node(0)], - {'name': 'PriorBoxClustered_{}'.format(ssd_head_ind)}) - prior_box_nodes.append(prior_box_node) - if len(prior_box_nodes) == 1: - return prior_box_nodes[0] - else: - concat_prior_boxes_op = Concat(graph, {'axis': -1, 'in_ports_count': len(prior_box_nodes)}) - return concat_prior_boxes_op.create_node(prior_box_nodes, {'name': 'ConcatPriorBoxesClustered'}) - - -def insert_weights_swap_xy_sub_graph(graph: Graph, connection): - """ - Inserts a sub-graph of operations which does the following: - 1. Reshapes the input tensor (should be convolution weights/biases) to [-1, 2]. - 2. Swaps slices of data [:, 0] and [:, 1]. - 3. Reshapes tensor to the initial shape. 
- """ - weights_producer = connection.get_source() - name = weights_producer.node.soft_get('name', weights_producer.node.id) - - # this Shape operation must be inferred and constant folded - origin_shape = Shape(graph, {'name': name + '/OriginShape', 'force_dead_node': True}).create_node() - origin_shape.in_port(0).connect(weights_producer) - - reshaped = create_op_node_with_second_input(graph, Reshape, int64_array([-1, 2]), {'name': name + '/Reshape2D'}) - reshaped.in_port(0).connect(weights_producer) - - swapped_weight = Gather(graph, {'name': name + '/SwappedWeights'}).create_node() - gather_indices = Const(graph, - {'name': swapped_weight.name + '/Indices', 'value': int64_array([1, 0])}).create_node() - gather_axis = Const(graph, {'name': swapped_weight.name + '/Axis', 'value': int64_array(1)}).create_node() - swapped_weight.in_port(0).connect(reshaped.out_port(0)) - swapped_weight.in_port(1).connect(gather_indices.out_port(0)) - swapped_weight.in_port(2).connect(gather_axis.out_port(0)) - - reshape_back = Reshape(graph, {'name': name + '/ReshapeBack'}).create_node() - reshape_back.in_port(0).connect(swapped_weight.out_port(0)) - reshape_back.in_port(1).connect(origin_shape.out_port(0)) - - connection.set_source(reshape_back.out_port(0)) - - -def swap_weights_xy(graph: Graph, nodes: list): - """ - The function changes weights of the nodes from the 'nodes' list which are used with calculations with coordinates of - some objects. The function should be used when it is necessary to virtually change the layout of data from XY to YX. - The node from the 'nodes' list should be some sort of convolution node or matrix multiplication. - The function also swaps weights in the following Add and BiasAdd operations. - :param graph: graph with the topology. - :param nodes: list of Node objects to change the weights in them. - :return: None - """ - producers_ports = set() - for node in nodes: - # need to skip the FakeQuantize node if it exists - weights_producer = node.in_port(1).get_source() - if weights_producer.node.soft_get('type') == 'FakeQuantize': - weights_producer = weights_producer.node.in_port(0).get_source() - producers_ports.add(weights_producer) - - for producers_port in producers_ports: - log.debug('Swapping weights for node "{}"'.format(producers_port.node.name)) - insert_weights_swap_xy_sub_graph(graph, producers_port.get_connection()) - - for node in nodes: - # swap biases - for m in [n.node for n in node.out_port(0).get_destinations()]: - if m.soft_get('type') in ['Add', 'BiasAdd']: - insert_weights_swap_xy_sub_graph(graph, m.in_port(1).get_connection()) - - -def calculate_shape_keeping_aspect_ratio(height: int, width: int, min_size: int, max_size: int): - """ - The function changes spatial sizes of the image keeping aspect ratio to satisfy provided requirements. - The behavior of this function is equivalent to the output shape calculation of the pre-processor block of TensorFlow - Object Detection API models with keep aspect ratio resizer. - - :param height: input height. - :param width: input width. - :param min_size: size limit. - :param max_size: size limit. - :return: the tuple with scaled image height, width. 
- """ - ratio_min = min_size / min(height, width) - ratio_max = max_size / max(height, width) - ratio = min(ratio_min, ratio_max) - return int(round(height * ratio)), int(round(width * ratio)) - - -def calculate_placeholder_spatial_shape(graph: Graph, match: SubgraphMatch, pipeline_config: PipelineConfig): - """ - The function calculates the preprocessed shape of the input image for a TensorFlow Object Detection API model. - It uses various sources to calculate it: - 1. The shape passed using the '--input_shape' command line parameter. - 2. The values from the pipeline configuration file describing Preprocessor block of the topology: - a. If the fixed size resizer is used then size passed via '--input_shape' can override them, but Model Optimizer - prints warning. If the '--input_shape' is not defined then use values from the pipeline configuration file. - b. If the keep aspect ratio resizer is used then scale the size passed via '--input_shape' using the provided - limits. If the '--input_shape' is not defined then use shape as (min_dimension_size, min_dimension_size) - defined in the pipeline configuration file. If the "pad_to_max_dimension" attribute is set to true then the - output shape will always be (max_dimension_size, max_dimension_size). - - :param graph: graph with the topology. - :param match: the object containing matching sub-graph and custom attributes from the sub-graph replacement file. - :param pipeline_config: the object contain information from the pipeline configuration file. - :return: tuple (height, width) of the placeholder shape. - """ - height = None - width = None - user_shapes = graph.graph['user_shapes'] - - if match and ('preprocessed_image_height' in match.custom_replacement_desc.custom_attributes or - 'preprocessed_image_width' in match.custom_replacement_desc.custom_attributes): - log.error('The "preprocessed_image_height" or "preprocessed_image_width" is specified in the sub-graph ' - 'replacement configuration file but they are ignored. 
Please, specify desired input shape using the ' - '"--input_shape" command line parameter.', extra={'is_warning': True}) - - user_defined_height = None - user_defined_width = None - input_name = 'input_tensor' if 'input_tensor' in graph.nodes else 'image_tensor' - if user_shapes and input_name in user_shapes and user_shapes[input_name]: - user_defined_shape = user_shapes[input_name][0]['shape'] - if user_defined_shape is not None: - user_defined_height = user_defined_shape[1].get_min_length() if user_defined_shape[1].is_static else dynamic_dimension_value - user_defined_width = user_defined_shape[2].get_min_length() if user_defined_shape[2].is_static else dynamic_dimension_value - - # the parameters below are set if the fixed_shape_resizer is used - resizer_height = pipeline_config.get_param('resizer_image_height') - resizer_width = pipeline_config.get_param('resizer_image_width') - if resizer_height and resizer_width: - log.debug('The model resizes image to a fixed shape: ({}, {})'.format(resizer_height, resizer_width)) - if user_defined_height and user_defined_width: - if user_defined_width != resizer_width or user_defined_width != resizer_width: - log.error('The model expects that the input image is resized to a fixed shape ({}, {}), but the shape ' - 'provided with the "--input_shape" command line parameter is different ({}, {}).'.format( - resizer_height, resizer_width, user_defined_height, user_defined_width), extra={'is_warning': True}) - height = user_defined_height - width = user_defined_width - else: - height = resizer_height - width = resizer_width - - # the parameters below are set if keep_aspect_ratio_resizer is used - resizer_min_dimension = pipeline_config.get_param('resizer_min_dimension') - resizer_max_dimension = pipeline_config.get_param('resizer_max_dimension') - pad_to_max_dimension = pipeline_config.get_param('pad_to_max_dimension') - if resizer_min_dimension and resizer_max_dimension: - log.debug('The model resizes image using keep aspect ratio with minimum size {}, maximum size {}, pad {}.' - ''.format(resizer_min_dimension, resizer_max_dimension, pad_to_max_dimension)) - if pad_to_max_dimension: - if user_defined_height and user_defined_width: - log.error('The model contains pre-processing block which resizes image keeping aspect ratio with a ' - 'padding to max dimension. The only valid model input image spatial shape after ' - 'pre-processing is ({}, {}). Ignoring the user provided input shapes.' - ''.format(resizer_max_dimension, resizer_max_dimension), extra={'is_warning': True}) - height = width = resizer_max_dimension - else: - log.error('Model Optimizer removes pre-processing block of the model which resizes image keeping aspect ' - 'ratio. OpenVINO does not support dynamic image size so the Intermediate Representation ' - 'file is generated with the input image size of a fixed size.', extra={'is_warning': True}) - if user_defined_height and user_defined_width: - scaled_height, scaled_width = calculate_shape_keeping_aspect_ratio(user_defined_height, - user_defined_width, - resizer_min_dimension, - resizer_max_dimension) - if scaled_height != user_defined_height or scaled_width != user_defined_width: - log.error('The model resizes the input image keeping aspect ratio with min dimension {}, max ' - 'dimension {}. 
The provided input height {}, width {} is transformed to height {}, width ' - '{}.'.format(resizer_min_dimension, resizer_max_dimension, user_defined_height, - user_defined_width, scaled_height, scaled_width), extra={'is_warning': True}) - height = scaled_height - width = scaled_width - else: - height = width = resizer_min_dimension - log.error('Specify the "--input_shape" command line parameter to override the default shape which is ' - 'equal to ({}, {}).'.format(height, width), extra={'is_warning': True}) - - if height is None or width is None: - raise Error('Failed to determine the placeholder shape. Unsupported image resizer from the pipeline.config was ' - 'used to create the model.') - return height, width - - -def update_parameter_shape(graph: Graph, match: [SubgraphMatch, None]): - """ - Updates the shape of the model Parameter node based on the user provided input shape or values provided in the - pipeline.config configuration file used for model training. - :param graph: model graph - :param match: Match object with information about matched sub-graph - :return: tuple with input node names and Parameter Node - """ - argv = graph.graph['cmd_params'] - if argv.tensorflow_object_detection_api_pipeline_config is None: - raise Error(missing_param_error) - - pipeline_config = PipelineConfig(argv.tensorflow_object_detection_api_pipeline_config) - if argv.tensorflow_object_detection_api_pipeline_config is None: - raise Error(missing_param_error) - - initial_input_node_name = 'input_tensor' if 'input_tensor' in graph.nodes else 'image_tensor' - if initial_input_node_name not in graph.nodes(): - raise Error('Input node "{}" of the graph is not found. Do not run the Model Optimizer with ' - '"--input" command line parameter.'.format(initial_input_node_name)) - parameter_node = Node(graph, initial_input_node_name) - - # set default value of the batch size to 1 if user didn't specify batch size and input shape - layout = graph.graph['layout'] - batch_dim = get_batch_dim(layout, 4) - if argv.batch is None and parameter_node.shape[batch_dim] is dynamic_dimension: - parameter_node.shape[batch_dim] = 1 - height, width = calculate_placeholder_spatial_shape(graph, match, pipeline_config) - parameter_node.shape[get_height_dim(layout, 4)] = height - parameter_node.shape[get_width_dim(layout, 4)] = width - return initial_input_node_name, parameter_node - - -def mark_squeeze_reshape_concat_before_detection_output(start_nodes: list): - """ - The function looks for Reshape, Concat and Squeeze ops after the 'start_nodes' with 4D output and marks them with - proper attributes to infer them in original NHWC layout. This is a case of the TensorFlow Object Detection API - models for the SSD heads output which produces 4D tensor with bounding box deltas. - - :param start_nodes: list of nodes to start search from. - :return: None - """ - q = collections.deque() - visited = set() - q.extend(start_nodes) - while len(q) != 0: - cur_node = q.popleft() - visited.add(cur_node.id) - if cur_node.has_valid('type'): - if cur_node.soft_get('type') == 'DetectionOutput': # do not go beyond the DetectionOutput node - continue - # the input to Reshape comes from Convolution so it will be converted from NCHW to NHWC layout in the - # InsertLayoutPropagationTransposes transformation. 
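A quick numeric check of the keep-aspect-ratio arithmetic implemented in calculate_shape_keeping_aspect_ratio and used by calculate_placeholder_spatial_shape above. The 600/1024 resizer limits are typical sample values, not values read from a specific pipeline.config:

```python
def keep_aspect_ratio_shape(height, width, min_size, max_size):
    # mirrors the arithmetic of calculate_shape_keeping_aspect_ratio
    ratio = min(min_size / min(height, width), max_size / max(height, width))
    return int(round(height * ratio)), int(round(width * ratio))

# a 720x1280 image with min_dimension=600, max_dimension=1024
print(keep_aspect_ratio_shape(720, 1280, 600, 1024))   # (576, 1024) - limited by max_size
# a square 1200x1200 image with the same limits
print(keep_aspect_ratio_shape(1200, 1200, 600, 1024))  # (600, 600) - limited by min_size
```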
But the output should be kept in the original layout - if cur_node.soft_get('type') == 'Reshape': - mark_output_as_in_correct_layout(cur_node, 0) - - # Concat should be inferred in the original layout so the input with concatenation axis should not be - # updated from NHWC to NCHW layout - if cur_node.soft_get('type') == 'Concat': - cur_node.in_port(1).__setattr__('input_permutation', None) - cur_node['nchw_layout'] = True - cur_node.out_node(0)['nchw_layout'] = True - - # Squeeze should be inferred in the original layout so the input with squeeze axis should not be updated - # from NHWC to NCHW layout. The input is marked as in correct layout to prevent from inserting Transpose - # from NHWC to NCHW. - if cur_node.soft_get('type') == 'Squeeze': - cur_node.in_port(1).__setattr__('input_permutation', None) - mark_input_as_in_correct_layout(cur_node, 0) - - if cur_node.has_port('out', 0): - [q.append(port.node) for port in cur_node.out_port(0).get_destinations() if port.node.id not in visited] - - -class ObjectDetectionAPITransformationsStart(FrontReplacementPattern): - """ - This is an anchor transformation which is used to distinguish transformations related to TF OD API models. - All such transformations have a dependency to be executed after this transformation (or some other TF OD API - transformation which is executed after this one). - Some transformations which swap convolution weights using the "swap_weights_xy" function rely on the fact that the - "FakeQuantWithMinMaxVars" operations are decomposed into "FakeQuantize"s. - """ - enabled = True - - def run_after(self): - return [FakeQuantWithMinMaxVarsToQuantize] - - def find_and_replace_pattern(self, graph: Graph): - pass - - -class ObjectDetectionAPITransformationsFinish(FrontReplacementPattern): - """ - This is an anchor transformation which is used to separate transformations related to TF OD API models. - All such transformations have a dependency to be executed before this transformation (or some other TF OD API - transformation which is executed before this one). - 1. This anchor transformation is executed before any other standard MO transformations which may break the model - conversion. For example, PadTFToPad replaces PadTF operation nodes with the Pad operation nodes and re-uses an - input node defining the pad value. The scope pattern matcher will remove the node defining the pad value and the - newly created Pad operation becomes invalid. - 2. Another common reason why some transformations should be executed after the TF OD API transformations is that - these transformations replace some nodes with new nodes that have a different "id" attribute. Since the pattern - matcher is based on the node "id" (not "name") attribute the matching will be broken. - 3. Some TF OD API transformations mark TF CropAndResize nodes with a specific flag which is then handled in the - CropAndResizeReplacement transformation, which is why the latter should be executed after this transformation. 
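The traversal in mark_squeeze_reshape_concat_before_detection_output above is a plain BFS that stops at DetectionOutput and only touches Reshape/Concat/Squeeze nodes. A minimal structural sketch over a toy graph (the dict representation and node names are mine; the real code marks layout attributes on MO Node objects instead of collecting ids):

```python
import collections

# Toy graph: node id -> (op type, list of successor ids). Purely illustrative.
toy_graph = {
    'conv':    ('Conv2D', ['reshape']),
    'reshape': ('Reshape', ['concat']),
    'concat':  ('Concat', ['squeeze', 'do']),
    'squeeze': ('Squeeze', ['do']),
    'do':      ('DetectionOutput', []),
}

def collect_nodes_to_mark(graph, start_ids):
    """BFS from the SSD head outputs, stopping at DetectionOutput, mirroring the traversal above."""
    queue, visited, to_mark = collections.deque(start_ids), set(), []
    while queue:
        node_id = queue.popleft()
        if node_id in visited:
            continue
        visited.add(node_id)
        op, successors = graph[node_id]
        if op == 'DetectionOutput':  # do not go beyond the DetectionOutput node
            continue
        if op in ('Reshape', 'Concat', 'Squeeze'):
            to_mark.append(node_id)
        queue.extend(successors)
    return to_mark

print(collect_nodes_to_mark(toy_graph, ['conv']))  # ['reshape', 'concat', 'squeeze']
```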
- """ - enabled = True - # cleanup the graph after applying of TF OD API transformations to remove a lot of unconnected nodes to avoid issues - # with shape inference - force_clean_up = True - - def run_before(self): - return [Pack, TransposeOrderNormalizer, PadTFToPad, SqueezeAxis, TFSliceToSliceReplacer, MapFNInputSlicing, - MapFNOutputConcatenation, TensorListOutputConcatenation, CropAndResizeReplacement] - - def find_and_replace_pattern(self, graph: Graph): - pass - - -def get_specific_ops_with_const_inputs(first_node: Node, allowed_ops: list, forward: bool = True): - """ - Returns the list with information about consecutive nodes of operation from "allowed_ops". - - :param first_node: The first node (not included) to start looking for nodes from the "allowed_ops" list - :param allowed_ops: list of allowed operations - :param forward: flag specifying direction of search - :return: list of triplets (Node, const_port_index, const_value) - """ - node = first_node.out_port(0).get_destination().node if forward else first_node.in_port(0).get_source().node - result = [] # (Node, port # with constant input, value) - while node.soft_get('op') in allowed_ops: - num_in_ports = len(node.in_ports()) - assert num_in_ports == 2, 'The node "{}" should have exactly 2 inputs, but it has only {}.' \ - ''.format(node.soft_get('name', node.id), num_in_ports) - for port in (0, 1): - if node.in_port(port).get_source().node.has_valid('value'): # this is a constant input to the node - result.append((node, port, node.in_port(port).get_source().node.value.copy())) - node = node.out_port(0).get_destination().node if forward else node.in_port(1 - port).get_source().node - break - return result - - -def get_preprocessing_ops(graph: Graph, start_node_id_suffix: str, end_node_id_suffix: str): - """ - Finds a sequence of pre-processing nodes (Sub, Mul, Div and Add) after the node with the id suffix - 'end_node_id_suffix' or ending with the node with id suffix 'end_node_id_suffix'. - - :param graph: graph to look for pre-processing ops - :param start_node_id_suffix: suffix of the start node name - :param end_node_id_suffix: suffix of the end node name - :return: the list with pre-processing nodes information and flag specifying nodes position - """ - start_node = None - end_node = None - for node in graph.get_op_nodes(): - if node.id.endswith(start_node_id_suffix): - start_node = node - if node.id.endswith(end_node_id_suffix): - end_node = node - - assert start_node is not None and end_node is not None, \ - 'Failed to find start/end nodes of the pre-processing block. The section of the transformation JSON ' \ - 'configuration file related to "ObjectDetectionAPIPreprocessor2Replacement" transformation should be updated ' \ - 'for this particular model.' 
- allowed_ops = ['Sub', 'Mul', 'Div', 'Add'] - preprocessing_nodes = get_specific_ops_with_const_inputs(start_node, allowed_ops, False) - trailing = False # switch to apply newly created pre-processing nodes after/before start_node/end_node - if len(preprocessing_nodes) == 0: - preprocessing_nodes = get_specific_ops_with_const_inputs(end_node, allowed_ops, True) - trailing = True - - # try to detect floating-point casting inside the body graph - # that also needs to stay in the resulted graph - casting = False - cast_nodes = backward_bfs_for_operation(start_node, ['Cast']) - if len(cast_nodes) == 1 and cast_nodes[0].dst_type == np.float32: - casting = True - - return preprocessing_nodes, trailing, casting - - -""" -Object Detection API models contain the sub-graph that performs some (not necessarily all) of the following tasks -(possibly in different order): -* Resizes image according to the constraints defined in the pipeline.config file. -* Applies mean and scale values. -* Pads the resized image to the size specified in the pipeline.config file. -This sub-graph is called "Preprocessor" in TF1 OD API models and early versions of the TF2 OD API models. Starting from -version 2.4 the block is called "map". The sub-graph has one output with the pre-processed input image and optionally -has a second output which contains either the original image size or the resized image size (before padding). When the -second output exists it is used to map predicted bounding boxes of the resized image to the original image coordinates. - -Model Optimizer removes nodes performing image resize and padding, but keeps nodes applying mean and scale values. -Historically, Model Optimizer didn't support converting TF sub-graphs into TensorIterator/Loop from TF 1 models so this -was the only option to convert the model and avoid dynamism which occurs when keep_aspect_ratio resizer is used. And the -user should resize the image the same way as it is implemented in the model before feeding the data to the Inference -Engine. - -If the "keep_aspect_ratio" resizer with "pad_to_max_dimension" parameter equal to "true" is used and mean/scale -operations are applied before the resize like this: - -input_tensor -> mean/scale -> resize -> pad -> ... - -then it is not allowed to remove the resize and padding operations and pre-process the input data before feeding the -model like this: - -resized_padded_input_data -> mean/scale -> ... - -because the output results will be different because mean/scale operations will be applied for padding area as well. So -the only option in this case is to remove all pre-processing operations from the model and expect that user perform them -before feeding the model. -""" - - -class ObjectDetectionAPIPreprocessorReplacement(FrontReplacementFromConfigFileSubGraph): - """ - The transformation is triggered for the pre-processing block which resizes the input image and applies mean/scale - values in the TF1 OD API models. 
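The constraint described in the module-level comment above about "pad_to_max_dimension" is easy to demonstrate with numpy: applying mean/scale before padding (as the model does) and after padding (as the user would have to do once the resize/pad nodes are cut out) gives different values in the padded area. The scale/mean constants below are illustrative only:

```python
import numpy as np

# Two-pixel "image"; scale and mean are made-up values, not from a real pipeline.config.
image = np.array([10.0, 20.0])
scale, mean = 2.0 / 255.0, 1.0

model_like = np.pad(image * scale - mean, (0, 2))  # scale/mean first, then pad (as in the model)
naive      = np.pad(image, (0, 2)) * scale - mean  # pad first, then scale/mean outside the model

print(model_like)  # padded area stays 0.0
print(naive)       # padded area becomes -1.0, so the two results differ
```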
- """ - replacement_id = 'ObjectDetectionAPIPreprocessorReplacement' - run_not_recursively = True - - def run_before(self): - return [ObjectDetectionAPITransformationsFinish] - - def run_after(self): - return [ObjectDetectionAPITransformationsStart] - - def nodes_to_remove(self, graph: Graph, match: SubgraphMatch): - new_nodes_to_remove = match.matched_nodes_names() - # do not remove nodes that perform input image scaling and mean value subtraction - for node_to_keep in ('Preprocessor/sub', 'Preprocessor/sub/y', 'Preprocessor/mul', 'Preprocessor/mul/x'): - if node_to_keep in new_nodes_to_remove: - new_nodes_to_remove.remove(node_to_keep) - return new_nodes_to_remove - - def is_preprocessing_applied_before_resize(self, to_float: Node, mul: Node, sub: Node): - """ - The function checks if the output of 'to_float' operation is consumed by 'mul' or 'sub'. If this is true then - the pre-processing (mean/scale) is applied before the image resize. The image resize was applied first in the - original version of the TF OD API models, but in the recent versions it is applied after. - - :param to_float: the Cast node which converts the input tensor to Float - :param mul: the Mul node (can be None) - :param sub: the Sub node - :return: the result of the check - """ - assert sub is not None, 'The Sub node should not be None. Check the caller function.' - if mul is not None: - return any([port.node.id == mul.id for port in to_float.out_port(0).get_destinations()]) - else: - return any([port.node.id == sub.id for port in to_float.out_port(0).get_destinations()]) - - def generate_sub_graph(self, graph: Graph, match: SubgraphMatch): - sub_node = match.output_node(0)[0] - # sanity check whether this is really TF OD API model. The Sub operation always exists in TF1 OD API models - # pre-processing sub-graph - if sub_node.soft_get('op') != 'Sub': - raise Error('The output op of the Preprocessor sub-graph is not of type "Sub". Looks like the topology is ' - 'not created with TensorFlow Object Detection API.') - - # identify the node performing scale (if it exists) - mul_node = None - if sub_node.in_port(0).get_source().node.soft_get('op') == 'Mul': - log.info('There is image scaling node in the Preprocessor block.') - mul_node = sub_node.in_port(0).get_source().node - - # update the model Parameter node shape based on MO command line parameters and values in the pipeline.config - initial_input_node_name, placeholder_node = update_parameter_shape(graph, match) - - to_float_node = placeholder_node.out_port(0).get_destination().node - # one more sanity check - if to_float_node.soft_get('op') != 'Cast': - raise Error('The output of the node "{}" is not Cast operation. 
Cannot apply transformation.'.format( - initial_input_node_name)) - - if self.is_preprocessing_applied_before_resize(to_float_node, mul_node, sub_node): - # connect sub node directly to nodes which consume resized image - resize_output_node_id = 'Preprocessor/map/TensorArrayStack/TensorArrayGatherV3' - if resize_output_node_id not in graph.nodes: - raise Error('There is no expected node "{}" in the graph.'.format(resize_output_node_id)) - resize_output = Node(graph, resize_output_node_id) - for dst_port in resize_output.out_port(0).get_destinations(): - dst_port.get_connection().set_source(sub_node.out_port(0)) - else: - # connect to_float_node directly with node performing scale on mean value subtraction - if mul_node is None: - to_float_node.out_port(0).connect(sub_node.in_port(0)) - else: - to_float_node.out_port(0).connect(mul_node.in_port(1)) - - log.error('The Preprocessor block has been removed. Only nodes performing mean value subtraction and scaling ' - '(if applicable) are kept.', extra={'is_warning': True}) - # the pre-processing sub-graph is connected with the main graph, so there is no need to return new nodes mapping - # dictionary - return {} - - -class ObjectDetectionAPIPreprocessor2Replacement(FrontReplacementFromConfigFileGeneral): - """ - The transformation is triggered for the pre-processing block which resizes the input image and applies mean/scale - values in the TF2 OD API model. Only nodes related to applying mean/scaling values are kept. - If the mean/scale values are applied before the resize and the pre-processing includes padding then mean/scale - values are removed as well. Refer to the comments section before the ObjectDetectionAPIPreprocessorReplacement - transformation. - - There are 6 possible cases: - 1. ... -> Scale -> Start -> Resize -> End -> ... - 2. ... -> Start -> Resize -> End -> Scale -> ... - 3. ... -> Start -> Resize -> End -> ... - 4. ... -> Start -> While (... -> Scale -> Resize -> ...) -> End -> ... - 5. ... -> Start -> While (... -> Resize -> Scale -> ...) -> End -> ... - 6. ... -> Start -> While (... -> Resize -> ...) -> End -> ... - - Where: - - "Start" - is the node name specified in the transformation configuration file - - "End" - is the node name specified in the transformation configuration file - - "Scale" - a node or a sequence of element-wise nodes like Mul, Add, Sub or Div with Const input - - "While" (... nodes ... ) - a Loop operation with body nodes specified in parentheses - - "Resize" - the Resize sub-graph being removed - - The transformation creates a new sub-graph of pre-processing nodes if in the original model it is inside the Loop, - or keeps the existing one if they are in the main graph already. 
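For cases 4 and 5 in the list above the Mul/Sub chain lives inside the Loop body, so the transformation rebuilds an equivalent chain in the main graph from the (node, const_port, value) triplets collected by get_preprocessing_ops. A numpy stand-in for what the rebuilt chain computes, with assumed constants:

```python
import numpy as np

def rebuild_preprocessing(triplets, data, cast_to_float=True):
    """Apply the collected (op, const_port, value) triplets to input data, in topological order."""
    if cast_to_float:  # mirrors the optional Cast detected in the Loop body graph
        data = data.astype(np.float32)
    ops = {'Add': np.add, 'Sub': np.subtract, 'Mul': np.multiply, 'Div': np.divide}
    for op, const_port, value in triplets:
        # const_port tells on which side of the binary op the constant originally was
        data = ops[op](value, data) if const_port == 0 else ops[op](data, value)
    return data

# assumed (x * 2/255 - 1) style normalization
triplets = [('Mul', 1, np.float32(2.0 / 255.0)), ('Sub', 1, np.float32(1.0))]
print(rebuild_preprocessing(triplets, np.array([0, 128, 255], dtype=np.uint8)))
# [-1.          0.00392157  1.        ]
```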
- """ - replacement_id = 'ObjectDetectionAPIPreprocessor2Replacement' - run_not_recursively = True - - def run_before(self): - return [ObjectDetectionAPITransformationsFinish] - - def run_after(self): - return [ObjectDetectionAPITransformationsStart] - - def transform_graph(self, graph: Graph, replacement_descriptions: dict): - argv = graph.graph['cmd_params'] - if argv.tensorflow_object_detection_api_pipeline_config is None: - raise Error(missing_param_error) - - pipeline_config = PipelineConfig(argv.tensorflow_object_detection_api_pipeline_config) - pad_to_max_dimension = pipeline_config.get_param('pad_to_max_dimension') - - # update the model Parameter node shape based on MO command line parameters and values in the pipeline.config - update_parameter_shape(graph, None) - - # NOTE: this transformation can be implemented as a "scope" or "points" transformation since we need to match - # some sub-graph between specific nodes - start_nodes = replacement_descriptions['start_nodes'] - end_nodes = replacement_descriptions['end_nodes'] - - start_nodes = [node_id for node_id in start_nodes if node_id in graph.nodes] - end_nodes = [node_id for node_id in end_nodes if node_id in graph.nodes] - - assert len(start_nodes) >= 1 - start_node = Node(graph, start_nodes[0]) - - assert len(end_nodes) >= 1 - end_node = Node(graph, end_nodes[0]) - - # determine nodes between specified input and output nodes to check if there is a Loop op among them - sub_graph_node_ids = sub_graph_between_nodes(graph, start_nodes, end_nodes, include_control_flow=False, - allow_non_reachable_end_nodes=True) - - pre_processing_in_loop = False - # If the pre-processing block contains Loop operation then mean and scale value should be obtained from it using - # some pre-defined marker nodes existing for all pre-processing blocks. - # If there is no Loop then pre-processing nodes are in the main graph and they should be obtained from it - loop_nodes_ids = [node_id for node_id in sub_graph_node_ids if graph.nodes[node_id].get('op') == 'Loop'] - if len(loop_nodes_ids): - assert len(loop_nodes_ids) == 1, 'There should be exactly one Loop node in the pre-processor block.' - pre_processing_in_loop = True - loop_node = Node(graph, loop_nodes_ids[0]) - body_graph = loop_node.body - # we stick to the nodes with ids 'map/while/Preprocessor/unstack' and 'map/while/Preprocessor/stack' as they - # "wrap" nodes performing image resize. 
The scale/mean values nodes are located strictly before or after - # them - pre_processing_ops, trailing, casting = get_preprocessing_ops(body_graph, - 'map/while/Preprocessor/unstack', - 'map/while/Preprocessor/stack') - else: - pre_processing_ops, trailing, casting = get_preprocessing_ops(graph, start_node.id, end_node.id) - - mean_scale_kept = True - if len(pre_processing_ops): - # if the pre-processing is applied before the resize then reverse them to be in the topological order - if not trailing: - pre_processing_ops = list(reversed(pre_processing_ops)) - - if pre_processing_in_loop: # case 4 and 5 - # build a sub-graph containing a sequence of pre_processing_ops if they came from the Loop - new_preprocessing_ops = [] - - # cast data before start pre-processing with mean/scale values - if casting: - cast_node = Cast(graph, {'dst_type': np.float32}).create_node() - new_preprocessing_ops.append(cast_node) - - ops_mapping = {'Add': Add, 'Div': Div, 'Mul': Mul, 'Sub': Sub} - for idx in range(len(pre_processing_ops)): - origin_node, const_port_ind, value = pre_processing_ops[idx] - new_node = create_op_with_const_inputs(graph, ops_mapping[origin_node.op], {const_port_ind: value}) - if len(new_preprocessing_ops): - new_node.in_port(1 - const_port_ind).connect(new_preprocessing_ops[-1].out_port(0)) - new_preprocessing_ops.append(new_node) - - # replace sub-graph between start and end nodes (including them) with new_preprocessing_ops nodes - end_node.out_port(0).get_connection().set_source(new_preprocessing_ops[-1].out_port(0)) - start_node.in_port(0).get_connection().set_destination( - new_preprocessing_ops[0].in_port(int(new_preprocessing_ops[0].is_in_port_connected(0)))) - else: - if trailing: # case 2 - # change output of the end_node to be produced with the start node producer - source_port = start_node.in_port(0).get_source() - source_port.disconnect() - end_node.out_port(0).get_connection().set_source(source_port) - else: # case 1 - # if padding is specified then need to remove mean/scale as well. Refer to the transformation - # comments for more details - if pad_to_max_dimension: - # change output of the end_node to be produced with the node producing data for the first - # preprocessing op - mean_scale_kept = False - first_pre_processing_node = pre_processing_ops[0][0] - consumer_port = first_pre_processing_node.in_port(int(not pre_processing_ops[0][1])) - end_node.out_port(0).get_connection().set_source(consumer_port.get_connection().get_source()) - else: - # change output of the end_node to be produced with the last preprocessing op - end_node.out_port(0).get_connection().set_source(pre_processing_ops[-1][0].out_port(0)) - start_node.in_port(0).disconnect() - else: # simply remove the nodes in between start_node and end_node (including them). Case 3 and 6 - end_node.out_port(0).get_connection().set_source(start_node.in_port(0).get_source()) - - if mean_scale_kept: - log.error('The pre-processing block has been removed. Only nodes performing mean value subtraction and ' - 'scaling (if applicable) are kept. It is necessary to resize an input image using the same ' - 'algorithm as in the original model before feeding it to the OpenVINO.', - extra={'is_warning': True}) - else: - log.error('The Preprocessor block has been removed including mean value subtraction and scaling (if ' - 'applicable). 
It is necessary to resize, scale and pad an input image using the same algorithm ' - 'as in the original model before feeding it to the OpenVINO.', extra={'is_warning': True}) - - -class ObjectDetectionAPIDetectionOutputReplacement(FrontReplacementFromConfigFileSubGraph): - """ - Replaces the sub-graph that is equal to the DetectionOutput layer from OpenVINO (similarly to the - ObjectDetectionAPISSDPostprocessorReplacement). This transformation is used for Faster R-CNN, R-FCN and Mask R-CNN - topologies conversion. - Refer to the code for more details. - """ - replacement_id = 'ObjectDetectionAPIDetectionOutputReplacement' - run_not_recursively = True - - def run_before(self): - return [ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement] - - def run_after(self): - return [ObjectDetectionAPIProposalReplacement] - - def nodes_to_remove(self, graph: Graph, match: SubgraphMatch): - new_nodes_to_remove = match.matched_nodes_names().copy() - outputs = ['detection_boxes', 'detection_scores', 'num_detections'] - for output in outputs: - if output in graph.nodes: - children = Node(graph, output).out_nodes() - if len(children) != 1: - log.warning('Output {} has {} children. It should have only one output: with op==`Result`' - ''.format(output, len(children))) - elif children[list(children.keys())[0]].op == 'Result': - new_nodes_to_remove.append(children[list(children.keys())[0]].id) - new_nodes_to_remove.extend(outputs) - return new_nodes_to_remove - - def output_edges_match(self, graph: Graph, match: SubgraphMatch, new_sub_graph: dict): - # the DetectionOutput in OV produces single tensor, but in TF it produces four tensors, so we need to create - # only one output edge match - if match.outputs_count() >= 1: - return {match.output_node(0)[0].id: new_sub_graph['detection_output_node'].id} - else: - return {list(graph.graph.get("packed_outputs").keys())[0]: new_sub_graph['detection_output_node'].id} - - def generate_sub_graph(self, graph: Graph, match: SubgraphMatch): - argv = graph.graph['cmd_params'] - if argv.tensorflow_object_detection_api_pipeline_config is None: - raise Error(missing_param_error) - pipeline_config = PipelineConfig(argv.tensorflow_object_detection_api_pipeline_config) - custom_attributes = match.custom_replacement_desc.custom_attributes - - num_classes = _value_or_raise(match, pipeline_config, 'num_classes') - max_proposals = _value_or_raise(match, pipeline_config, 'first_stage_max_proposals') - activation_function = _value_or_raise(match, pipeline_config, 'postprocessing_score_converter') - - activation_conf_node = add_activation_function_after_node(graph, match.single_input_node(1)[0].in_node(0), - activation_function) - - # OV DetectionOutput operation consumes flattened tensors so need add a Reshape layer. - # The batch value of the input tensor is not equal to the batch of the topology, so it is not possible to use - # "0" value in the Reshape layer attribute to refer to the batch size, but we know how to - # calculate the second dimension so the batch value will be deduced from it with help of "-1". - reshape_conf_node = create_op_node_with_second_input(graph, Reshape, - int64_array([-1, (num_classes + 1) * max_proposals]), - dict(name='do_reshape_conf'), activation_conf_node) - mark_as_correct_data_layout(reshape_conf_node) - - # We looking for first not Reshape-typed node before match.single_input_node(0)[0].in_node(0). 
- # And add reshape_offsets node after this first not Reshape-typed node to avoid issues with Reshape-like - # operations which may trigger insert of Transpose operations before/after them - current_node = skip_nodes_by_condition(match.single_input_node(0)[0].in_node(0), - lambda x: x['kind'] == 'op' and x.has_and_set('reinterp_shape')) - - # if share_box_across_classes=1 then the same set of bounding boxes shape offsets is used for all classes, - # otherwise per-class set of shape offsets is used and we need to use appropriate Reshape output shape - share_box_across_classes = _value_or_raise(match, pipeline_config, 'share_box_across_classes') - if share_box_across_classes: - reshape_offsets_shape = int64_array([-1, 1, 1, 4]) - else: - reshape_offsets_shape = int64_array([-1, num_classes, 1, 4]) - reshape_offsets = create_op_node_with_second_input(graph, Reshape, reshape_offsets_shape, - dict(name='reshape_loc'), current_node) - mark_as_correct_data_layout(reshape_offsets) - - if share_box_across_classes: - offsets = reshape_offsets - else: - # TF produces shape offsets tensor without boxes corresponding to "background" class - # OpenVINO DetectionOutput layer requires "background" class data be included so we generate them - offsets = add_fake_background_loc(graph, reshape_offsets) - PermuteAttrs.set_permutation(reshape_offsets, offsets, None) - - # reshape offsets tensor to 2D so it could be multiplied with variances - reshape_offsets_2d = create_op_node_with_second_input(graph, Reshape, int64_array([-1, 4]), - dict(name='reshape_locs_2d'), offsets) - mark_as_correct_data_layout(reshape_offsets_2d) - - # multiply bounding boxes shape offsets with variances as it is expected when variance_encoded_in_target=1 for - # the DetectionOutput operation - variances = Const(graph, dict(value=_variance_from_pipeline_config(pipeline_config))).create_node([]) - scaled_offsets = Mul(graph, dict()).create_node([reshape_offsets_2d, variances], dict(name='scale_locs')) - - # there are Convolution/MatMul nodes before the post-processing block in all models except RFCN. So for most of - # the models we can just update Convolution/MatMul weights to perform swapping of coordinates. But for the RFCN - # models we use approach with adding special Convolution node which perform the same swap. Previously we used a - # dedicated parameter in the transformation config but now it is not needed and the get this information from - # the model automatically by performing graph traversal until CropAndResize (RFCN case) or Conv/MatMul nodes are - # found - if 'coordinates_swap_method' in custom_attributes: - log.error('The "coordinates_swap_method" parameter is not needed anymore. Consider removing it from the ' - '"ObjectDetectionAPIDetectionOutputReplacement" transformation custom attributes.', - extra={'is_warning': True}) - matmul_or_conv_nodes = backward_bfs_for_operation(scaled_offsets, ['MatMul', 'Conv2D'], ['ShapeOf', - 'CropAndResize']) - if len(matmul_or_conv_nodes) == 0: - swapped_offsets = add_convolution_to_swap_xy_coordinates(graph, scaled_offsets, 4) - flattened_offsets = Reshape(graph, dict(name='do_reshape_locs')).create_node([swapped_offsets]) - else: - swap_weights_xy(graph, matmul_or_conv_nodes) - flattened_offsets = Reshape(graph, dict(name='do_reshape_locs')).create_node([scaled_offsets]) - - # OV DetectionOutput layer consumes flattened tensors so need add a Reshape layer. 
- # The batch value of the input tensor is not equal to the batch of the topology, so it is not possible to use - # "0" value in the Reshape layer attribute to refer to the batch size, but we know how to - # calculate the second dimension so the batch value will be deduced from it with help of "-1". - if share_box_across_classes: - reshape_shape = int64_array([-1, max_proposals * 4]) - else: - reshape_shape = int64_array([-1, (num_classes + 1) * max_proposals * 4]) - Const(graph, {'value': reshape_shape, 'name': flattened_offsets.name + '/Dim'}).create_node().out_port(0).\ - connect(flattened_offsets.in_port(1)) - mark_as_correct_data_layout(flattened_offsets) - - # find Proposal output which has the data layout as in TF: YXYX coordinates without batch indices. - proposal_nodes_ids = [node_id for node_id, attrs in graph.nodes(data=True) - if 'name' in attrs and attrs['name'] == 'crop_proposals'] - if len(proposal_nodes_ids) != 1: - raise Error("Found the following nodes '{}' with name 'crop_proposals' but there should be exactly 1. " - "Looks like ObjectDetectionAPIProposalReplacement transformation didn't work." - "".format(proposal_nodes_ids)) - proposal = Node(graph, proposal_nodes_ids[0]) - - # Need to swap proposals coordinates before passing them to the DetectionOutput for the RFCN topologies - if len(matmul_or_conv_nodes) == 0: - proposal = add_convolution_to_swap_xy_coordinates(graph, proposal, 4) - - # reshape priors boxes as Detection Output expects - reshape_priors = create_op_node_with_second_input(graph, Reshape, int64_array([-1, 1, max_proposals * 4]), - dict(name='DetectionOutput_reshape_priors_'), proposal) - mark_as_correct_data_layout(reshape_priors) - - detection_output_op = DetectionOutput(graph, {}) - for key in ('clip_before_nms', 'clip_after_nms'): - if key in match.custom_replacement_desc.custom_attributes: - detection_output_op.attrs[key] = int(match.custom_replacement_desc.custom_attributes[key]) - - detection_output = detection_output_op.create_node([flattened_offsets, reshape_conf_node, reshape_priors], dict( - name=detection_output_op.attrs['type'], - share_location=int(share_box_across_classes), - variance_encoded_in_target=1, - background_label_id=int(custom_attributes.get('background_label_id', 0)), - code_type='caffe.PriorBoxParameter.CENTER_SIZE', - pad_mode='caffe.ResizeParameter.CONSTANT', - resize_mode='caffe.ResizeParameter.WARP', - confidence_threshold=_value_or_raise(match, pipeline_config, 'postprocessing_score_threshold'), - top_k=_value_or_raise(match, pipeline_config, 'postprocessing_max_detections_per_class'), - keep_top_k=_value_or_raise(match, pipeline_config, 'postprocessing_max_total_detections'), - nms_threshold=_value_or_raise(match, pipeline_config, 'postprocessing_iou_threshold'))) - # sets specific name to the node so we can find it in other transformations - detection_output.name = 'detection_output' - - # when the use_matmul_crop_and_resize = True then the prior boxes were not swapped and we need to swap them from - # YXYX to XYXY before passing to the DetectionOutput operation - if pipeline_config.get_param('use_matmul_crop_and_resize'): - insert_weights_swap_xy_sub_graph(graph, detection_output.in_port(2).get_connection()) - - # create Result since after the transformation other Results are removed - Result(graph, dict(name='do_OutputOp')).create_node([detection_output]) - - log.error('The graph output nodes have been replaced with a single layer of type "DetectionOutput". 
Refer to ' - 'the operation set specification documentation for more information about the operation.', - extra={'is_warning': True}) - return {'detection_output_node': detection_output} - - -class ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement(FrontReplacementFromConfigFileSubGraph): - """ - There are two TensorFlow CropAndResize (corresponding to OpenVINO ROIPooling with bilinear interpolation - mode) operations in the Mask-RCNN model. The second CropAndResize gets bounding boxes coordinates as input from the - part of the model which is replaced with the DetectionOutput operation using the transformation - ObjectDetectionAPIDetectionOutputReplacement. DetectionOutput operation produces tensor with 7-element tuples - [batch_id, class_id, confidence, x_1, y_1, x_2, y_2]. The ROIPooling operation expects input defining bounding boxes - with the following format [batch_id, x_1, y_1, x_2, y_2]. The ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement - transformation inserts ROIPooling operation instead of the CropAndResize and crops slices of data from the - DetectionOutput operation and concatenates them to produce a tensor with correct content. - """ - replacement_id = 'ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement' - run_not_recursively = True - - def run_before(self): - return [ObjectDetectionAPITransformationsFinish] - - def run_after(self): - return [ObjectDetectionAPIProposalReplacement] - - def output_edges_match(self, graph: Graph, match: SubgraphMatch, new_sub_graph: dict): - return {match.output_node(0)[0].id: new_sub_graph['roi_pooling_node'].id} - - def generate_sub_graph(self, graph: Graph, match: SubgraphMatch): - argv = graph.graph['cmd_params'] - if argv.tensorflow_object_detection_api_pipeline_config is None: - raise Error(missing_param_error) - pipeline_config = PipelineConfig(argv.tensorflow_object_detection_api_pipeline_config) - - # the output spatial dimensions of the ROIPooling operation are defined in the pipeline.config - roi_pool_size = _value_or_raise(match, pipeline_config, 'initial_crop_size') - - # find the DetectionOutput operation by name to get tensor with information about bounding boxes from it. - # the layout of bounding boxes is XYXY already, so no need to swap them - detection_output_nodes_ids = [node_id for node_id, attrs in graph.nodes(data=True) - if 'name' in attrs and attrs['name'] == 'detection_output'] - if len(detection_output_nodes_ids) != 1: - raise Error("Found the following nodes '{}' with name 'detection_output' but there should be exactly 1.". - format(detection_output_nodes_ids)) - detection_output = Node(graph, detection_output_nodes_ids[0]) - do_outputs = [port.node for port in detection_output.out_port(0).get_destinations() if port.node.op == 'Result'] - if len(do_outputs) == 1: - graph.remove_node(do_outputs[0].id) - - # add reshape of Detection Output so it can be an output of the topology. 
- # this looks like some legacy not relevant constraint anymore - flatten_do = create_op_node_with_second_input(graph, Reshape, int64_array([-1, 7]), dict(name='reshape_do_2d'), - detection_output) - mark_as_correct_data_layout(flatten_do) - - # adds "Result" node so this output is returned by OV by default for the backward compatibility - do_result = Result(graph, dict(name='do_reshaped_OutputOp')).create_node([flatten_do]) - - # add attribute 'output_sort_order' so it will be used as a key to sort output nodes before generation of IR - do_result.in_edge()['data_attrs'].append('output_sort_order') - do_result.in_edge()['output_sort_order'] = [('detection_boxes', 0)] - - # creates two Crop operations which get input from the DetectionOutput, cuts off slices of data with class ids - # and probabilities and produces a tensor with batch ids and bounding boxes only (as it is expected by the - # ROIPooling operation) - batch_ids = Crop(graph, dict(axis=int64_array([1]), offset=int64_array([0]), dim=int64_array([1]))).create_node( - [flatten_do], dict(name='crop_do_batch_ids')) - coords = Crop(graph, dict(axis=int64_array([1]), offset=int64_array([3]), dim=int64_array([4]))).create_node( - [flatten_do], dict(name='crop_do_coords')) - batch_and_coords = Concat(graph, dict(axis=1)).create_node([batch_ids, coords], dict(name='batch_and_coords')) - - roi_pooling = ROIPooling(graph, dict(method="bilinear", spatial_scale=1, pooled_h=roi_pool_size, - pooled_w=roi_pool_size)).create_node( - [match.single_input_node(0)[0].in_node(), batch_and_coords], dict(name='ROI_pooling_2')) - return {'roi_pooling_node': roi_pooling} - - -class ObjectDetectionAPIMaskRCNNSigmoidReplacement(FrontReplacementFromConfigFileGeneral): - """ - The transformation is used to convert Mask R-CNN topologies only. - - The post-processing part of Mask-RCNN models is to select masks from the output tensor which correspond to bounding - boxes with probability exceeding specific threshold. The final step of the post-processing is to apply Sigmoid - activation function to the tensor with selected masks so the values become in range [0, 1]. The post-processing part - of the model is not supported so it is removed using the transformation ObjectDetectionAPIOutputReplacement. - This transformation adds back the activation function to the end of the network producing masks tensors. So the - post-processing with selecting masks corresponding to bounding boxes with high probabilities should be implemented - in the application. - """ - replacement_id = 'ObjectDetectionAPIMaskRCNNSigmoidReplacement' - run_not_recursively = True - - def run_before(self): - return [ObjectDetectionAPITransformationsFinish] - - def run_after(self): - return [ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement] - - def transform_graph(self, graph: Graph, replacement_descriptions): - # there could be multiple Result nodes in the graph. 
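The Crop/Crop/Concat trio inserted by ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement above just slices the flattened DetectionOutput rows of [batch_id, class_id, confidence, x1, y1, x2, y2] down to the [batch_id, x1, y1, x2, y2] format that ROIPooling expects. In numpy terms, with made-up rows:

```python
import numpy as np

do_rows = np.array([[0, 1, 0.9, 0.1, 0.2, 0.5, 0.6],
                    [0, 3, 0.8, 0.3, 0.3, 0.7, 0.9]], dtype=np.float32)

batch_ids = do_rows[:, 0:1]   # Crop(axis=1, offset=0, dim=1)
coords    = do_rows[:, 3:7]   # Crop(axis=1, offset=3, dim=4)
rois      = np.concatenate([batch_ids, coords], axis=1)  # Concat(axis=1)
print(rois)  # rows of [batch_id, x1, y1, x2, y2]
```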
We identify the one containing masks data using the node - # name prefix - masks_node_prefix_name = replacement_descriptions.get('masks_node_prefix_name', 'SecondStageBoxPredictor') - op_outputs = graph.get_op_nodes(op='Result') - for op_output in op_outputs: - last_node = op_output.in_port(0).get_source().node - if last_node.name.startswith(masks_node_prefix_name): - sigmoid_node = Sigmoid(graph, dict(name='masks')).create_node() - op_output.in_port(0).get_connection().insert_node(sigmoid_node) - - # the line below is needed to keep the layout as is, instead of the default NCHW->NHWC change - sigmoid_node['nchw_layout'] = True - - # adding the op name to the tensor names list is needed for compatibility with old API configs - op_output.in_port(0).get_connection().get_source().add_tensor_names([sigmoid_node['name']]) - - log.error('The predicted masks are produced by the "masks" layer for each bounding box generated with a ' - '"detection_output" operation.\n Refer to operation specification in the documentation for the ' - 'information about the DetectionOutput operation output data interpretation.\nThe model can be ' - 'inferred using the dedicated demo "mask_rcnn_demo" from the OpenVINO Open Model Zoo.', - extra={'is_warning': True}) - - -class ObjectDetectionAPIProposalReplacement(FrontReplacementFromConfigFileSubGraph): - """ - The outputs of the Region Proposal Network, which produces shape offsets and probabilities of whether anchors - contain an object or not, are fed to the part of the model which decodes bounding boxes and performs non-maximum - suppression. There are two operations in OpenVINO which can perform such calculations: Proposal and DetectionOutput. - Historically, the Proposal operation was inserted by this transformation, but now a DetectionOutput can be inserted - instead if the "operation_to_add" parameter in the JSON configuration file is set to "DetectionOutput". There was a - model for which inserting DetectionOutput instead of the Proposal operation resulted in more accurate results. - Another reason why the Proposal operation is not preferable is that it requires an additional model input which - defines the original image size and a special scale value (refer to the operation specification for more details). - So even though the original TensorFlow model has one input (the actual image), the generated IR contains two inputs - (the actual image and a special input for the Proposal operation). It is not possible to switch to inserting the - DetectionOutput operation by default because it is not a backward compatible change and some customer scripts may - start to fail since one input disappears. - Refer to the code for details on the conversion process and operations inserted. 
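Since the ObjectDetectionAPIMaskRCNNSigmoidReplacement docstring above leaves mask selection to the application, here is a hedged numpy sketch of what that application-side step might look like: apply the sigmoid the transformation re-inserts, then keep only the masks of boxes that passed a score threshold. The tensor shapes, the 0.5 threshold and the helper name select_masks are my assumptions, not something defined by the model or by MO:

```python
import numpy as np

def select_masks(mask_logits, detections, score_threshold=0.5):
    # mask_logits: [num_boxes, num_classes, H, W] raw logits produced before the inserted Sigmoid;
    # detections: rows of [batch_id, class_id, confidence, x1, y1, x2, y2] from the DetectionOutput
    probs = 1.0 / (1.0 + np.exp(-mask_logits))   # what the inserted Sigmoid ("masks") node computes
    keep = detections[:, 2] > score_threshold
    classes = detections[keep, 1].astype(int)
    return probs[np.flatnonzero(keep), classes]  # per kept box, the mask of its predicted class

masks = select_masks(np.random.randn(2, 4, 33, 33),
                     np.array([[0, 1, 0.9, 0.1, 0.1, 0.6, 0.6],
                               [0, 2, 0.3, 0.2, 0.2, 0.4, 0.4]]))
print(masks.shape)  # (1, 33, 33): only the first detection passed the threshold
```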
- """ - replacement_id = 'ObjectDetectionAPIProposalReplacement' - run_not_recursively = True - matched_input_nodes_to_keep = 2 # number of matched input nodes to keep - - def run_after(self): - return [ObjectDetectionAPIPreprocessorReplacement, ObjectDetectionAPIPreprocessor2Replacement] - - def run_before(self): - return [ObjectDetectionAPITransformationsFinish] - - def output_edges_match(self, graph: Graph, match: SubgraphMatch, new_sub_graph: dict): - return {match.output_node(0)[0].id: new_sub_graph['proposal_node'].id} - - def nodes_to_remove(self, graph: Graph, match: SubgraphMatch): - new_list = match.matched_nodes_names().copy() - # do not remove nodes that produce box predictions and class predictions and optionally generated anchors - for port in range(self.matched_input_nodes_to_keep): - new_list.remove(match.single_input_node(port)[0].id) - return new_list - - def generate_sub_graph(self, graph: Graph, match: SubgraphMatch): - argv = graph.graph['cmd_params'] - if argv.tensorflow_object_detection_api_pipeline_config is None: - raise Error(missing_param_error) - pipeline_config = PipelineConfig(argv.tensorflow_object_detection_api_pipeline_config) - - # the transformation configuration file specifies what operations should be included with this transformation - if match.custom_replacement_desc.custom_attributes.get('operation_to_add', 'Proposal') == 'DetectionOutput': - self.matched_input_nodes_to_keep = 3 # keep the third input with prior boxes (anchors) - return self.insert_detection_output_instead_of_proposal(graph, match, pipeline_config) - - max_proposals = _value_or_raise(match, pipeline_config, 'first_stage_max_proposals') - proposal_ratios = _value_or_raise(match, pipeline_config, 'anchor_generator_aspect_ratios') - proposal_scales = _value_or_raise(match, pipeline_config, 'anchor_generator_scales') - anchors_count = len(proposal_ratios) * len(proposal_scales) - - # Find Convolution/MatMul node that produces classes confidence - class_conf = backward_bfs_for_operation(match.single_input_node(1)[0], ['Add'])[0] - - # size of 'C' dimension of the tensor with class predictions is equal to base_anchors_count * 2, where 2 - # corresponds to a number of classes (background and foreground) and base_anchors_count is equal to number of - # anchors applied to each position of 'H' and 'W' dimensions. Therefore, there are H * W * base_anchors_count - # bounding boxes. 
OpenVINO Proposal operation interprets the input tensor as a tensor - # [batch, 2 * base_anchors_count, H, W] but in TensorFlow model it is calculated as - # [batch, base_anchors_count, H, W] (after NHWC->NCHW layout conversion), so it is necessary to decompose the - # 'C' dimension into base_anchors_count and 2 and swap these two dimensions - reshape_class_conf = create_op_node_with_second_input(graph, Reshape, int64_array([0, anchors_count, 2, -1]), - dict(name='predictions/Reshape')) - class_conf.insert_node_after(reshape_class_conf, 0) - mark_as_correct_data_layout(reshape_class_conf) - - # the part of the sub-graph being removed contains the SoftMax operation, so here we insert it back - softmax_conf_op = Softmax(graph, dict(axis=2, nchw_layout=True, name=reshape_class_conf.id + '/Softmax')) - softmax_conf = softmax_conf_op.create_node([reshape_class_conf]) - - order_const = Const(graph, dict(value=int64_array([0, 2, 1, 3]), - name=softmax_conf.name + '/TransposeOrder')).create_node() - permute_reshape_softmax_op = Transpose(graph, dict()) - permute_reshape_softmax = permute_reshape_softmax_op.create_node([softmax_conf, order_const], dict( - name=softmax_conf.name + '/Transpose')) - mark_input_as_in_correct_layout(permute_reshape_softmax, 1) - mark_output_as_in_correct_layout(permute_reshape_softmax, 0) - - initial_shape = Shape(graph, dict(name=class_conf.id + '/Shape')).create_node([class_conf]) - - reshape_conf_initial = Reshape(graph, dict(name='Reshape_Transpose_Class')).create_node( - [permute_reshape_softmax, initial_shape]) - mark_input_as_in_correct_layout(reshape_conf_initial, 0) - mark_output_as_in_correct_layout(reshape_conf_initial, 0) - - variance_height = pipeline_config.get_param('frcnn_variance_height') - variance_width = pipeline_config.get_param('frcnn_variance_width') - variance_x = pipeline_config.get_param('frcnn_variance_x') - variance_y = pipeline_config.get_param('frcnn_variance_y') - anchor_generator_height_stride = pipeline_config.get_param('anchor_generator_height_stride') - anchor_generator_width_stride = pipeline_config.get_param('anchor_generator_width_stride') - anchor_generator_height = pipeline_config.get_param('anchor_generator_height') - anchor_generator_width = pipeline_config.get_param('anchor_generator_width') - - if variance_height != variance_width: - log.error('The values for variance for height "{}" is not equal to variance for width "{}". The detection ' - 'results will be inaccurate.'.format(variance_height, variance_width)) - if variance_x != variance_y: - log.error('The values for variance for x "{}" is not equal to variance for y "{}". The detection ' - 'results will be inaccurate.'.format(variance_x, variance_y)) - if anchor_generator_height_stride != anchor_generator_width_stride: - log.error('The values for the anchor generator height stride "{}" is not equal to the anchor generator ' - 'width stride "{}". The detection results will be inaccurate.' - ''.format(anchor_generator_height_stride, anchor_generator_width_stride)) - if anchor_generator_height != anchor_generator_width: - log.error('The values for the anchor generator height "{}" is not equal to the anchor generator width ' - 'stride "{}". 
The detection results will be inaccurate.'.format(anchor_generator_height, - anchor_generator_width)) - - proposal_op = ProposalOp(graph, dict(min_size=1, - framework='tensorflow', - pre_nms_topn=2 ** 31 - 1, - box_size_scale=variance_height, - box_coordinate_scale=variance_x, - post_nms_topn=max_proposals, - feat_stride=anchor_generator_height_stride, - ratio=proposal_ratios, - scale=proposal_scales, - normalize=1, - base_size=anchor_generator_height, - nms_thresh=_value_or_raise(match, pipeline_config, - 'first_stage_nms_iou_threshold'))) - for key in ('clip_before_nms', 'clip_after_nms'): - if key in match.custom_replacement_desc.custom_attributes: - proposal_op.attrs[key] = int(match.custom_replacement_desc.custom_attributes[key]) - - bboxes_offsets = backward_bfs_for_operation(match.single_input_node(0)[0], ['Add'])[0] - - # creates input to store input image height, width and scales (usually 1.0s) which is a mandatory input to the - # Proposal operation. The batch size for this input is fixed because it is allowed to pass images of the same - # size only as input - im_info = Parameter(graph, dict(shape=int64_array([1, 3]), fixed_batch=True)).create_node( - [], dict(name='image_info')) - - proposal = proposal_op.create_node([reshape_conf_initial, bboxes_offsets, im_info], dict(name='proposals')) - return {'proposal_node': ObjectDetectionAPIProposalReplacement.ie_to_tf_proposals(graph, proposal, match, - pipeline_config, - max_proposals)} - - @staticmethod - def insert_detection_output_instead_of_proposal(graph: Graph, match: SubgraphMatch, - pipeline_config: PipelineConfig): - """ - The function inserts DetectionOutput operation instead of Proposal operation which may result in an increase of - the accuracy for some models. The function is enabled with the custom attribute "operation_to_insert" with - value "DetectionOutput" in the transformation configuration file section for the - "ObjectDetectionAPIProposalReplacement" transformation. - - :param graph: the graph to operate on - :param match: the object containing information about the matched sub-graph - :param pipeline_config: object containing information from the pipeline.config file of the model - :return: the dictionary with mapping information needed for other transformations - """ - max_proposals = _value_or_raise(match, pipeline_config, 'first_stage_max_proposals') - - # Convolution/matmul node that produces classes confidence - # Transpose result of the tensor with classes confidences so it will be in a correct layout for Softmax - class_conf_nodes = backward_bfs_for_operation(match.single_input_node(1)[0], ['Add']) - assert len(class_conf_nodes) >= 1, 'Expected to find nodes of type "Add" starting from the node "{}" in ' \ - 'backward direction'.format(match.single_input_node(1)[0].id) - class_conf = class_conf_nodes[0] - - # prepare input with class confidences. The DetectionOutput operation which will consume this tensor as a - # second input expects probabilities to be normalized with SoftMax operation per each bounding box class. In - # order to do this we first reshape the tensor so the last dimension contains probability for 2 classes - # (background and foreground) for each bounding box. 
Before feeding this tensor to the DO operation the tensor - # is flattened to the shape [num_batches, num_classes * num_bounding_boxes] - reshape_conf = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1, 2]), - dict(name='predictions/Reshape')) - # transpose from NCHW to NHWC will be inserted as input to the Reshape automatically. This is expected - class_conf.out_port(0).disconnect() - class_conf.out_port(0).connect(reshape_conf.in_port(0)) - softmax_conf = Softmax(graph, dict(axis=2, name=reshape_conf.id + '/Softmax')).create_node([reshape_conf]) - flattened_conf = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1]), - dict(name=softmax_conf.name + '/Flatten'), softmax_conf) - # prepare input with bounding boxes shape offsets - offsets = backward_bfs_for_operation(match.single_input_node(0)[0], ['Add'])[0] - flatten_offsets = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1]), - dict(name=offsets.soft_get('name', offsets.id) + '/Flatten'), - offsets) - - # TensorFlow produces anchor boxes in absolute coordinates in YXYX order. Need to normalize them to [0, 1] - # interval and append a tensor with variances. Refer to the ObjectDetectionAPISSDPostprocessorReplacement - # transformation comments about variances. The YXYX->XYXY order change will be performed with the output of the - # inserted DetectionOutput operation - yxyx_anchors = match.single_input_node(2)[0] - - # get the input image height and width to divide the anchors values by it - initial_input_node_name = 'input_tensor' if 'input_tensor' in graph.nodes else 'image_tensor' - if initial_input_node_name not in graph.nodes(): - raise Error('Input node "{}" of the graph is not found. Do not run the Model Optimizer with ' - '"--input" command line parameter.'.format(initial_input_node_name)) - parameter_node = Node(graph, initial_input_node_name) - - input_shape = Shape(graph, {'name': parameter_node.name}).create_node([parameter_node]) - input_image_hw = node_to_get_shape_value_of_indices(input_shape, [1, 2]) # NHWC layout - hwhw = create_op_with_const_inputs(graph, Tile, {1: int64_array([2])}, {'name': 'image_hwhw'}, input_image_hw) - - hwhw_float = Cast(graph, {'dst_type': np.float32}).create_node([hwhw]) - scaled_anchors = Div(graph, {'name': 'scaled_anchors'}).create_node([yxyx_anchors, hwhw_float]) - - flattened_anchors = create_op_with_const_inputs(graph, Reshape, {1: int64_array([1, 1, -1])}, - {'name': 'flattened_anchors'}, scaled_anchors) - cropped_anchors = AttributedClamp(graph, {'min': 0.0, 'max': 1.0, 'name': 'clamped_yxyx', - 'nchw_layout': True}).create_node([flattened_anchors]) - # the input tensor "scaled_anchors" for the "flattened_anchors" may be 4D. 
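# Illustrative sketch (not taken from the removed MO sources): the anchor normalization done
# above, assuming absolute YXYX anchors of shape [num_anchors, 4] and a known input height/width.
import numpy as np

def normalize_anchors(yxyx_anchors, image_h, image_w):
    hwhw = np.tile(np.array([image_h, image_w], dtype=np.float32), 2)  # divisor [h, w, h, w]
    scaled = yxyx_anchors / hwhw                                       # absolute -> [0, 1] scale
    return np.clip(scaled, 0.0, 1.0)                                   # clamp, as AttributedClamp does

print(normalize_anchors(np.array([[0.0, 0.0, 320.0, 200.0]]), image_h=320, image_w=640))
# [[0.     0.     1.     0.3125]]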
In order to avoid inserting Transpose - # operation mark the "flattened_anchors" with the correct data layout - mark_as_correct_data_layout(flattened_anchors) - - # create tensor of shape [4] with variance values which then are tiled by the number of boxes which is obtained - # from the 'yxyx_anchors' node - variances = Const(graph, {'value': _variance_from_pipeline_config(pipeline_config)}).create_node() - - anchors_shape = Shape(graph, {'name': 'anchors_shape'}).create_node([yxyx_anchors]) - anchors_count = node_to_get_shape_value_of_indices(anchors_shape, [0]) - tiled_variances = Tile(graph, {'name': 'tiled_variances'}).create_node([variances, anchors_count]) - reshaped_tiled_variances = create_op_with_const_inputs(graph, Reshape, {1: int64_array([1, 1, -1])}, - {'name': 'flattened_variances'}, tiled_variances) - - # now we can merge actual anchors coordinates with a tensor with variances as it is expected by the - # DetectionOutput operation - duplicate_anchors = Concat(graph, {'axis': 1, 'name': 'anchors_with_variances'}).create_node( - [cropped_anchors, reshaped_tiled_variances]) - - do = DetectionOutput(graph, - {'background_label_id': 0, - 'clip_after_nms': True, - 'clip_before_nms': False, - 'code_type': 'caffe.PriorBoxParameter.CENTER_SIZE', - 'confidence_threshold': 0.0, - 'decrease_label_id': False, - 'input_height': 1, - 'input_width': 1, - 'keep_top_k': max_proposals, - 'normalized': True, - 'objectness_score': 0, - 'share_location': True, - 'top_k': 6000, - 'variance_encoded_in_target': False, - 'nms_threshold': _value_or_raise(match, pipeline_config, 'first_stage_nms_iou_threshold'), - 'name': 'first_do', - }).create_node([flatten_offsets, flattened_conf, duplicate_anchors]) - # DetectionOutput output tensor has YXYX box coordinates order - # switch to 3D to avoid issues that part of the model with 4D shapes should be inferred in NCHW layout - do_3d = create_op_with_const_inputs(graph, Squeeze, {1: int64_array(0)}, {'name': do.name + '/SqueezeDO'}, do) - mark_as_correct_data_layout(do_3d) - - # DetectionOutput output tensor produces a tensor of tuples with the following 7 elements: - # [batch_id, class_id, confidence, x1, y1, x2, y2]. Here we split the DetectionOutput result into the 7 - # tensors with each of these elements for predictions. Then we crop predicted box coordinates (scaled) to be - # within [0, 1] range (as it is predicted in the TF model) and then combine tensors back to the Proposal - # operation output format: [batch_id, x1, y1, x2, y2]. 
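# Illustrative sketch (not taken from the removed MO sources): the re-packing of DetectionOutput
# rows [batch_id, class_id, confidence, x1, y1, x2, y2] into Proposal-style rows
# [batch_id, x1, y1, x2, y2] that the comment above describes.
import numpy as np

def detections_to_proposals(detections):
    batch_ids = detections[:, 0:1]
    coords = np.clip(detections[:, 3:7], 0.0, 1.0)           # keep box coordinates inside [0, 1]
    return np.concatenate([batch_ids, coords], axis=-1)

print(detections_to_proposals(np.array([[0., 1., 0.9, -0.1, 0.2, 0.8, 1.3]])))
# [[0.  0.  0.2 0.8 1. ]]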
- do_split = create_op_node_with_second_input(graph, Split, int64_array(2), {'num_splits': 7, - 'name': do.name + '/Split'}, do_3d) - - coords = Concat(graph, {'axis': -1, 'in_ports_count': 4, 'name': do_split.name + '/coords'}).create_node() - # concat bounding boxes with the same order (XYXY) as Proposal produces - for port_idx in range(4): - do_split.out_port(3 + port_idx).connect(coords.in_port(port_idx)) - - clamped_coords = AttributedClamp(graph, {'min': 0.0, 'max': 1.0, 'name': 'clamped_xyxy'}).create_node([coords]) - - # prepare final proposal boxes [batch_id, x1, y1, x2, y2] - proposal_node = Concat(graph, {'axis': -1, 'in_ports_count': 2, 'name': 'proposals'}).create_node() - do_split.out_port(0).connect(proposal_node.in_port(0)) - clamped_coords.out_port(0).connect(proposal_node.in_port(1)) - return {'proposal_node': ObjectDetectionAPIProposalReplacement.ie_to_tf_proposals(graph, proposal_node, match, - pipeline_config, - max_proposals)} - - @staticmethod - def ie_to_tf_proposals(graph: Graph, proposal: Node, match: SubgraphMatch, pipeline_config: PipelineConfig, - max_proposals: int): - """ - Builds a graph which converts the proposals data in OV format to the format of TensorFlow. This includes - cropping the OV output of format [batch, x1, y1, x2, y2] to simply [x1, y1, x2, y2] and reshaping tensor to an - appropriate shape. Swapping of the Proposal output is performed when necessary. - - :param graph: the graph to operate on - :param proposal: the node producing OV proposals - :param match: the object containing information about matched sub-graph - :param pipeline_config: object containing information from the pipeline.config file of the model - :param max_proposals: maximum number of proposal boxes. Needed for the reshaping of the tensor - :return: the node producing output in the TF format. - """ - # models with use_matmul_crop_and_resize = True should not swap order of elements (YX to XY) after the Proposal - # because the TF output has XYXY layout originally. - # Also old version of RFCN model (1.9) does not require proposal swap since the output has proper layout - # already. The swap is controlled with the 'do_not_swap_proposals' parameter from the transformation file - swap_proposals = not match.custom_replacement_desc.custom_attributes.get('do_not_swap_proposals', False) and \ - not pipeline_config.get_param('use_matmul_crop_and_resize') - if swap_proposals: - proposal = add_convolution_to_swap_xy_coordinates(graph, proposal, 5) - - # the "reshape_swap_proposals_2d" is used in the ObjectDetectionAPIPSROIPoolingReplacement transformation. It - # is important that this input may be swapped several lines above - proposal_reshape_2d = create_op_node_with_second_input(graph, Reshape, int64_array([-1, 5]), - dict(name="reshape_swap_proposals_2d"), proposal) - mark_input_as_in_correct_layout(proposal_reshape_2d, 0) - - # Find closest CropAndResize in topological order - start_node = match.single_input_node(0)[0] - crop_and_resize_nodes_ids = [node.id for node in graph.pseudo_topological_sort_with_start_node(start_node) if - graph.nodes[node.id]['op'] == 'CropAndResize'] - - if len(crop_and_resize_nodes_ids) != 0 and swap_proposals: - # feed the CropAndResize node with a correct boxes information produced with the Proposal layer - # find the first CropAndResize node in the BFS order. 
This is needed in the case when we already swapped - # box coordinates data after the Proposal node - crop_and_resize_node = Node(graph, crop_and_resize_nodes_ids[0]) - # set a marker that an input with box coordinates has been pre-processed so the CropAndResizeReplacement - # transform doesn't try to merge the second and the third inputs - crop_and_resize_node['inputs_preprocessed'] = True - crop_and_resize_node.in_port(1).disconnect() - proposal_reshape_2d.out_port(0).connect(crop_and_resize_node.in_port(1)) - - tf_proposal_reshape_4d = create_op_node_with_second_input(graph, Reshape, - int64_array([-1, 1, max_proposals, 5]), - dict(name="reshape_proposal_4d"), proposal) - mark_as_correct_data_layout(tf_proposal_reshape_4d) - - crop_op = Crop(graph, dict(axis=int64_array([3]), offset=int64_array([1]), dim=int64_array([4]), - nchw_layout=True)) - # the crop_proposals node is used in the ObjectDetectionAPIDetectionOutputReplacement transformation - crop = crop_op.create_node([tf_proposal_reshape_4d], dict(name='crop_proposals')) - - tf_proposals_crop_reshape_3d_node = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1, 4]), - dict(name="reshape_crop_3d"), crop) - mark_input_as_in_correct_layout(tf_proposals_crop_reshape_3d_node, 0) - return tf_proposals_crop_reshape_3d_node - - -""" -An important part of many object detection models is an operation DetectionOutput which decodes final detection boxes -using predicted bounding boxes shape offsets and prior boxes inputs. And finally performs non-maximum-suppression based -on decoded boxes and their confidences (scores). There is no DetectionOutput operation in TensorFlow operation set, it -is implemented as a sub-graph of primitive operations instead. There are two transformations which replace the sub-graph -implementing DetectionOutput operation in this file: ObjectDetectionAPISSDPostprocessorReplacement and -ObjectDetectionAPIDetectionOutputReplacement. The first one is used for SSD models, the second one for Faster-RCNN, -Mask-RCNN and RFCN models. These transformations also prepare input data for the DetectionOutput operation because the -layout and shape of the data is different between the TensorFlow and the OpenVINO. The most notable difference -is that bounding boxes and deltas are calculated with YXYX order in the TensorFlow model whilst OpenVINO -operation DetectionOutput, ROIPooling and Proposal expects them and produce the output with XYXY order. Refer to the -transformation code and operations specifications for more details. -""" - - -class ObjectDetectionAPISSDPostprocessorReplacement(FrontReplacementFromConfigFileSubGraph): - """ - The transformation replaces the TensorFlow sub-graph performing DetectionOutput with the DetectionOutput operation - and adds some nodes to prepare input data in correct layout and shape. 
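# Illustrative sketch (not taken from the removed MO sources): the TensorFlow-vs-OpenVINO box
# order difference discussed above amounts to a coordinate permutation.
import numpy as np

def yxyx_to_xyxy(boxes):
    # boxes: [..., 4] in [y1, x1, y2, x2] order -> [x1, y1, x2, y2]
    return boxes[..., [1, 0, 3, 2]]

print(yxyx_to_xyxy(np.array([[0.1, 0.2, 0.3, 0.4]])))  # [[0.2 0.1 0.4 0.3]]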
- """ - replacement_id = 'ObjectDetectionAPISSDPostprocessorReplacement' - run_not_recursively = True - - def run_after(self): - return [ObjectDetectionAPIPreprocessorReplacement, ObjectDetectionAPIPreprocessor2Replacement] - - def run_before(self): - return [ObjectDetectionAPITransformationsFinish] - - def output_edges_match(self, graph: Graph, match: SubgraphMatch, new_sub_graph: dict): - # the DetectionOutput in OV produces single tensor, but in TF it produces two tensors, so create only one output - # edge match - return {match.output_node(0)[0].id: new_sub_graph['detection_output_node'].id} - - def generate_sub_graph(self, graph: Graph, match: SubgraphMatch): - argv = graph.graph['cmd_params'] - if argv.tensorflow_object_detection_api_pipeline_config is None: - raise Error(missing_param_error) - pipeline_config = PipelineConfig(argv.tensorflow_object_detection_api_pipeline_config) - - has_background_class = _value_or_raise(match, pipeline_config, 'add_background_class') - num_classes = _value_or_raise(match, pipeline_config, 'num_classes') + has_background_class - - # reshapes confidences to 4D before applying activation function and do not convert from NHWC to NCHW this node. - # the add_activation_function_after_node function may insert the Softmax operation which is performed over the - # last dimension which should have a specific size = num_classes. In the original model the last dimension may - # be different, so this Reshape is absolutely necessary - reshape_conf_before_ac = create_op_node_with_second_input(graph, Reshape, int64_array([0, 1, -1, num_classes]), - {'name': 'do_ExpandDims_conf'}) - reshape_conf_before_ac.in_port(0).connect(match.input_nodes(1)[0][0].in_node(0).out_port(0)) - mark_as_correct_data_layout(reshape_conf_before_ac) - - # the transformation nodes are selected such a way that the confidences/scores post-processing activation - # function is removed. This was done in order to support several versions of the model using one JSON config - # file. Therefore, it is necessary to manually add this operation back to the graph - activation_function = _value_or_raise(match, pipeline_config, 'postprocessing_score_converter') - activation_conf_node = add_activation_function_after_node(graph, reshape_conf_before_ac, activation_function) - - # OV DetectionOutput operation expects flattened tensor with bounding boxes shape offsets, so reshaping it - reshape_offsets = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1]), - {'name': 'do_reshape_offsets'}) - - # skip all Identity nodes and Reshape/Squeeze/Unsqueeze ops which may break the conversion because add or split - # unnecessary dimensions - current_node = skip_nodes_by_condition(match.input_nodes(0)[0][0].in_node(0), - lambda x: x.op == 'Identity' or x.has_and_set('reinterp_shape')) - reshape_offsets.in_port(0).connect(current_node.out_port(0)) - mark_as_correct_data_layout(reshape_offsets) - - # OV DetectionOutput operation expects flattened tensor with class confidences, so reshaping it - reshape_conf_node = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1]), - {'name': 'do_reshape_conf'}, activation_conf_node) - mark_as_correct_data_layout(reshape_conf_node) - - need_swap_priors = False - # the SSD model is a fully convolutional model so it can perform detection for the arbitrary input shape image - # if the input with prior boxes is properly generated based on the input image size. 
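# Illustrative sketch (not taken from the removed MO sources): re-applying the score converter
# that the matched sub-graph dropped. The converter names (IDENTITY, SIGMOID, SOFTMAX) are the
# usual TF Object Detection API values and are an assumption here; logits are expected to be
# reshaped so that the last axis equals num_classes, as explained above.
import numpy as np

def apply_score_converter(logits, converter):
    if converter == 'SOFTMAX':
        e = np.exp(logits - logits.max(axis=-1, keepdims=True))
        return e / e.sum(axis=-1, keepdims=True)             # per-box probabilities over classes
    if converter == 'SIGMOID':
        return 1.0 / (1.0 + np.exp(-logits))
    return logits                                            # IDENTITY: use raw scores

print(apply_score_converter(np.zeros((1, 1, 8, 3)), 'SOFTMAX').shape)  # (1, 1, 8, 3)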
There were some TensorFlow - # models where this input was hardcoded as a constant and so the model can predict images of the specific input - # size only. The code below inserts PriorBox or PriorBoxClustered operations which generate prior boxes and a - # function call "_relax_reshape_nodes" to fix hardcoded output shapes specified for some Reshape operations in - # the original model. These workarounds can be disabled by specifying parameter - # 'disable_prior_boxes_layers_generator' in the JSON transformation configuration file or is automatically - # disabled if necessary information about prior box generators is not known - if not match.custom_replacement_desc.custom_attributes.get('disable_prior_boxes_layers_generator', False) and \ - (pipeline_config.get_param('ssd_anchor_generator_num_layers') is not None or - pipeline_config.get_param('multiscale_anchor_generator_min_level') is not None): - # change the Reshape operations with hardcoded number of output elements of the convolution nodes to be - # reshape-able - _relax_reshape_nodes(graph, pipeline_config) - - # create PriorBoxClustered nodes instead of a constant value with prior boxes so the model could be reshaped - if pipeline_config.get_param('ssd_anchor_generator_num_layers') is not None: - priors_node = _create_prior_boxes_node(graph, pipeline_config) - else: - priors_node = _create_multiscale_prior_boxes_node(graph, pipeline_config) - else: - log.info('The anchor generator is not known. Save constant with prior-boxes to IR.') - tf_priors_node = match.input_nodes(2)[0][0].in_node(0) - # original prior boxes are stored as YXYX while DetectionOutput expects them to be represented as XYXY. - # also variances should be encoded into this input. Variances are the values which are used during decoding - # of bounding boxes from prior boxes and shape offsets. Refer to the DetectionOutput operation - # implementation for more details - flattened_priors = create_op_with_const_inputs(graph, Reshape, {1: int64_array([1, 1, -1])}, - {'name': 'flattened_priors'}, tf_priors_node) - mark_as_correct_data_layout(flattened_priors) - - # create tensor of shape [4] with variance values which then are tiled by the number of boxes which is - # obtained from the 'priors_node' node - priors_shape = Shape(graph, {'name': 'priors_shape'}).create_node([tf_priors_node]) - priors_count = node_to_get_shape_value_of_indices(priors_shape, [-2]) - - # replicating the variance values for all prior-boxes - variances = Const(graph, {'value': _variance_from_pipeline_config(pipeline_config)}).create_node() - tiled_variances = Tile(graph, {'name': 'tiled_variances'}).create_node([variances, priors_count]) - flattened_tiled_variances = create_op_with_const_inputs(graph, Reshape, {1: int64_array([1, 1, -1])}, - {'name': 'flattened_tiled_variances'}, - tiled_variances) - # now we can concatenate priors with a tensor with variances as it is expected by the DetectionOutput - priors_node = Concat(graph, {'axis': 1, 'name': 'priors_with_variances'}).create_node( - [flattened_priors, flattened_tiled_variances]) - - # set a flag that priors should we swapped from YXYX to XYXY - need_swap_priors = True - - detection_output_op = DetectionOutput(graph, match.custom_replacement_desc.custom_attributes) - # during the bounding boxes detection the intermediate boxes are clipped to be in range [0, 1]. Different - # versions of the TF OD API SSD models have this clipping at different stages. 
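# Illustrative sketch (not taken from the removed MO sources): the two-row priors tensor built
# above, assuming priors of shape [num_priors, 4] and a 4-element variance vector taken from the
# pipeline config.
import numpy as np

def priors_with_variances(priors, variance):
    num_priors = priors.shape[0]
    flat_priors = priors.reshape(1, 1, -1)                        # [1, 1, num_priors * 4]
    flat_vars = np.tile(variance, num_priors).reshape(1, 1, -1)   # variances repeated per prior
    return np.concatenate([flat_priors, flat_vars], axis=1)       # [1, 2, num_priors * 4]

print(priors_with_variances(np.random.rand(100, 4), np.array([0.1, 0.1, 0.2, 0.2])).shape)
# (1, 2, 400)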
Special attributes - # "clip_before_nms" and "clip_after_nms" were introduced to the operation DetectionOutput to handle these cases. - # These attributes are specified in the JSON transformation configuration file - detection_output_node = detection_output_op.create_node( - [reshape_offsets, reshape_conf_node, priors_node], - dict(name=detection_output_op.attrs['type'], - background_label_id=0 if has_background_class else -1, - variances_encoded_in_target=False, - confidence_threshold=_value_or_raise(match, pipeline_config, 'postprocessing_score_threshold'), - top_k=_value_or_raise(match, pipeline_config, 'postprocessing_max_detections_per_class'), - keep_top_k=_value_or_raise(match, pipeline_config, 'postprocessing_max_total_detections'), - nms_threshold=_value_or_raise(match, pipeline_config, 'postprocessing_iou_threshold'))) - - # the TensorFlow model keeps the bounding boxes shape offsets as YXYX, while OV DetectionOutput expects them to - # be specified as XYXY. The solution is to update last convolutions weights and biases to produce XY->YX swapped - # bounding boxes offsets - conv_nodes = backward_bfs_for_operation(detection_output_node.in_node(0), ['Conv2D'], ['ShapeOf']) - swap_weights_xy(graph, conv_nodes) - - # also need to swap priors from YXYX to XYXY if this input was used from the original model. If the input was - # not with PriorBox or PriorBoxClustered operations above then the layout will be XYXY - if need_swap_priors: - insert_weights_swap_xy_sub_graph(graph, detection_output_node.in_port(2).get_connection()) - - # need to mark some Squeeze, Reshape and Concat operations to not change the layout - mark_squeeze_reshape_concat_before_detection_output(conv_nodes) - - # As outputs are replaced with a postprocessing node, outgoing tensor names are no longer correspond to the - # original tensors and should be removed from output->Result edges - clear_tensor_names_info([match.output_node(out)[0] for out in range(match.outputs_count())]) - - # return dictionary with mapping of nodes that is used in the `output_edges_match` function to finish sub-graph - # replacement by re-connecting output from the original matched output node to the DetectionOutput node - return {'detection_output_node': detection_output_node} - - -class ObjectDetectionAPIOutputReplacement(FrontReplacementFromConfigFileGeneral): - """ - This replacer is used to cut-off the network by specified nodes for models generated with Object Detection API. - The custom attribute for the replacer contains one value for key "outputs". This string is a comma separated list - of outputs alternatives. Each output alternative is a '|' separated list of node name which could be outputs. The - first node from each alternative group that exits in the graph is chosen. Others are ignored. - For example, if the "outputs" is equal to the following string: - - "Reshape_16,SecondStageBoxPredictor_1/Conv_3/BiasAdd|SecondStageBoxPredictor_1/Conv_1/BiasAdd" - - then the "Reshape_16" will be an output if it exists in the graph. The second output will be - SecondStageBoxPredictor_1/Conv_3/BiasAdd if it exist in the graph, if not then - SecondStageBoxPredictor_1/Conv_1/BiasAdd will be output if it exists in the graph. 
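# Illustrative sketch (not taken from the removed MO sources): how the "outputs" string described
# above is interpreted, with a plain set of node names standing in for the Graph.
def choose_outputs(outputs_string, graph_nodes):
    chosen = []
    for alternatives in outputs_string.split(','):
        for name in alternatives.split('|'):
            if name in graph_nodes:                  # the first alternative present in the graph wins
                chosen.append(name)
                break
    return chosen

nodes = {'Reshape_16', 'SecondStageBoxPredictor_1/Conv_1/BiasAdd'}
print(choose_outputs('Reshape_16,SecondStageBoxPredictor_1/Conv_3/BiasAdd|'
                     'SecondStageBoxPredictor_1/Conv_1/BiasAdd', nodes))
# ['Reshape_16', 'SecondStageBoxPredictor_1/Conv_1/BiasAdd']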
- """ - replacement_id = 'ObjectDetectionAPIOutputReplacement' - run_not_recursively = True - - def run_after(self): - return [ObjectDetectionAPITransformationsStart] - - def run_before(self): - return [ObjectDetectionAPIPreprocessorReplacement, ObjectDetectionAPIPreprocessor2Replacement] - - def transform_graph(self, graph: Graph, replacement_descriptions: dict): - if graph.graph['cmd_params'].output is not None: - log.warning('User defined output nodes are specified. Skip the graph cut-off by the ' - 'ObjectDetectionAPIOutputReplacement.') - return - outputs = [] - outputs_string = replacement_descriptions['outputs'] - for alternatives in outputs_string.split(','): - for out_node_name in alternatives.split('|'): - if graph.has_node(out_node_name): - outputs.append(out_node_name) - break - else: - log.debug('A node "{}" does not exist in the graph. Do not add it as output'.format(out_node_name)) - _outputs = output_user_data_repack(graph, outputs) - add_output_ops(graph, _outputs, graph.graph['inputs']) - - -class ObjectDetectionAPIPSROIPoolingReplacement(FrontReplacementFromConfigFileSubGraph): - """ - RFCN models contain a unique block ("SecondStageBoxPredictor") performing bounding boxes predictions which is - called Position Sensitive ROI Pooling (PSROIPooling). The combination of "CropAndResize operations located in the - "while" loop forms a single PSROIPooling operation with bilinear interpolation. The transformation matches two - "while" loops with PSROIPooling layers applied to the tensors with box coordinates and classes predictions. The - sub-graph being replaced also contains a Reduce operation performing mean calculation over the spatial dimensions, - so the transformation adds this operation as well. - """ - replacement_id = 'ObjectDetectionAPIPSROIPoolingReplacement' - run_not_recursively = True - - def run_after(self): - return [ObjectDetectionAPIProposalReplacement] - - def run_before(self): - return [ObjectDetectionAPITransformationsFinish] - - def output_edges_match(self, graph: Graph, match: SubgraphMatch, new_sub_graph: dict): - return {match.output_node(0)[0].id: new_sub_graph['output_node'].id} - - def generate_sub_graph(self, graph: Graph, match: SubgraphMatch): - argv = graph.graph['cmd_params'] - if argv.tensorflow_object_detection_api_pipeline_config is None: - raise Error(missing_param_error) - pipeline_config = PipelineConfig(argv.tensorflow_object_detection_api_pipeline_config) - num_classes = _value_or_raise(match, pipeline_config, 'num_classes') - - input_node = match.input_nodes(0)[0][0].in_node(0) - if 'class_predictions' in input_node.id: - psroipooling_output_dim = num_classes + 1 - else: - psroipooling_output_dim = num_classes * 4 - - num_spatial_bins_height = pipeline_config.get_param('num_spatial_bins_height') - num_spatial_bins_width = pipeline_config.get_param('num_spatial_bins_width') - crop_height = pipeline_config.get_param('crop_height') - crop_width = pipeline_config.get_param('crop_width') - if crop_height != crop_width: - raise Error('Different "crop_height" and "crop_width" parameters from the pipeline config are not ' - 'supported: {} vs {}'.format(crop_height, crop_width)) - - proposal_nodes = graph.get_op_nodes(name='reshape_swap_proposals_2d') - if len(proposal_nodes) != 1: - raise Error("Found the following nodes '{}' with name 'reshape_swap_proposals_2d' but there should be " - "exactly 1. Looks like ObjectDetectionAPIProposalReplacement transformation didn't work." 
- "".format(proposal_nodes)) - reshape_swap_proposals_node = proposal_nodes[0] - - psroipooling_node = PSROIPoolingOp(graph, {'name': input_node.soft_get('name') + '/PSROIPooling', - 'output_dim': psroipooling_output_dim, - 'group_size': crop_width // num_spatial_bins_width, - 'spatial_bins_x': num_spatial_bins_width, - 'spatial_bins_y': num_spatial_bins_height, - 'mode': 'bilinear', - 'spatial_scale': 1, - }).create_node([input_node, reshape_swap_proposals_node]) - - # add Reduce operation which is a part of the graph being removed - reduce_node = create_op_node_with_second_input(graph, ReduceMean, int64_array([1, 2]), - {'name': 'mean', 'keep_dims': True}, psroipooling_node) - - output_node = match.output_node(0)[0].out_node() - if len(output_node.in_ports()) == 2 and not output_node.in_port(1).disconnected(): - output_node.in_port(1).disconnect() # disconnect the second input to make "erase_node" function work - graph.erase_node(match.output_node(0)[0].out_node()) - - return {'output_node': reduce_node} - - -class ObjectDetectionAPIConstValueOverride(FrontReplacementFromConfigFileGeneral): - """ - Transforms allows to override specific constant values in the topology. The replacement description configuration - file contains list of tuples describing the desired replacements specified in the "replacements" key of the - "custom_attributes". The first element in the tuple is the initial node name of the graph with constant value. The - second element is the name of the parameter from the pipeline configuration file which stores new value. - - Usage example. The Faster-RCNNs topologies has constant node with the number specifying maximum generated proposals. - This value is specified in the pipeline configuration file in the parameter 'first_stage_max_proposals' and is - saved as a constant node in the generated topology. If the parameter is modified from it's original value then the - topology will be incorrect because the number 'first_stage_max_proposals' is used in the transforms of this file is - no more equal to the 'first_stage_max_proposals' saved as a constant. 
- """ - replacement_id = 'ObjectDetectionAPIConstValueOverride' - run_not_recursively = True - - def run_after(self): - return [ObjectDetectionAPITransformationsStart] - - def run_before(self): - return [ObjectDetectionAPIPreprocessorReplacement, ObjectDetectionAPIPreprocessor2Replacement] - - def transform_graph(self, graph: Graph, replacement_descriptions: dict): - argv = graph.graph['cmd_params'] - if argv.tensorflow_object_detection_api_pipeline_config is None: - raise Error(missing_param_error) - pipeline_config = PipelineConfig(argv.tensorflow_object_detection_api_pipeline_config) - for (node_id, pipeline_config_name) in replacement_descriptions['replacements']: - if node_id not in graph.nodes(): - log.debug('Node with id {} does not exist in the graph'.format(node_id)) - continue - node = Node(graph, node_id) - if not node.has_valid('value'): - log.debug('Node with id {} does not have value'.format(node_id)) - continue - node.value = mo_array(pipeline_config.get_param(pipeline_config_name)) - node.value = node.value.reshape(node.shape) diff --git a/tools/mo/openvino/tools/mo/front/tf/QueueDequeue_ext.py b/tools/mo/openvino/tools/mo/front/tf/QueueDequeue_ext.py deleted file mode 100644 index e9b336daf486cd..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/QueueDequeue_ext.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.op import Op - - -def get_attrs(node: Node): - shapes = node.pb.attr["_output_shapes"].list.shape - tf_types = node.pb.attr["component_types"].list.type - extracted_types = [] - for t in tf_types: - extracted_types.append(tf_dtype_extractor(t)) - result_shapes = [] - for shape_pb in shapes: - result_shapes.append(tf_tensor_shape(shape_pb)) - assert len(result_shapes) == len(extracted_types), "Output shapes do not match output" \ - "types in the node {}".format(node.soft_get('name', node.id)) - attrs = {"shapes": result_shapes, "types": extracted_types, 'out_ports_count': len(result_shapes)} - return attrs - - -class QueueDequeueV1Extractor(FrontExtractorOp): - op = "QueueDequeue" - enabled = True - - @classmethod - def extract(cls, node): - attrs = get_attrs(node) - Op.update_node_stat(node, attrs) - return cls.enabled - - -class QueueDequeueV2Extractor(FrontExtractorOp): - op = "QueueDequeueV2" - enabled = True - - @classmethod - def extract(cls, node): - attrs = get_attrs(node) - Op.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/RFFTRealImagToRFFTSplit.py b/tools/mo/openvino/tools/mo/front/tf/RFFTRealImagToRFFTSplit.py deleted file mode 100644 index 0cf3894ae3186a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/RFFTRealImagToRFFTSplit.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.ops.squeeze 
import Squeeze - - -class RFFTRealImagToRDFTSplit(FrontReplacementSubgraph): - """ - This transformation converts the operation TFRFFT into OpenVINO RDFT. - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.front.tf.TFFFTToDFT import TFFFTToDFT - return [TFFFTToDFT] - - def pattern(self): - return dict( - nodes=[ - ('rfft', dict(op='TFFFT', fft_kind='RDFT')), - ('real', dict(op='Real')), - ('imag', dict(op='Imag')), - ], - edges=[ - ('rfft', 'real', {'in': 0}), - ('rfft', 'imag', {'in': 0}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - rfft_node = match['rfft'] - real_node = match['real'] - imag_node = match['imag'] - - rfft_name = rfft_node.soft_get('name', rfft_node.id) - real_name = rfft_node.soft_get('name', real_node.id) - imag_name = rfft_node.soft_get('name', imag_node.id) - split_node = create_op_with_const_inputs(graph, Split, {1: int64_array(-1)}, - { - 'name': rfft_name + '/split', - 'num_splits': 2, - 'out_ports_count': 2 - }) - squeeze_real = create_op_with_const_inputs(graph, Squeeze, {1: int64_array(-1)}, - {'name': rfft_name + '/squeeze_real'}) - squeeze_imag = create_op_with_const_inputs(graph, Squeeze, {1: int64_array(-1)}, - {'name': rfft_name + '/squeeze_imag'}) - - split_node.out_port(0).connect(squeeze_real.in_port(0)) - split_node.out_port(1).connect(squeeze_imag.in_port(0)) - real_node.out_port(0).get_connection().set_source(squeeze_real.out_port(0)) - imag_node.out_port(0).get_connection().set_source(squeeze_imag.out_port(0)) - - rfft_node.out_port(0).connect(split_node.in_port(0)) - - rename_nodes([(real_node, real_name + '/to_be_removed'), (squeeze_real, real_name)]) - rename_nodes([(imag_node, imag_name + '/to_be_removed'), (squeeze_imag, imag_name)]) - - real_node.in_port(0).disconnect() - imag_node.in_port(0).disconnect() diff --git a/tools/mo/openvino/tools/mo/front/tf/RetinaNetFilteredDetectionsReplacement.py b/tools/mo/openvino/tools/mo/front/tf/RetinaNetFilteredDetectionsReplacement.py deleted file mode 100644 index c279c2da9138c4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/RetinaNetFilteredDetectionsReplacement.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.DetectionOutput import DetectionOutput -from openvino.tools.mo.ops.elementwise import Mul, Sub, Pow -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.split import VariadicSplit -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array, mo_array -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileSubGraph -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.broadcast import Broadcast -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.graph import clear_tensor_names_info - - -class RetinaNetFilteredDetectionsReplacement(FrontReplacementFromConfigFileSubGraph): - """ - The class replaces the 
sub-graph that performs boxes post-processing and NMS with the DetectionOutput layer. - - The post-processing in the RetinaNet topology is performed differently from the DetectionOutput layer implementation - in the OpenVINO. The first one calculates (d_x1, d_y1, d_x2, d_y2) which are a factor of the prior box width - and height. The DetectionOuput with "code_type" equal to "caffe.PriorBoxParameter.CORNER" just adds predicted deltas - to the prior box coordinates. This replacer add nodes which calculate prior box widths and heights, apply variances - to the predicated box coordinates and multiply them. With this approach the DetectionOutput layer with "code_type" - equal to "caffe.PriorBoxParameter.CORNER" produces the same result as the post-processing in the original topology. - """ - replacement_id = 'RetinaNetFilteredDetectionsReplacement' - - def output_edges_match(self, graph: Graph, match: SubgraphMatch, new_sub_graph: dict): - return {match.output_node(0)[0].id: new_sub_graph['detection_output_node'].id} - - def nodes_to_remove(self, graph: Graph, match: SubgraphMatch): - new_nodes_to_remove = match.matched_nodes_names() - new_nodes_to_remove.remove(match.single_input_node(0)[0].id) - new_nodes_to_remove.remove(match.single_input_node(1)[0].id) - new_nodes_to_remove.remove(match.single_input_node(2)[0].id) - return new_nodes_to_remove - - @staticmethod - def append_variances(priors_scale_node: Node, variance: list): - graph = priors_scale_node.graph - name = priors_scale_node.name - - sp_shape = Shape(graph, {'name': name + '/shape'}).create_node() - priors_scale_node.out_port(0).connect(sp_shape.in_port(0)) - - begin = Const(graph, {'value': int64_array([-2])}).create_node() - end = Const(graph, {'value': int64_array([-1])}).create_node() - stride = Const(graph, {'value': int64_array([1])}).create_node() - shape_part_for_tiling = StridedSlice(graph, {'name': name + '/get_-2_dim', 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0])}).create_node() - - sp_shape.out_port(0).connect(shape_part_for_tiling.in_port(0)) - begin.out_port(0).connect(shape_part_for_tiling.in_port(1)) - end.out_port(0).connect(shape_part_for_tiling.in_port(2)) - stride.out_port(0).connect(shape_part_for_tiling.in_port(3)) - - shape_concat = create_op_node_with_second_input(graph, Concat, int64_array([4]), - {'name': name + '/shape_for_tiling', 'in_ports_count': 2, - 'axis': int64_array(0)}, - shape_part_for_tiling) - - variance = Const(graph, {'name': name + '/variance', 'value': float32_array(variance)}).create_node() - tile = Broadcast(graph, {'name': name + '/variance_tile'}).create_node() - variance.out_port(0).connect(tile.in_port(0)) - shape_concat.out_port(0).connect(tile.in_port(1)) - - reshape_dim = Const(graph, {'value': int64_array([-1, 4])}).create_node() - sp_reshape = Reshape(graph, {'name': name + '/reshape'}).create_node() - sp_reshape.in_port(0).connect(priors_scale_node.out_port(0)) - sp_reshape.in_port(1).connect(reshape_dim.out_port(0)) - - concat = Concat(graph, - {'name': name + '/priors_concat', 'axis': int64_array(0), 'in_ports_count': 2}).create_node() - sp_reshape.out_port(0).connect(concat.in_port(0)) - tile.out_port(0).connect(concat.in_port(1)) - - output_dims = Const(graph, {'value': int64_array([1, 2, -1])}).create_node() - output_node = Reshape(graph, {'name': name + '/3D_priors_wth_variances'}).create_node() - 
concat.out_port(0).connect(output_node.in_port(0)) - output_dims.out_port(0).connect(output_node.in_port(1)) - - return output_node - - def placeholder_scales(self, placeholder: Node): - """ - Helper function to get scales for prior boxes out of input image size: - [1 / im_width, 1 / im_height, 1 / im_width, 1 / im_height] - """ - graph = placeholder.graph - name = placeholder.soft_get('name', placeholder.id) - - shape_value = placeholder.soft_get('shape', None) - assert shape_value is not None, \ - "[ {} replacer ] Placeholder `{}` should have shape attribute".format(self.replacement_id, name) - assert isinstance(shape_value, np.ndarray), \ - "[ {} replacer ] Placeholder `{}` shape attribute should be np.ndarray".format(self.replacement_id, name) - assert shape_value.size == 4, \ - "[ {} replacer ] Placeholder `{}` should be 4D. Shape: {}".format(self.replacement_id, name, shape_value) - - shape = Shape(graph, {'name': 'input_image_shape'}).create_node() - shape.in_port(0).connect(placeholder.out_port(0)) - - begin = Const(graph, {'value': int64_array([1])}).create_node() - end = Const(graph, {'value': int64_array([3])}).create_node() - stride = Const(graph, {'value': int64_array([1])}).create_node() - spatial = StridedSlice(graph, {'name': name + '/get_h_w', 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), 'ellipsis_mask': int64_array([0])}).create_node() - - spatial.in_port(0).connect(shape.out_port(0)) - spatial.in_port(1).connect(begin.out_port(0)) - spatial.in_port(2).connect(end.out_port(0)) - spatial.in_port(3).connect(stride.out_port(0)) - - power = Const(graph, {'value': float32_array([-1.])}).create_node() - spatial_scale = Pow(graph, {}).create_node() - - spatial_scale.in_port(0).connect(spatial.out_port(0)) - spatial_scale.in_port(1).connect(power.out_port(0)) - - # Power `type_infer` requires inputs to have equal data type - convert_to_fp32 = Cast(graph, {'dst_type': np.float32}).create_node() - spatial_scale.in_port(0).get_connection().insert_node(convert_to_fp32) - - order = Const(graph, {'value': int64_array([1, 0])}).create_node() - axis_const = Const(graph, {'value': int64_array(0)}).create_node() - reverse = Gather(graph, {}).create_node() - - reverse.in_port(0).connect(spatial_scale.out_port(0)) - reverse.in_port(1).connect(order.out_port(0)) - axis_const.out_port(0).connect(reverse.in_port(2)) - - priors_scale_node = Concat(graph, {'axis': 0, 'in_ports_count': 2}).create_node() - priors_scale_node.add_input_port(0, skip_if_exist=True) - priors_scale_node.add_input_port(1, skip_if_exist=True) - - priors_scale_node.in_port(0).connect(reverse.out_port(0)) - priors_scale_node.in_port(1).connect(reverse.out_port(0)) - return priors_scale_node - - def generate_sub_graph(self, graph: Graph, match: SubgraphMatch): - reshape_classes_node = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1]), - dict(name='do_reshape_classes'), - match.single_input_node(1)[0]) - - initial_priors_node = match.single_input_node(2)[0] - priors_name = initial_priors_node.soft_get('name', initial_priors_node.id) - # model calculates identical prior boxes for each batch, so we take first slice of them - begin = Const(graph, {'value': mo_array([0, 0, 0], dtype=np.int32)}).create_node() - end = Const(graph, {'value': mo_array([1, 0, 0], dtype=np.int32)}).create_node() - stride = Const(graph, {'value': mo_array([1, 1, 1], dtype=np.int32)}).create_node() - - priors_node = StridedSlice(graph, 
{'name': priors_name + '/0_batch_slice', - 'begin_mask': int64_array([1, 1, 1]), - 'end_mask': int64_array([1, 0, 0]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0])}).create_node() - - initial_priors_node.out_port(0).connect(priors_node.in_port(0)) - begin.out_port(0).connect(priors_node.in_port(1)) - end.out_port(0).connect(priors_node.in_port(2)) - stride.out_port(0).connect(priors_node.in_port(3)) - - placeholders = graph.get_op_nodes(type='Parameter') - assert len(placeholders) == 1, "{} replacer requires model to have one Placeholder, but current model has " \ - "{} placeholders".format(self.replacement_id, len(placeholders)) - placeholder = placeholders[0] - - # scale prior boxes to the [0, 1] interval - node_with_scales_for_prior_boxes = self.placeholder_scales(placeholder) - priors_scale_node = Mul(graph, {'name': 'scale_priors'}).create_node() - - broadcast = Broadcast(graph, {'name': 'scales_broadcast'}).create_node() - shape_of_priors = Shape(graph, {'name': 'priors_shape'}).create_node() - priors_node.out_port(0).connect(shape_of_priors.in_port(0)) - broadcast.in_port(1).connect(shape_of_priors.out_port(0)) - broadcast.in_port(0).connect(node_with_scales_for_prior_boxes.out_port(0)) - - priors_scale_node.in_port(0).connect(priors_node.out_port(0)) - priors_scale_node.in_port(1).connect(broadcast.out_port(0)) - - try: - variance = match.custom_replacement_desc.custom_attributes['variance'] - except: - raise Error('There is no variance attribute in {} replacement config file `custom_attributes`' - ''.format(self.replacement_id)) - - priors = self.append_variances(priors_scale_node, variance) - - # calculate prior boxes widths and heights - split_node = create_op_with_const_inputs( - graph, VariadicSplit, {1: int64_array(2), 2: int64_array([1, 1, 1, 1])}, {'out_ports_count': 4}, - priors_scale_node) - - priors_width_node = Sub(graph, dict(name=split_node.name + '/sub_2-0_') - ).create_node([(split_node, 2), (split_node, 0)]) - priors_height_node = Sub(graph, dict(name=split_node.name + '/sub_3-1_') - ).create_node([(split_node, 3), (split_node, 1)]) - - # concat weights and heights into a single tensor and multiple with the box coordinates regression values - # WA with 3 Concats instead of 1 for keeping model reshapable - # concat_width_height_node = Concat(graph, {'name': 'concat_priors_width_height', 'axis': -1, - # 'in_ports_count': 4}).create_node( - # [priors_width_node, priors_height_node, priors_width_node, priors_height_node]) - - concat_1 = Concat(graph, {'name': 'concat_width_height', - 'axis': -1, 'in_ports_count': 2}).create_node([priors_width_node, priors_height_node]) - concat_2 = Concat(graph, {'name': 'concat_width_height_width', - 'axis': -1, 'in_ports_count': 2}).create_node([concat_1, priors_width_node]) - concat_width_height_node = Concat(graph, {'name': 'concat_priors_width_height', 'axis': -1, 'in_ports_count': 2} - ).create_node([concat_2, priors_height_node]) - - applied_width_height_regressions_node = Mul(graph, {'name': 'final_regressions'}).create_node( - [concat_width_height_node, match.single_input_node(0)[0]]) - - # reshape to 2D tensor as OpenVINO Detection Output layer expects - reshape_regression_node = create_op_node_with_second_input(graph, Reshape, int64_array([0, -1]), - dict(name='reshape_regression'), - applied_width_height_regressions_node) - - detection_output_op = DetectionOutput(graph, match.custom_replacement_desc.custom_attributes) - # get nms from the original network - 
iou_threshold = None - nms_nodes = graph.get_op_nodes(op='NonMaxSuppression') - if len(nms_nodes) > 0: - # it is highly unlikely that for different classes NMS has different - # moreover DetectionOutput accepts only scalar values for iou_threshold (nms_threshold) - iou_threshold = nms_nodes[0].in_node(3).value - if iou_threshold is None: - raise Error('During {} `iou_threshold` was not retrieved from RetinaNet graph'.format(self.replacement_id)) - - detection_output_node = detection_output_op.create_node( - [reshape_regression_node, reshape_classes_node, priors], - dict(name=detection_output_op.attrs['type'], nms_threshold=iou_threshold, clip_after_nms=1, normalized=1, - variance_encoded_in_target=0, background_label_id=1000)) - - # As outputs are replaced with a postprocessing node, outgoing tensor names are no longer - # correspond to original tensors and should be removed from output->Result edges - out_nodes = [] - for out in range(match.outputs_count()): - out_nodes.append(match.output_node(out)[0]) - clear_tensor_names_info(out_nodes) - - return {'detection_output_node': detection_output_node} diff --git a/tools/mo/openvino/tools/mo/front/tf/RollRealImagPack.py b/tools/mo/openvino/tools/mo/front/tf/RollRealImagPack.py deleted file mode 100644 index 89fbf10bb434c8..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/RollRealImagPack.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.front.tf.graph_utils import add_constant_to_negative_values -from openvino.tools.mo.graph.graph import Graph - - -class RollRealImagPack(FrontReplacementSubgraph): - """ - Some TF models contain Roll for complex data, as a part of the sub-graph - - input shift axes - | | | - ------------------- - Roll - | - ------------------- - | | - Real Imag - | | - ------- ------- - | | - Pack - | - SomeOp - - This sub-graph can be replaced with the sub-graph - - input shift axes - | | | - ------------------- - Roll - | - SomeOp - - But after such replacement, we should correct axes of Roll, because input data are real now. Namely, if - there are negative axes for Roll, we need subtract 1 from such axes indices. 
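# Illustrative sketch (not taken from the removed MO sources): the axis correction described
# above, as a plain helper. Negative Roll axes shift one position to the left because the
# real-valued representation of the complex tensor carries an extra trailing dimension.
def correct_roll_axes(axes):
    return [axis - 1 if axis < 0 else axis for axis in axes]

print(correct_roll_axes([1, -1, -2]))  # [1, -2, -3]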
- """ - enabled = True - - def run_before(self): - from openvino.tools.mo.front.Pack import Pack - return [Pack] - - def pattern(self): - return dict( - nodes=[ - ('unroll', dict(op='Roll')), - ('real', dict(op='Real')), - ('imag', dict(op='Imag')), - ('pack', dict(op='Pack')), - ], - edges=[ - ('unroll', 'real', {'in': 0}), - ('unroll', 'imag', {'in': 0}), - ('real', 'pack', {'in': 0}), - ('imag', 'pack', {'in': 1}), - ]) - - def replace_sub_graph(self, graph: Graph, match: [dict, SubgraphMatch]): - unroll = match['unroll'] - add_constant_to_negative_values(unroll, 2, int64_array(-1)) - pack = match['pack'] - pack.out_port(0).get_connection().set_source(unroll.out_port(0)) - graph.remove_nodes_from([match['real'].id, match['imag'].id]) diff --git a/tools/mo/openvino/tools/mo/front/tf/SSDToolboxDetectionOutput.py b/tools/mo/openvino/tools/mo/front/tf/SSDToolboxDetectionOutput.py deleted file mode 100644 index d3a8febee2cb82..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/SSDToolboxDetectionOutput.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.DetectionOutput import DetectionOutput -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileSubGraph -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import PermuteAttrs -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.result import Result - - -class SSDToolboxDetectionOutputReplacement(FrontReplacementFromConfigFileSubGraph): - replacement_id = 'SSDToolboxDetectionOutput' - - def nodes_to_remove(self, graph: Graph, match: SubgraphMatch): - return [] - - def generate_sub_graph(self, graph: Graph, match: SubgraphMatch): - # OV DetectionOutput layer consumes flattened confidences and locations tensors. - # That is why we add reshapes before them. 
- locs_node = match.single_input_node(0) - conf_node = match.single_input_node(1) - prior_boxes_node = match.single_input_node(2) - - locs_out_nodes = locs_node[0].out_nodes() - assert len(locs_out_nodes) == 1 - locs_out_node = locs_out_nodes[list(locs_out_nodes.keys())[0]] - assert locs_out_node.op == "Result", locs_out_node.op - graph.remove_node(locs_out_node.id) - - conf_out_nodes = conf_node[0].out_nodes() - assert len(conf_out_nodes) == 1 - conf_out_node = conf_out_nodes[list(conf_out_nodes.keys())[0]] - assert conf_out_node.op == "Result", conf_out_node.op - graph.remove_node(conf_out_node.id) - - # reshape operation to flatten confidence tensor - const = Const(graph, {'value': int64_array([0, -1])}).create_node() - reshape_loc_node = Reshape(graph, {}).create_node([locs_node, const], dict(name='DetectionOutput_Reshape_loc_')) - - # reshape operation to flatten confidence tensor - reshape_conf_node = Reshape(graph, {}).create_node([conf_node, const], dict(name='DetectionOutput_Reshape_conf_')) - - # remove the Result node after the priors node - assert prior_boxes_node[0].out_node().op == "Result" - graph.remove_node(prior_boxes_node[0].out_node().id) - - # reshape operation for prior boxes tensor - const = Const(graph, {'value': int64_array([1, 2, -1])}).create_node() - reshape_priors_node = Reshape(graph, {}).create_node([prior_boxes_node, const], - dict(name='DetectionOutput_Reshape_priors_')) - # create Detection Output node with three inputs: locations, confidences and prior boxes - detection_output_op = DetectionOutput(graph, match.custom_replacement_desc.custom_attributes) - detection_output_node = detection_output_op.create_node( - [reshape_loc_node, reshape_conf_node, reshape_priors_node], - dict(name=detection_output_op.attrs['type'] + '_')) - PermuteAttrs.set_permutation(reshape_priors_node, detection_output_node, None) - - # create Output node to mark DetectionOutput as a graph output operation - output_op = Result(graph) - output_op.create_node([detection_output_node], dict(name='sink_')) - return {} diff --git a/tools/mo/openvino/tools/mo/front/tf/SwitchMergeOptimization.py b/tools/mo/openvino/tools/mo/front/tf/SwitchMergeOptimization.py deleted file mode 100644 index 02d357deff0c2d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/SwitchMergeOptimization.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.select import Select -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph - - -class SwitchMergeOptimization(FrontReplacementSubgraph): - """ - Optimization for case, when combination of Switches have one common condition and can be expressed as Select node. - - This transformation matches too big number of instances for models with many BatchNorm layers with the same input - from the model input data node with training/inference flag. So the transformation is implemented as a simple graph - traversal instead of regular pattern-based approach. 
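# Illustrative sketch (not taken from the removed MO sources): with a single shared condition,
# the Switch/Merge combination listed below collapses into one element-wise select.
import numpy as np

def select(condition, true_value, false_value):
    return np.where(condition, true_value, false_value)

print(select(np.array([True, False]), np.array([1, 2]), np.array([3, 4])))  # [1 4]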
- - The following pattern is checked: - nodes=[('Merge', dict(kind='op', op='Merge')), - ('Switch_2_input', dict(kind='data')), - ('Switch_2', dict(kind='op', op='Switch')), - ('Switch_2_data', dict(kind='data')), - ('op', dict(kind='op')), - ('op_data', dict(kind='data')), - ('Switch', dict(kind='op', op='Switch')), - ('Switch_data', dict(kind='data')), - ('Switch_1', dict(kind='op', op='Switch')), - ('Switch_1_data', dict(kind='data')), - ('cond_data', dict(kind='data')), - ('identity', dict(kind='op', op='Identity')), - ('identity_data', dict(kind='data')), - ], - edges=[ - ('Switch_2_input', 'Switch_2', {'in': 0}), - ('Switch_2', 'Switch_2_data', {'out': 1}), - ('Switch_2_data', 'Merge'), - ('cond_data', 'Switch_2', {'in': 1}), - ('cond_data', 'Switch_1', {'in': 1}), - ('cond_data', 'Switch', {'in': 1}), - ('Switch_1', 'Switch_1_data', {'out': 0}), - ('Switch', 'Switch_data', {'out': 0}), - ('Switch_1_data', 'op', {'in': 1}), - ('Switch_data', 'op', {'in': 0}), - ('op', 'op_data'), - ('op_data', 'identity'), - ('identity', 'identity_data'), - ('identity_data', 'Merge'), - ], - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for merge in graph.get_op_nodes(op='Merge'): - for merge_switch_in_port in range(2): - if merge.in_port(merge_switch_in_port).disconnected() or \ - merge.in_port(merge_switch_in_port).get_source().node.op != 'Switch': - continue - switch_2 = merge.in_port(merge_switch_in_port).get_source().node - - if merge.in_port(1 - merge_switch_in_port).disconnected() or \ - merge.in_port(1 - merge_switch_in_port).get_source().node.op != 'Identity': - continue - false_value_port = merge.in_port(1 - merge_switch_in_port).get_source() - - true_value_port = switch_2.in_port(0).get_source() - op = false_value_port.node.in_port(0).get_source().node - - if op.in_port(0).disconnected() or op.in_port(0).get_source().node.op != 'Switch': - continue - switch = op.in_port(0).get_source().node - - if op.in_port(1).disconnected() or op.in_port(1).get_source().node.op != 'Switch': - continue - switch_1 = op.in_port(1).get_source().node - - if switch.in_port(1).get_source() == switch_1.in_port(1).get_source() and \ - switch.in_port(1).get_source() == switch_2.in_port(1).get_source(): - select = Select(graph, dict(name=merge.soft_get('name') + '/Select/', format='tf')).create_node() - select.in_port(0).connect(switch.in_port(1).get_source()) - select.in_port(1).connect(true_value_port) - select.in_port(2).connect(false_value_port) - - merge.out_port(0).get_connection().set_source(select.out_port(0)) - - assert 1 in op.in_ports() and 0 in op.in_ports() - - op.in_port(0).disconnect() - op.in_port(1).disconnect() - - switch.in_port(0).get_connection().set_destination(op.in_port(0)) - switch_1.in_port(0).get_connection().set_destination(op.in_port(1)) - - graph.remove_nodes_from(nodes=[switch_1.id, switch.id, switch_2.id, merge.id]) - # need to exit from the inner for loop because the Merge op has been removed - break diff --git a/tools/mo/openvino/tools/mo/front/tf/TFFFTToDFT.py b/tools/mo/openvino/tools/mo/front/tf/TFFFTToDFT.py deleted file mode 100644 index baefa191b8d60a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/TFFFTToDFT.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import 
create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.dft import DFT, IDFT, IRDFT, RDFT - - -class TFFFTToDFT(FrontReplacementSubgraph): - """ - This transformation converts the operation TFFFT into OpenVINO operations DFT, RDFT, IDFT, or IRDFT, - according to the following rules: - 1) FFT, FFT2D, FFT3D are converted into DFT; - 2) IFFT, IFFT2D, IFFT3D are converted into IDFT; - 3) RFFT, RFFT2D, RFFT3D are converted into RDFT; - 4) IRFFT, IRFFT2D, IRFFT3D are converted into IRDFT. - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.front.tf.RollRealImagPack import RollRealImagPack - return [RollRealImagPack] - - def find_and_replace_pattern(self, graph: Graph): - for tf_fft in graph.get_op_nodes(op='TFFFT'): - tf_fft_name = tf_fft.soft_get('name', tf_fft.id) - - num_of_dims = tf_fft.soft_get('num_of_dimensions', 1) - axes = int64_array(range(-num_of_dims, 0)) - - fft_kind = tf_fft['fft_kind'] - assert fft_kind in ['DFT', 'IDFT', 'RDFT', 'IRDFT'], \ - 'Node {} with the operation TFFFT supports only the following FFT-like operations: ' \ - 'DFT, IDFT, RDFT, IRDFT. Got: {}'.format(tf_fft_name, fft_kind) - - op = {'DFT': DFT, 'IDFT': IDFT, 'RDFT': RDFT, 'IRDFT': IRDFT}[fft_kind] - - if fft_kind in ['DFT', 'IDFT'] or not tf_fft.is_in_port_connected(1): - dft_node = create_op_with_const_inputs(graph, op, {1: axes}, {'in_ports_count': 2}, - tf_fft.in_port(0).get_source().node) - else: - dft_node = create_op_with_const_inputs(graph, op, {1: axes}, {'in_ports_count': 3}, - tf_fft.in_port(0).get_source().node) - tf_fft.in_port(1).get_source().connect(dft_node.in_port(2)) - - tf_fft.out_port(0).get_connection().set_source(dft_node.out_port(0)) - - rename_nodes([(tf_fft, tf_fft_name + '/to_be_removed'), (dft_node, tf_fft_name)]) - - if graph.graph['layout'] == 'NHWC': - dft_node['need_insert_transposes_for_dft'] = True diff --git a/tools/mo/openvino/tools/mo/front/tf/TFResizeToInterpolate.py b/tools/mo/openvino/tools/mo/front/tf/TFResizeToInterpolate.py deleted file mode 100644 index dc8a4b6742ed08..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/TFResizeToInterpolate.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.elementwise import Div -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice - - -def replace_tf_resize(graph: Graph, resize: Node, interpolation_mode: str): - resize_name = resize.soft_get('name', resize.id) - log.debug("Converting of {} to Interpolate-4 is triggered for node {}.".format(resize.op, resize_name)) - - num_of_inputs = len([port for port in resize.in_ports().values() if not port.disconnected()]) - assert num_of_inputs == 2, \ - "Number of inputs of {} (with name {}) should be equal to 2".format(resize.op, resize_name) - - attrs_msg = "If half_pixel_centers attribute of the node {} with op {} is True, " \ - "the attribute align_corners must be False" - assert not resize.half_pixel_centers 
or (resize.half_pixel_centers and not resize.align_corners), \ - attrs_msg.format(resize_name, resize.op) - - if resize.has_valid('data_type') and not np.issubdtype(resize.data_type, np.floating): - input_cast = Cast(graph, {'name': resize_name + '/to_f32', 'dst_type': np.float32}).create_node() - resize.in_port(0).get_connection().insert_node(input_cast, "source") - # casted tensor is not present in TF model, we don't need to propagate any name to output of this Convert - # therefore we use "source" attributes_save_mode for insert_node - - shape = Shape(graph, {'name': resize_name + '/shapeof'}).create_node() - - ss = create_op_with_const_inputs(graph, StridedSlice, - {1: int64_array([1]), - 2: int64_array([3]), - 3: int64_array([1]) - }, - {'name': resize_name + '/StridedSlice', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0]) - }) - - div_node = Div(graph, {'name': resize_name + '/Div'}).create_node() - - shape_to_float = Cast(graph, dict(dst_type=np.float32)).create_node() - size_to_float = Cast(graph, dict(dst_type=np.float32)).create_node() - - size_to_float.out_port(0).connect(div_node.in_port(0)) - shape_to_float.out_port(0).connect(div_node.in_port(1)) - ss.out_port(0).connect(shape_to_float.in_port(0)) - shape.out_port(0).connect(ss.in_port(0)) - - align_corners = resize.align_corners - half_pixel_centers = resize.half_pixel_centers - - nearest_mode = 'floor' if interpolation_mode == 'nearest' else 'round_prefer_floor' - if align_corners: - coordinate_transformation_mode = 'align_corners' - if interpolation_mode == 'nearest': - nearest_mode = 'round_prefer_ceil' - elif half_pixel_centers: - coordinate_transformation_mode = 'tf_half_pixel_for_nn' if interpolation_mode == 'nearest' else 'half_pixel' - else: - coordinate_transformation_mode = 'asymmetric' - - interpolate4 = create_op_with_const_inputs(graph, Interpolate, - { - 3: int64_array([1, 2]) - }, - { - 'name': resize_name + '/interpolate_4', - 'mode': interpolation_mode, - 'antialias': False, - 'coordinate_transformation_mode': coordinate_transformation_mode, - 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), - 'nearest_mode': nearest_mode, - 'cube_coeff': -0.75, - 'shape_calculation_mode': 'sizes', - 'version': 'opset4', - 'in_ports_count': 4, - }) - - resize_input_connection = resize.in_port(0).get_connection() - resize_input_connection.set_destination(interpolate4.in_port(0)) - resize_input_connection.get_source().connect(shape.in_port(0)) - - div_node.out_port(0).connect(interpolate4.in_port(2)) - - sizes_connection = resize.in_port(1).get_connection() - sizes_connection.set_destination(interpolate4.in_port(1)) - sizes_connection.get_source().connect(size_to_float.in_port(0)) - - resize.out_port(0).get_connection().set_source(interpolate4.out_port(0)) - rename_nodes([(resize, resize_name + '/delete'), (interpolate4, resize_name)]) - - -class TFResizeToInterpolate(FrontReplacementOp): - """ - The transformation replaces TFResize with Interpolate-4. 
- """ - op = 'TFResize' - enabled = True - - def run_after(self): - from openvino.tools.mo.front.InterpolateNormalizer import InterpolateNormalizer - return [InterpolateNormalizer] - - def replace_sub_graph(self, graph: Graph, match: dict): - resize = match['op'] - replace_tf_resize(graph, resize, resize.mode) diff --git a/tools/mo/openvino/tools/mo/front/tf/TFScatterNDDecomposition.py b/tools/mo/openvino/tools/mo/front/tf/TFScatterNDDecomposition.py deleted file mode 100644 index eb558d359a783b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/TFScatterNDDecomposition.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array, int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.broadcast import Broadcast -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.scatternd import ScatterNDUpdate -from openvino.tools.mo.ops.ConvertLike import ConvertLike - - -class TFScatterNDDecomposition(FrontReplacementSubgraph): - """ - Replaces TensorFlow ScatterND with OpenVINO ScatterNDUpdate. TF ScatterND does not have input data, so - instead of this argument it expects its shape - - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for tf_scatter_nd in graph.get_op_nodes(op='TFScatterND'): - if not tf_scatter_nd.is_in_port_connected(0) or not tf_scatter_nd.is_in_port_connected(1) \ - or not tf_scatter_nd.is_in_port_connected(2): - continue - name = tf_scatter_nd.soft_get('name', tf_scatter_nd.soft_get('id')) - indices_port = tf_scatter_nd.in_port(0).get_source() - updates_port = tf_scatter_nd.in_port(1).get_source() - shape_port = tf_scatter_nd.in_port(2).get_source() - # need get type of const type - zero_const = Const(graph, {'value': int64_array(0.0), 'name': name + '/zero_const'}).create_node() - - # Convert zero value to type of updates node - convert_to_type = ConvertLike(graph, {'name': name + '/convert_like'}).create_node() - convert_to_type.in_port(0).connect(zero_const.out_port(0)) - convert_to_type.in_port(1).connect(updates_port) - - broad_cast_node = Broadcast(graph, {'name': name + '/broadcast'}).create_node() - broad_cast_node.in_port(0).connect(convert_to_type.out_port(0)) - broad_cast_node.in_port(1).connect(shape_port) - - scatter_nd_node = ScatterNDUpdate(graph, {'name': name + '/replaced'}).create_node() - scatter_nd_node.in_port(0).connect(broad_cast_node.out_port(0)) - scatter_nd_node.in_port(1).connect(indices_port) - scatter_nd_node.in_port(2).connect(updates_port) - - rename_nodes([(tf_scatter_nd, name + '/TBD'), (scatter_nd_node, name)]) - - tf_scatter_nd.out_port(0).get_connection().set_source(scatter_nd_node.out_port(0)) - tf_scatter_nd.in_port(0).disconnect() - tf_scatter_nd.in_port(1).disconnect() - tf_scatter_nd.in_port(2).disconnect() diff --git a/tools/mo/openvino/tools/mo/front/tf/TFSliceToSlice.py b/tools/mo/openvino/tools/mo/front/tf/TFSliceToSlice.py deleted file mode 100644 index 36a913cb6fd984..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/TFSliceToSlice.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.elementwise import Add, Equal -from openvino.tools.mo.ops.select 
import Select -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.slice import Slice - - -class TFSliceToSliceReplacer(FrontReplacementOp): - """ - This transformation converts TFSlice to internal Slice operation. - TFSlice has 'size' on the second input while Slice has 'ends', therefore we insert Add(begin, size). - size[i] == -1 is a magic number that means take the whole range along axis i up to the end. - To process the case when size[i] == -1 we insert subgraph with ShapeOf. - """ - op = 'TFSlice' - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - tf_slice_node = match['op'] - slice_name = tf_slice_node.soft_get('name', tf_slice_node.id) - slice_node = Slice(graph).create_node() - rename_nodes([(tf_slice_node, slice_name + '/to_be_removed'), (slice_node, slice_name)]) - ends_node = Add(graph, {'name': slice_name + '/ends'}).create_node() - - # reconnect input, begin, and size from TFSlice to the subgraph with Slice - tf_slice_node.in_port(0).get_connection().set_destination(slice_node.in_port(0)) - tf_slice_node.in_port(1).get_connection().set_destination(slice_node.in_port(1)) - tf_slice_node.in_port(2).get_connection().set_destination(ends_node.in_port(0)) - slice_node.in_port(1).get_connection().add_destination(ends_node.in_port(1)) - - max_ends = Shape(graph, {'name': slice_name + '/ShapeOf'}).create_node() - slice_node.in_port(0).get_connection().add_destination(max_ends.in_port(0)) - - # check if size[i] == -1, will be applied elementwisely: len(size) = len(begin) = input_rank - where_max_ends_is_needed = create_op_with_const_inputs(graph, Equal, {0: int64_array(-1)}, - {'name': slice_name + '/where_max_ends_is_needed'}) - ends_node.in_port(0).get_connection().add_destination(where_max_ends_is_needed.in_port(1)) - # select requires equal dtypes, need to convert ends to I64 - ends_casted_to_i64 = Cast(graph, {'name': slice_name + '/CastToI64', - 'dst_type': np.int64}).create_node([ends_node]) - # if size[i] == 1 then take max_ends values - correct_ends = Select(graph, {'name': slice_name + '/chosen_ends'}).create_node([where_max_ends_is_needed, - max_ends, ends_casted_to_i64]) - correct_ends.out_port(0).connect(slice_node.in_port(2)) - - tf_slice_node.out_port(0).get_connection().set_source(slice_node.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/tf/TensorArrayExtractors.py b/tools/mo/openvino/tools/mo/front/tf/TensorArrayExtractors.py deleted file mode 100644 index 7a88d31f12b273..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/TensorArrayExtractors.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.TensorArray import TensorArray -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_tensor_shape -from openvino.tools.mo.graph.graph import Node - - -class TensorArrayExtractor(FrontExtractorOp): - op = "TensorArrayV3" - enabled = True - - @classmethod - def extract(cls, node: Node): - attrs = { - 'op': __class__.op, - 'element_shape': tf_tensor_shape(node.pb.attr["element_shape"].shape), - } - TensorArray.update_node_stat(node, attrs) - return cls.enabled 
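Note (editorial, not part of the diff): the TFSliceToSliceReplacer removed above rewrites TFSlice(begin, size) into Slice(begin, ends), building the ends computation out of Add, ShapeOf, Equal and Select nodes. The NumPy sketch below only illustrates that computation; the function name and example values are invented for this note and do not exist in the repository.

import numpy as np

def tf_slice_to_slice_ends(input_shape, begin, size):
    # Mirrors the removed replacer: ends = begin + size (the Add node),
    # and wherever size[i] == -1 the end is taken from ShapeOf(input)
    # through an Equal(-1) mask feeding a Select node.
    input_shape = np.asarray(input_shape, dtype=np.int64)
    begin = np.asarray(begin, dtype=np.int64)
    size = np.asarray(size, dtype=np.int64)

    ends = begin + size                               # Add(begin, size)
    take_to_end = size == -1                          # Equal(size, -1)
    return np.where(take_to_end, input_shape, ends)   # Select(mask, max_ends, ends)

# Example: begin=[0, 1, 0], size=[-1, 2, -1] on a [2, 3, 4] input -> ends [2, 3, 4]
print(tf_slice_to_slice_ends([2, 3, 4], [0, 1, 0], [-1, 2, -1]))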
diff --git a/tools/mo/openvino/tools/mo/front/tf/TensorArrayGatherV3.py b/tools/mo/openvino/tools/mo/front/tf/TensorArrayGatherV3.py deleted file mode 100644 index c83bd298709839..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/TensorArrayGatherV3.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.TensorArrayGather import TensorArrayGather -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_tensor_shape -from openvino.tools.mo.graph.graph import Node - - -class TensorArrayGatherV3Extractor(FrontExtractorOp): - op = "TensorArrayGatherV3" - enabled = True - - @classmethod - def extract(cls, node: Node): - attrs = { - 'op': __class__.op, - 'element_shape': tf_tensor_shape(node.pb.attr["element_shape"].shape), - } - TensorArrayGather.update_node_stat(node, attrs) - return cls.enabled - diff --git a/tools/mo/openvino/tools/mo/front/tf/UnpackPackReverseInputChannels.py b/tools/mo/openvino/tools/mo/front/tf/UnpackPackReverseInputChannels.py deleted file mode 100644 index 0ce572d39fa617..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/UnpackPackReverseInputChannels.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.back.ReverseInputChannels import ReverseChannels -from openvino.tools.mo.front.Pack import Pack -from openvino.tools.mo.front.split_normalizer import AttributedSplitToSplit, SqueezeAxis -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph - - -class UnpackPackReverseInputChannels(FrontReplacementSubgraph): - r""" - Unpack - Pack nodes sequence from TensorFlow connected like it shown below is a way to ReverseChannels - - / 0 - 2 \ - Unpack - 1 - 1 - Pack - \ 2 - 0 / - - Converting it to internal ReverseChannels node to be fused to Convolution while running ApplyReverseChannels on back - """ - enabled = True - - def run_before(self): - # ordering transformations to keep matching pattern as small as possible - - # Unpack from TensorFlow is extracted as AttributedSplit with squeeze_axis=True attribute, - # so we should execute current transformation before AttributedSplitToSplit and SqueezeAxis - - # Pack from TensorFlow is an operation that creates new dimension, which we add by inserting Unsqueeze on all - # inputs at Pack transform, so we should execute current transformation before it - return [AttributedSplitToSplit, Pack, SqueezeAxis] - - def pattern(self): - return dict( - nodes=[ - ('unpack', dict(op='AttributedSplit')), - ('pack', dict(op='Pack')), - ], - edges=[ - ('unpack', 'pack', {'out': 0, 'in': 2}), - ('unpack', 'pack', {'out': 1, 'in': 1}), - ('unpack', 'pack', {'out': 2, 'in': 0}), - ]) - - def replace_sub_graph(self, graph: Graph, match: dict): - unpack = match['unpack'] - pack = match['pack'] - - if unpack.soft_get('axis', None) is None or unpack.axis != pack.soft_get('axis', None): - # axes doesn't match - not ReverseChannels case - return - - axis = unpack.axis - - connected_unpack_ports_count = len([port for port in unpack.out_ports().values() if not port.disconnected()]) - connected_pack_ports_count = len([port for port in pack.in_ports().values() if not port.disconnected()]) - if connected_pack_ports_count != 
connected_unpack_ports_count or connected_unpack_ports_count != 3: - # number of connected input ports of Concat and output ports of Split mismatch - not ReverseChannels case - return - - name = pack.soft_get('name', pack.id) - log.debug('Unpack - Pack sequence was detected `{}`'.format(name)) - - reverse_channels = ReverseChannels(graph, { - 'name': pack.soft_get('name', pack.id) + '/ReverseChannels', - 'axis': int64_array(axis), 'order': int64_array([2, 1, 0])}).create_node() - - pack.out_port(0).get_connection().set_source(reverse_channels.out_port(0)) - unpack.in_port(0).get_connection().set_destination(reverse_channels.in_port(0)) - log.debug('Unpack - Pack was converted to ReverseChannels {}'.format(name)) diff --git a/tools/mo/openvino/tools/mo/front/tf/WhereDecomposition.py b/tools/mo/openvino/tools/mo/front/tf/WhereDecomposition.py deleted file mode 100644 index f9fde8ae8a0fec..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/WhereDecomposition.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Node, Graph, rename_nodes -from openvino.tools.mo.ops.non_zero import NonZero -from openvino.tools.mo.ops.transpose import Transpose - - -class WhereDecomposition(FrontReplacementOp): - """ - This transformation decomposes the TF layer Where (when x = None, y = None) using the formula - Where(condition) = Transpose(NonZero(condition), [1, 0]) - """ - op = 'Where' - enabled = True - - def run_after(self): - from openvino.tools.mo.front.tf.embedding_segments_operation_fusing import \ - EmbeddingSegmentsOperationMultipleFeaturesFusing, EmbeddingSegmentsOperationSingleFeatureFusing - from openvino.tools.mo.front.TransposeOrderNormalizer import TransposeOrderNormalizer - return [EmbeddingSegmentsOperationMultipleFeaturesFusing, EmbeddingSegmentsOperationSingleFeatureFusing, - TransposeOrderNormalizer] - - def replace_op(self, graph: Graph, node: Node): - node_name = node.soft_get('name', node.id) - non_zero_node = NonZero(graph, {'name': node_name + '/NonZero_', 'output_type': np.int64}).create_node() - transpose_node = create_op_node_with_second_input(graph, Transpose, int64_array([1, 0]), op_attrs={}) - non_zero_node.out_port(0).connect(transpose_node.in_port(0)) - rename_nodes([(node, node_name + '/delete'), (transpose_node, node_name)]) - - non_zero_node.in_port(0).connect(node.in_port(0).get_source()) - return [transpose_node.id] diff --git a/tools/mo/openvino/tools/mo/front/tf/WhileNormalize.py b/tools/mo/openvino/tools/mo/front/tf/WhileNormalize.py deleted file mode 100644 index ad97ace0901c18..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/WhileNormalize.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.ops.loop import Loop -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.const import Const - - -class WhileNormalize(FrontReplacementSubgraph): - """ - Normalize inputs for Loop replacing TensorFlow 2 While 
operation: - 1) Remove external input port for current iteration - 2) Move trip count from port #1 to port #0 - 3) Occupy port #1 for execution condition - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='Loop'): - self.normalize_loop_node(graph, node) - - @staticmethod - def normalize_loop_node(graph: Graph, loop_node: Node): - loop_name = loop_node.soft_get('name', loop_node.id) - - # disconnect current iteration from external port #0 and move trip count to this port - loop_node.in_port(0).disconnect() - loop_node.in_port(1).get_connection().add_destination(loop_node.in_port(0)) - Loop.update_port_map_value(loop_node.input_port_map, 'external_port_id', 1, 0) - - # connect execution condition port - exec_cond_node = Const(graph, {'name': loop_name + '/ExecutionConditionValue', - 'value': mo_array(True, dtype=bool)}).create_node() - loop_node.in_port(1).get_connection().set_source(exec_cond_node.out_port(0)) - - loop_node.body.clean_up() - Loop.normalize_input_output_ports(loop_node) diff --git a/tools/mo/openvino/tools/mo/front/tf/__init__.py b/tools/mo/openvino/tools/mo/front/tf/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/tf/activation_ext.py b/tools/mo/openvino/tools/mo/front/tf/activation_ext.py deleted file mode 100644 index 61a9eb38d87c1d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/activation_ext.py +++ /dev/null @@ -1,259 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import * -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class AbsExtractor(FrontExtractorOp): - op = 'Abs' - enabled = True - - @classmethod - def extract(cls, node): - Abs.update_node_stat(node) - return cls.enabled - - -class EluFrontExtractor(FrontExtractorOp): - op = 'Elu' - enabled = True - - @classmethod - def extract(cls, node): - Elu.update_node_stat(node) - return cls.enabled - - -class ErfFrontExtractor(FrontExtractorOp): - op = 'Erf' - enabled = True - - @classmethod - def extract(cls, node): - Erf.update_node_stat(node) - return cls.enabled - - -class ExpExtractor(FrontExtractorOp): - op = 'Exp' - enabled = True - - @classmethod - def extract(cls, node): - Exp.update_node_stat(node) - return cls.enabled - - -class LeakyReLUFrontExtractor(FrontExtractorOp): - op = 'LeakyRelu' - enabled = True - - @classmethod - def extract(cls, node): - negative_slope = node.pb.attr['alpha'].f - if negative_slope == 0: - ReLU.update_node_stat(node) - else: - LeakyReLU.update_node_stat(node, {'negative_slope': negative_slope}) - return cls.enabled - - -class LogicalNotFrontExtractor(FrontExtractorOp): - op = 'LogicalNot' - enabled = True - - @classmethod - def extract(cls, node): - LogicalNot.update_node_stat(node) - return cls.enabled - - -class Relu6FrontExtractor(FrontExtractorOp): - op = 'Relu6' - enabled = True - - @classmethod - def extract(cls, node): - ReLU6.update_node_stat(node) - return cls.enabled - - -class ReluFrontExtractor(FrontExtractorOp): - op = 'Relu' - enabled = True - - @classmethod - def extract(cls, node): - ReLU.update_node_stat(node) - return cls.enabled - - -class SigmoidFrontExtractor(FrontExtractorOp): - op = 'Sigmoid' - enabled = True - - @classmethod - def extract(cls, 
node): - Sigmoid.update_node_stat(node) - return cls.enabled - - -class CosFrontExtractor(FrontExtractorOp): - op = 'Cos' - enabled = True - - @classmethod - def extract(cls, node): - Cos.update_node_stat(node) - return cls.enabled - - -class CoshFrontExtractor(FrontExtractorOp): - op = 'Cosh' - enabled = True - - @classmethod - def extract(cls, node): - Cosh.update_node_stat(node) - return cls.enabled - - -class AcoshFrontExtractor(FrontExtractorOp): - op = 'Acosh' - enabled = True - - @classmethod - def extract(cls, node): - Acosh.update_node_stat(node) - return cls.enabled - - -class SinFrontExtractor(FrontExtractorOp): - op = 'Sin' - enabled = True - - @classmethod - def extract(cls, node): - Sin.update_node_stat(node) - return cls.enabled - - -class SinhFrontExtractor(FrontExtractorOp): - op = 'Sinh' - enabled = True - - @classmethod - def extract(cls, node): - Sinh.update_node_stat(node) - return cls.enabled - - -class AsinhFrontExtractor(FrontExtractorOp): - op = 'Asinh' - enabled = True - - @classmethod - def extract(cls, node): - Asinh.update_node_stat(node) - return cls.enabled - - -class TanFrontExtractor(FrontExtractorOp): - op = 'Tan' - enabled = True - - @classmethod - def extract(cls, node): - Tan.update_node_stat(node) - return cls.enabled - - -class TanhFrontExtractor(FrontExtractorOp): - op = 'Tanh' - enabled = True - - @classmethod - def extract(cls, node): - Tanh.update_node_stat(node) - return cls.enabled - - -class AtanhFrontExtractor(FrontExtractorOp): - op = 'Atanh' - enabled = True - - @classmethod - def extract(cls, node): - Atanh.update_node_stat(node) - return cls.enabled - - -class CeilExtractor(FrontExtractorOp): - op = 'Ceil' - enabled = True - - @classmethod - def extract(cls, node): - Ceiling.update_node_stat(node) - return cls.enabled - - -class MishExtractor(FrontExtractorOp): - op = 'Mish' - enabled = True - - @classmethod - def extract(cls, node): - Mish.update_node_stat(node) - return cls.enabled - - -class LogExtractor(FrontExtractorOp): - op = 'Log' - enabled = True - - @classmethod - def extract(cls, node): - Log.update_node_stat(node) - return cls.enabled - - -class AsinExtractor(FrontExtractorOp): - op = 'Asin' - enabled = True - - @classmethod - def extract(cls, node): - Asin.update_node_stat(node) - return cls.enabled - - -class AcosExtractor(FrontExtractorOp): - op = 'Acos' - enabled = True - - @classmethod - def extract(cls, node): - Acos.update_node_stat(node) - return cls.enabled - - -class AtanExtractor(FrontExtractorOp): - op = 'Atan' - enabled = True - - @classmethod - def extract(cls, node): - Atan.update_node_stat(node) - return cls.enabled - - -class SoftSignExtractor(FrontExtractorOp): - op = 'Softsign' - enabled = True - - @classmethod - def extract(cls, node): - SoftSign.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/argmax_ext.py b/tools/mo/openvino/tools/mo/front/tf/argmax_ext.py deleted file mode 100644 index e14dfb109e15a7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/argmax_ext.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.argmax import ArgMaxOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor - - -class ArgMaxFrontExtractor(FrontExtractorOp): - op = 'ArgMax' - enabled = True - - @classmethod - def extract(cls, node): - ArgMaxOp.update_node_stat(node, 
{'out_max_val': 0, 'top_k': 1, 'axis': None, - 'dim_attrs': ['axis'], 'keepdims': 0, 'remove_values_output': True, - 'output_type': tf_dtype_extractor(node.pb.attr['output_type'].type, np.int64), - }) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/argmin_ext.py b/tools/mo/openvino/tools/mo/front/tf/argmin_ext.py deleted file mode 100644 index 5e1773fb22090b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/argmin_ext.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.argmin import ArgMinOp -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor - - -class ArgMinFrontExtractor(FrontExtractorOp): - op = 'ArgMin' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'top_k': 1, - 'axis': None, - 'keepdims': 0, - 'remove_values_output': True, - 'output_type': tf_dtype_extractor(node.pb.attr['output_type'].type, np.int64) - } - ArgMinOp.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/assign_elimination.py b/tools/mo/openvino/tools/mo/front/tf/assign_elimination.py deleted file mode 100644 index 20230117fecb1a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/assign_elimination.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class AssignAndAssertElimination(FrontReplacementPattern): - # The solution with removal of Assign and Assert operations is temporary. - # The proper solution is to keep these operations until the partial inference - # phase when control flow edges are properly handled and later unnecessary ones are eliminated. - # In order to achieve this we need to implement control flow inference function - # for these operations similar to "Merge" and "Switch" operations. 
- enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(): - if node.soft_get('op') in ["Assign", "AssignSub", "AssignAdd", "Assert"]: - log.debug('"{}" op with id="{}" was removed'.format(node.op, node.id)) - graph.remove_node(node.id) diff --git a/tools/mo/openvino/tools/mo/front/tf/automl_efficientdet.json b/tools/mo/openvino/tools/mo/front/tf/automl_efficientdet.json deleted file mode 100644 index ebf13c68ab0495..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/automl_efficientdet.json +++ /dev/null @@ -1,18 +0,0 @@ -[ - { - "id": "AutomlEfficientDet", - "custom_attributes": { - "preprocessing_input_node": "strided_slice_1", - "preprocessing_output_node": "truediv", - "aspect_ratios": [1.0, 1.0, 1.4, 0.7, 0.7, 1.4], - "variance": [1.0, 1.0, 1.0, 1.0], - "min_level": 3, - "num_scales": 3, - "anchor_scale": 4.0, - "num_classes": 90, - "nms_threshold": 0.6, - "confidence_threshold": 0.2 - }, - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/basic_lstm_cell.py b/tools/mo/openvino/tools/mo/front/tf/basic_lstm_cell.py deleted file mode 100644 index 9e4e0b47e03083..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/basic_lstm_cell.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.split_normalizer import SplitInputsReconnect -from openvino.tools.mo.ops.lstm_cell import LSTMCell -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.result import Result - - -class BasicLSTMCell(FrontReplacementSubgraph): - enabled = True - - # list of names of all original nodes that are supported by IE - # this list is collected gradually by a separate transformation - # original name in this case is a selected node in the pattern - # that is returned from anchor() function - instances_supported_by_IE = [] - - def __init__(self): - - super().__init__() - - # Inputs that are required by LSTMCell operation definition - __class__.inputs = ['input_op', 'input_hidden_state', 'input_cell_state', 'weights', 'biases'] - - # Extra inputs that are not expected by LSTMCell but required for extra checks - # at middle-end partial inference stage. They are consumed by the extended infer function - # and then removed. 
- __class__.extra_inputs = ['concat_axis', 'split_axis', 'shift_const'] - - __class__.outputs = ['mul_2', 'add_1'] - - def run_after(self): - from openvino.tools.mo.front.split_normalizer import AttributedSplitToSplit - return [AttributedSplitToSplit, SplitInputsReconnect] - - def pattern(self): - return dict( - nodes=[ - ('concat_axis', dict()), - ('concat', dict(op='ConcatV2')), - ('weights', dict()), - ('matmul', dict(op='MatMul')), - ('biases', dict()), - ('biasadd', dict(op='Add')), - ('split_axis', dict()), - ('split', dict(op='Split')), - ('shift_const', dict()), - ('shift', dict(op='Add')), - ('sigmoid_0', dict(op='Sigmoid')), - ('mul_0', dict(op='Mul')), - ('sigmoid_1', dict(op='Sigmoid')), - ('tanh_0', dict(op='Tanh')), - ('mul_1', dict(op='Mul')), - ('add_1', dict(op='Add')), - ('tanh_1', dict(op='Tanh')), - ('sigmoid_2', dict(op='Sigmoid')), - ('mul_2', dict(op='Mul')) - ], - edges=[ - # This important block specifies how input/hidden are concatenated - ('concat_axis', 'concat', {'in': 2}), - - ('concat', 'matmul', {'in': 0}), - ('weights', 'matmul', {'in': 1}), - ('matmul', 'biasadd', {'in': 0}), - ('biases', 'biasadd', {'in': 1}), - - ('split_axis', 'split', {'in': 1}), - ('biasadd', 'split', {'in': 0}), - - # This important block specifies how gates are ordered in TF graph - ('split', 'sigmoid_1', {'out': 0}), # i - ('split', 'tanh_0', {'out': 1}), # c - ('split', 'shift', {'out': 2}), # f (this is unbiased f, there is an extra addition here) - ('split', 'sigmoid_2', {'out': 3}), # o - - ('shift_const', 'shift', {}), - ('shift', 'sigmoid_0', {}), - ('sigmoid_0', 'mul_0', {}), - - ('sigmoid_1', 'mul_1', {}), - ('tanh_0', 'mul_1', {}), - - ('mul_0', 'add_1', {}), - ('mul_1', 'add_1', {}), - - ('add_1', 'tanh_1', {}), - ('tanh_1', 'mul_2', {}), - ('sigmoid_2', 'mul_2', {}), - ]) - - @staticmethod - def anchor(): - """ Mnemonic name in the pattern that is used as an anchor name for this pattern in the original graph. - Used for the second round of the pattern application when only a part of instances is allowed for conversion. - """ - return 'concat' - - def replace_sub_graph(self, graph: Graph, match: dict): - - # node that is used to identify this pattern application instance for switching between supported - # and not supported LSTMCell sub-graphs; this value will be searched in __class__.instances_supported_by_IE. - anchor_node = match[__class__.anchor()] - assert anchor_node.has_valid('name'), \ - 'LSTMCell anchor node {} does\'t have attribute name; such nodes are not supported.' 
- - match['input_op'] = match['concat'].in_node(0) - match['input_hidden_state'] = match['concat'].in_node(1) - match['input_cell_state'] = match['mul_0'].in_node(0) \ - if match['mul_0'].in_node(0).id != match['sigmoid_0'].id else match['mul_0'].in_node(1) - - pattern_edges = self.pattern()['edges'] - pattern_edges.extend([('input_op', 'concat'), ('input_cell_state', 'mul_0'), ('input_hidden_state', 'concat')]) - inputs = graph.get_inputs_with_ports(match, pattern_edges, __class__.inputs + __class__.extra_inputs) - - lstm_op = LSTMCell(graph, dict( - name=match['concat'].name + '/LSTMCell', activations=None, - )) - lstm_node = lstm_op.create_node(inputs) - lstm_node['old_infer'] = lstm_node.infer - lstm_node.infer = __class__.infer - - # this node consumes one of the resulting LSTMCell outputs, - # it should be removed before reconnecting the nodes, - # otherwise it will be reconnected to the new cell output - graph.remove_node(match['tanh_1'].id) - - for i, output in enumerate(__class__.outputs): - match[output].replace_node(lstm_node, i) - - # Because of LSTMCell specification, this layer MUST have 2 outputs. - # => we need to create fake consumers for LSTMCell - # when this node haven't some outputs. - for i in [0, 1]: - if i not in lstm_node.out_nodes(): - fake_output_node = Result(graph, dict(name=lstm_node.name + "/Output_{}".format(i))) - fake_output_node.create_node(inputs=[lstm_node], edge_attrs={'out': i, 'in': 0}) - - lstm_node['tf'] = True - lstm_node['extra_inputs'] = {name: match[name].id for name in __class__.extra_inputs} - lstm_node['inputs'] = {name: match[name].id for name in __class__.inputs} - - @staticmethod - def infer(node: Node): - assert len(node.in_nodes()) == len(__class__.inputs) + len(__class__.extra_inputs) - - for axis in ['concat_axis', 'split_axis']: - axis_node = __class__.extra_inputs.index(axis) + len(__class__.inputs) - assert node.in_node(axis_node).has_valid('value') - assert node.in_node(axis_node).value == 1 - - shift_const = node.in_node(__class__.extra_inputs.index('shift_const') + len(__class__.inputs)) - assert shift_const.has_valid('value') - shift_const = shift_const.value - assert shift_const.ndim == 0 # expect scalar value - node['shift_const'] = shift_const.copy() - - weights_node = node.in_node(__class__.inputs.index('weights')) - biases_node = node.in_node(__class__.inputs.index('biases')) - - assert weights_node.has_valid('value') - assert biases_node.has_valid('value') - - # Restore original infer function (to avoid calling previous code twice) and call it - node.infer = node.old_infer - node.infer(node) diff --git a/tools/mo/openvino/tools/mo/front/tf/batch_to_space_ext.py b/tools/mo/openvino/tools/mo/front/tf/batch_to_space_ext.py deleted file mode 100644 index 5d87f971db7cc5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/batch_to_space_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.space_to_batch import BatchToSpace - - -class SpaceToBatchFrontExtractor(FrontExtractorOp): - op = 'BatchToSpaceND' - enabled = True - - @classmethod - def extract(cls, node): - BatchToSpace.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/broadcast_ext.py b/tools/mo/openvino/tools/mo/front/tf/broadcast_ext.py deleted file mode 100644 index e7ec62e1d2cf1b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/broadcast_ext.py +++ 
/dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.broadcast import Broadcast - - -class BroadcastExtractor(FrontExtractorOp): - op = 'BroadcastTo' - enabled = True - - @classmethod - def extract(cls, node: Node): - Broadcast.update_node_stat(node, attrs={'mode': 'numpy'}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/bucketize.py b/tools/mo/openvino/tools/mo/front/tf/bucketize.py deleted file mode 100644 index 7ea131936e15f0..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/bucketize.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class BucketizeFrontReplacer(FrontReplacementSubgraph): - """ - Moves the boundaries data from attribute to the second input tensor. - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for bucketize in graph.get_op_nodes(op='Bucketize'): - if bucketize.in_port(1).disconnected(): - assert bucketize.has_valid('boundaries'), 'The Bucketize node "{}" misses "boundaries" attribute'.format(bucketize.name) - boundaries_node = Const(graph, {'name': bucketize.name + '/Bucketize_boundaries_', 'value': bucketize.boundaries}).create_node() - bucketize.in_port(1).connect(boundaries_node.out_port(0)) - del bucketize['boundaries'] - else: - log.debug('The Bucketize node input "{}" is already normalized'.format(bucketize.name)) diff --git a/tools/mo/openvino/tools/mo/front/tf/bucketize_ext.py b/tools/mo/openvino/tools/mo/front/tf/bucketize_ext.py deleted file mode 100644 index 90476a87d9d32b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/bucketize_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import float32_array -from openvino.tools.mo.ops.bucketize import Bucketize -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class BucketizeFrontExtractor(FrontExtractorOp): - op = 'Bucketize' - enabled = True - - @classmethod - def extract(cls, node): - boundaries = float32_array(node.pb.attr['boundaries'].list.f) - Bucketize.update_node_stat(node, {'boundaries': boundaries, 'with_right_bound': False, 'output_type': np.int32}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/common.py b/tools/mo/openvino/tools/mo/front/tf/common.py deleted file mode 100644 index 74c9662369c1c7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/common.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -from tensorflow.core.framework import types_pb2 as tf_types # pylint: disable=no-name-in-module,import-error - -# Suppress false positive pylint warning about function with too many arguments -# pylint: disable=E1121 -# mapping between TF data type and numpy data type and function to extract data from TF tensor -_tf_np_mapping = [('DT_BOOL', bool, lambda pb: pb.bool_val, lambda x: bool_cast(x)), - ('DT_INT8', np.int8, lambda pb: pb.int_val, lambda x: np.int8(x)), - ('DT_INT16', 
np.int16, lambda pb: pb.int_val, lambda x: np.int16(x)), - ('DT_INT32', np.int32, lambda pb: pb.int_val, lambda x: np.int32(x)), - ('DT_INT64', np.int64, lambda pb: pb.int64_val, lambda x: np.int64(x)), - ('DT_UINT8', np.uint8, lambda pb: pb.uint8_val, lambda x: np.uint8(x)), - ('DT_UINT16', np.uint16, lambda pb: pb.int_val, lambda x: np.uint16(x)), - ('DT_UINT32', np.uint32, lambda pb: pb.uint32_val, lambda x: np.uint32(x)), - ('DT_UINT64', np.uint64, lambda pb: pb.uint64_val, lambda x: np.uint64(x)), - ('DT_HALF', np.float16, lambda pb: np.uint16(pb.half_val).view(np.float16), lambda x: np.float16(x)), - ('DT_FLOAT', np.float32, lambda pb: pb.float_val, lambda x: np.float32(x)), - ('DT_DOUBLE', np.double, lambda pb: pb.double_val, lambda x: np.double(x)), - ('DT_STRING', str, lambda pb: pb.string_val, lambda x: str(x)), - ] - -tf_data_type_decode = {getattr(tf_types, tf_dt): (np_type, func) for tf_dt, np_type, func, _ in _tf_np_mapping if - hasattr(tf_types, tf_dt)} - -tf_data_type_cast = {np_type: cast for tf_dt, np_type, _, cast in _tf_np_mapping if hasattr(tf_types, tf_dt)} - - -def bool_cast(x): - if isinstance(x, str): - return False if x.lower() in ['false', '0'] else True if x.lower() in ['true', '1'] else 'unknown_boolean_cast' - else: - return bool(x) diff --git a/tools/mo/openvino/tools/mo/front/tf/complex_ext.py b/tools/mo/openvino/tools/mo/front/tf/complex_ext.py deleted file mode 100644 index e93921e34e3339..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/complex_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.Complex import Complex -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ComplexOpFrontExtractor(FrontExtractorOp): - op = 'Complex' - enabled = True - - @classmethod - def extract(cls, node): - Complex.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/concat.py b/tools/mo/openvino/tools/mo/front/tf/concat.py deleted file mode 100644 index f28fdbc3ecd227..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/concat.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph - - -class Concat(FrontReplacementSubgraph): - enabled = True - - def pattern(self): - return dict( - nodes=[('concat', dict(op='Concat', simple_concat=True))], - edges=[] - ) - - def replace_sub_graph(self, graph: Graph, match: dict): - """ - There are Concat and ConcatV2 operations in TensorFlow - The main difference is incoming port of tensor representing axis of concatenation - In Concat it is the 0 port, in ConcatV2 it is the last port - To reuse ConcatV2 logic (infer) that already exists in the Model Optimizer here we renumber ports of Concat - """ - in_edges = list(graph.in_edges(match['concat'].id, data=True)) - for u, v, attrs in in_edges: - in_port = attrs['in'] - attrs['in'] = len(in_edges) - 1 if in_port == 0 else attrs['in'] - 1 - if match['concat'].has('axis'): - # we delete axis parameter here (it was set by default by Concat Op) to carefully get it from the last - # input in Concat infer function - del graph.node[match['concat'].id]['axis'] diff --git a/tools/mo/openvino/tools/mo/front/tf/concat_ext.py b/tools/mo/openvino/tools/mo/front/tf/concat_ext.py deleted file mode 100644 index 
3c44e1745d6108..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/concat_ext.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.concat import Concat - - -class ConcatFrontExtractor(FrontExtractorOp): - op = 'Concat' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'N': node.pb.attr["N"].i, 'simple_concat': True} - Concat.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/const_ext.py b/tools/mo/openvino/tools/mo/front/tf/const_ext.py deleted file mode 100644 index 017f8d54c7eb0b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/const_ext.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape, tf_tensor_content -from openvino.tools.mo.ops.const import Const - - -class ConstExtractor(FrontExtractorOp): - op = 'Const' - enabled = True - - @classmethod - def extract(cls, node): - pb_tensor = node.pb.attr["value"].tensor - shape = tf_tensor_shape(pb_tensor.tensor_shape) - attrs = { - 'shape': shape, - 'value': tf_tensor_content(pb_tensor.dtype, shape, pb_tensor), - 'data_type': tf_dtype_extractor(pb_tensor.dtype), - } - Const.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/conv_ext.py b/tools/mo/openvino/tools/mo/front/tf/conv_ext.py deleted file mode 100644 index dd2964e7bdb16a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/conv_ext.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import convert_tf_padding_to_str, int64_array, dynamic_dimension -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_data_format_channel, tf_data_format_batch, \ - tf_int_list -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.ops.op import PermuteAttrs - - -class Conv2DFrontExtractor(FrontExtractorOp): - op = 'Conv2D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = tf_create_attrs(node, 2, 3) - - def get_num_groups(node): - if 'group' in node: - return node.group - elif node.in_node(0).shape is not None and node.kernel_shape is not None \ - and node.in_node(0).shape[node.channel_dims[0]] is not dynamic_dimension \ - and node.kernel_shape[node.input_feature_channel] is not dynamic_dimension: - # if group attribute is not defined, number of groups is calculated - # from number of input channels and filter channel size - return node.in_node(0).shape[node.channel_dims] // node.kernel_shape[node.input_feature_channel] - else: - return 1 - - attrs.update({'op': __class__.op, - 'get_group': get_num_groups, - 'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel], - 'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([3, 2, 0, 1]), - inv=int64_array([2, 3, 1, 0])) - }) - - # update the attributes of the node - Convolution.update_node_stat(node, attrs) - return cls.enabled - - -class DepthwiseConv2dNativeFrontExtractor(FrontExtractorOp): - op = 'DepthwiseConv2dNative' - enabled = True - - @classmethod - def 
extract(cls, node): - attrs = tf_create_attrs(node, 2, 2) - attrs.update({'op': __class__.op, - 'kernel_spatial_idx': int64_array([0, 1]), - 'get_group': lambda node: node.kernel_shape[node.output_feature_channel], - 'get_output_feature_dim': lambda node: node.kernel_shape[-1] * node.kernel_shape[-2], - 'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([2, 3, 0, 1]), - inv=int64_array([2, 3, 0, 1])) - }) - - # update the attributes of the node - Convolution.update_node_stat(node, attrs) - return cls.enabled - - -class Conv3DFrontExtractor(FrontExtractorOp): - op = 'Conv3D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = tf_create_attrs(node, 3, 4) - attrs.update({'op': __class__.op, - 'get_group': lambda node: 1, - 'get_output_feature_dim': lambda node: node.kernel_shape[node.output_feature_channel], - 'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([4, 3, 0, 1, 2]), - inv=int64_array([2, 3, 4, 1, 0])) - }) - - # update the attributes of the node - Convolution.update_node_stat(node, attrs) - return cls.enabled - - -def tf_create_attrs(node, input_feature_channel, output_feature_channel): - data_format = node.pb.attr["data_format"] - dilations = tf_int_list(node.pb.attr["dilations"].list) - if len(dilations) == 0: - dilations = None - - attrs = { - 'type': 'Convolution', - 'auto_pad': convert_tf_padding_to_str(node.pb.attr['padding'].s.decode()), - 'bias_addable': True, - 'bias_term': False, - 'dilation': dilations, - 'stride': tf_int_list(node.pb.attr["strides"].list), - - 'channel_dims': tf_data_format_channel(data_format), - 'batch_dims': tf_data_format_batch(data_format), - - 'input_feature_channel': input_feature_channel, - 'output_feature_channel': output_feature_channel, - 'layout': data_format.s.decode(), - - # get_group and get_output_feature_dim are special attrs that stores lambdas ( lambda node, kernel_shape:...) 
- # this attrs calls in infer function to calculate output feature dimension and group attr - 'get_group': None, # lambda should return group attr for given node - 'get_output_feature_dim': None, # lamda should return output feature dimension - } - - return attrs diff --git a/tools/mo/openvino/tools/mo/front/tf/crop_and_resize_ext.py b/tools/mo/openvino/tools/mo/front/tf/crop_and_resize_ext.py deleted file mode 100644 index 06f6d127c5310b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/crop_and_resize_ext.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.roipooling import ROIPooling - - -class CropAndResizeFrontExtractor(FrontExtractorOp): - op = 'CropAndResize' - enabled = True - - @classmethod - def extract(cls, node): - # update the attributes of the node and force 'op' to be 'CropAndResize' so extension that merges two of its - # inputs would be called - method = node.pb.attr['method'].s.decode('utf-8') - if method != 'bilinear': - log.warning( - 'The crop and resize method "{}" for node "{}" is not supported.'.format(method, node.soft_get('name'))) - return False - ROIPooling.update_node_stat(node, {'spatial_scale': 1, 'op': 'CropAndResize', 'method': method}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/cumsum_ext.py b/tools/mo/openvino/tools/mo/front/tf/cumsum_ext.py deleted file mode 100644 index 0457af59c91c3a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/cumsum_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.cumsum import CumSum -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class CumSumExtractor(FrontExtractorOp): - op = 'Cumsum' - enabled = True - - @classmethod - def extract(cls, node): - exclusive = node.pb.attr['exclusive'].b - reverse = node.pb.attr['reverse'].b - CumSum.update_node_stat(node, {'exclusive': exclusive, 'reverse': reverse}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/custom_subgraph_call.py b/tools/mo/openvino/tools/mo/front/tf/custom_subgraph_call.py deleted file mode 100644 index 56ba139f53fcb6..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/custom_subgraph_call.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from re import findall - -from openvino.tools.mo.front.extractor import update_ie_fields -from openvino.tools.mo.graph.graph import Node, merge_edge_props, Graph -from openvino.tools.mo.utils.graph import is_connected_component - - -def internal_output_name_for_node(node_name: str, output_port: int): - return node_name + ":" + str(output_port) - - -def add_node_pb_if_not_yet_added(node: Node, mega_node: Node): - if node.has_valid('pb') and node.pb.name not in mega_node.pbs.keys(): - mega_node.pbs[node.pb.name] = node.pb - - -def find_input_port(node: Node, input_desc: list, search_node_name: str, search_node_port: int): - if input_desc is None: - return len(node.in_nodes()) - - for in_port, tensor_desc in enumerate(input_desc): - for node_pattern, node_port in tensor_desc: - if findall(node_pattern, search_node_name) and node_port == search_node_port: - return in_port - raise Exception('Did not find input port of the node "{}" with port "{}"'.format(search_node_name, - 
search_node_port)) - - -def find_output_port(node: Node, output_desc: list, search_node_name: str, search_node_port: int): - if output_desc is None: - return len(node.out_nodes()) - - for out_port, (node_pattern, node_port) in enumerate(output_desc): - if findall(node_pattern, search_node_name) and node_port == search_node_port: - return out_port - raise Exception('Did not find output port of the node "{}" with port "{}"'.format(search_node_name, - search_node_port)) - - -def merge_nodes(graph: Graph, nodes_to_merge_names: list, inputs_desc: list = None, - outputs_desc: list = None): - """ - Merges nodes specified in the set 'nodes_to_merge_names' into one mega-node, creating new edges between mega-node - and inputs/outputs nodes of the mega-node. The added edges contain name of input/output nodes which will be used for - generation of placeholders and will be saved to the IR xml so OV plug-in know how to map input/output data for the - layer. Also the function adds protobufs of the nodes of the sub-graph and 'Const' ops consumed by nodes in the - sub-graph to the node's attribute 'pbs'. - :param graph: the graph object to operate on. - :param nodes_to_merge_names: list of nodes names that should be merged into a single node. - :param inputs_desc: optional list describing input nodes order. - :param outputs_desc: optional list describing output nodes order. - """ - if not is_connected_component(graph, nodes_to_merge_names): - log.warning("The following nodes do not form connected sub-graph: {}".format(nodes_to_merge_names)) - # graph.dump_graph_for_graphviz(nodes_to_dump=nodes_to_merge_names) - - new_node_name = graph.unique_id("TFSubgraphCall_") - log.info("Create new node with name '{}' for nodes '{}'".format(new_node_name, ', '.join(nodes_to_merge_names))) - graph.add_node(new_node_name) - new_node_attrs = graph.node[new_node_name] - - new_node_attrs['name'] = new_node_name - set_tf_custom_call_node_attrs(new_node_attrs) - new_node = Node(graph, new_node_name) - - added_input_tensors_names = set() # set of tensors that are were added as input to the sub-graph - added_new_node_output_tensors = dict() # key - tensor name, value - out port - - for node_name in nodes_to_merge_names: - node = Node(graph, node_name) - add_node_pb_if_not_yet_added(node, new_node) - # TODO: any improvements? - for in_node_name, edge_attrs in Node(graph, node_name).get_inputs(): - in_node = Node(graph, in_node_name) - - # internal edges between nodes of the sub-graph - if in_node_name in nodes_to_merge_names: - add_node_pb_if_not_yet_added(in_node, new_node) - continue - - # edge outside of sub-graph into sub-graph - if in_node_name not in nodes_to_merge_names: - # we cannot use the 'in_node_name' as a protobuf operation name here - # because the 'in_node_name' could be a sub-graph matched before. 
- input_tensor_name = node.pb.input[edge_attrs['in']] - if input_tensor_name not in added_input_tensors_names: - if not new_node.has_port('in', edge_attrs['in']): - new_node.add_input_port(edge_attrs['in']) - graph.add_edge(in_node_name, new_node_name, - **merge_edge_props( - {'in': find_input_port(new_node, inputs_desc, node_name, edge_attrs['in']), - 'out': edge_attrs['out'], - 'internal_input_node_name': input_tensor_name, - 'original_dst_node_name': node_name, - 'original_dst_port': edge_attrs['in'], - 'in_attrs': ['in', 'internal_input_node_name', 'original_dst_node_name', - 'original_dst_port', 'placeholder_name'], - 'out_attrs': ['out']}, - edge_attrs) - ) - log.debug("Creating edge from outside of sub-graph to inside sub-graph: {} -> {}".format( - in_node_name, new_node_name)) - added_input_tensors_names.add(input_tensor_name) - - # edge from inside sub-graph to outside sub-graph - for out_node_name, edge_attrs in Node(graph, node_name).get_outputs(): - if out_node_name not in nodes_to_merge_names: - log.debug("Creating edge from inside of sub-graph to outside sub-graph: {} -> {}".format( - new_node_name, out_node_name)) - out_name = internal_output_name_for_node(node_name, edge_attrs['out']) - if out_name not in added_new_node_output_tensors.keys(): - added_new_node_output_tensors[out_name] = find_output_port(new_node, outputs_desc, node_name, - edge_attrs['out']) - if not new_node.has_port('out', added_new_node_output_tensors[out_name]): - new_node.add_output_port(added_new_node_output_tensors[out_name]) - graph.add_edge(new_node_name, out_node_name, - **merge_edge_props( - {'in': edge_attrs['in'], - 'out': added_new_node_output_tensors[out_name], - 'internal_output_node_name': out_name, - 'in_attrs': ['in', 'internal_input_node_name'], - 'out_attrs': ['out', 'internal_output_node_name']}, - edge_attrs) - ) - new_node['output_tensors_names'] = [val for val in - {v: k for k, v in added_new_node_output_tensors.items()}.values()] - - # add nodes using the same order as in initial GraphDef so we can dump them to IR in "correct" order - new_node['nodes_order'] = [node for node in graph.graph['initial_nodes_order'] if node in new_node['pbs'].keys()] - - for n in nodes_to_merge_names: - if graph.has_node(n): # check if not deleted by another (similar) pattern - graph.remove_node(n) - return Node(graph, new_node_name) - - -def set_tf_custom_call_node_attrs(node_attrs: dict): - from openvino.tools.mo.front.tf.partial_infer.tf import tf_subgraph_infer - update_ie_fields(node_attrs) - node_attrs['input_nodes_names'] = list() - node_attrs['output_tensors_names'] = list() - node_attrs['real_input_dims'] = list() - node_attrs['pbs'] = dict() - node_attrs['type'] = 'TFCustomSubgraphCall' - node_attrs['op'] = 'TFCustomSubgraphCall' - node_attrs['infer'] = tf_subgraph_infer - node_attrs['kind'] = 'op' - - -def skip_nodes_by_condition(current_node: Node, condition: callable, forward: bool = False): - if forward: - while condition(current_node): - current_node = current_node.out_node() - else: - while condition(current_node): - current_node = current_node.in_node() - return current_node diff --git a/tools/mo/openvino/tools/mo/front/tf/deconv_ext.py b/tools/mo/openvino/tools/mo/front/tf/deconv_ext.py deleted file mode 100644 index 0dd16a58d2d30a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/deconv_ext.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import 
convert_deconv_tf_padding_to_str, int64_array, \ - dynamic_dimension -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_data_format_spatial, tf_data_format_channel, \ - tf_data_format_batch, \ - tf_int_list -from openvino.tools.mo.ops.deconvolution import Deconvolution -from openvino.tools.mo.ops.op import PermuteAttrs - - -class Conv2DBackpropInputFrontExtractor(FrontExtractorOp): - op = 'Conv2DBackpropInput' - enabled = True - - @classmethod - def extract(cls, node): - attrs = tf_create_attrs(node, 3, 2) - attrs.update({'op': cls.op, - 'get_group': get_conv_backprop_groups, - 'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([3, 2, 0, 1]), - inv=int64_array([2, 3, 1, 0])), - 'swap_0_and_2_inputs': True, - 'shape_input': True, - }) - - # update the attributes of the node - Deconvolution.update_node_stat(node, attrs) - return cls.enabled - - -class Conv3DBackpropInputV2InputFrontExtractor(FrontExtractorOp): - op = 'Conv3DBackpropInputV2' - enabled = True - - @classmethod - def extract(cls, node): - attrs = tf_create_attrs(node, 4, 3) - attrs.update({'op': cls.op, - 'get_group': get_conv_backprop_groups, - 'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([4, 3, 0, 1, 2]), - inv=int64_array([2, 3, 4, 1, 0])), - 'swap_0_and_2_inputs': True, - 'shape_input': True, - }) - - # update the attributes of the node - Deconvolution.update_node_stat(node, attrs) - return cls.enabled - - -def tf_create_attrs(node, input_feature_channel, output_feature_channel): - data_format = node.pb.attr["data_format"] - - return { - 'auto_pad': convert_deconv_tf_padding_to_str(node.pb.attr['padding'].s.decode()), - 'bias_addable': True, - 'bias_term': False, - 'spatial_dims': tf_data_format_spatial(data_format), - 'channel_dims': tf_data_format_channel(data_format), - 'batch_dims': tf_data_format_batch(data_format), - 'pad': None, # will be inferred when input shape is known - 'pad_spatial_shape': None, - 'output_spatial_shape': None, - 'output_shape': None, - 'output': None, - 'stride': tf_int_list(node.pb.attr["strides"].list), - 'type': None, # don't set type until we are sure it is really translated to correct IR; see infer function - 'group': None, - 'layout': data_format.s.decode(), - 'input_feature_channel': input_feature_channel, - 'output_feature_channel': output_feature_channel, - } - - -def get_conv_backprop_groups(node): - # output shape is required input for TensorFlow ConvBackpropInput operation and contains output shape values - # in the form [batch_size, output_height, output_width, output_channel], so that - # groups number = output_channel // kernel_out_channels, where - # kernel shape is given as [kernel_height, kernel_width, kernel_out_channels, in_channels] - output_shape = node.in_port(2).data.get_value() - kernel_shape = node.in_port(1).data.get_shape() - if node.has_and_set('group'): - return node.group - elif output_shape is not None and kernel_shape is not None \ - and output_shape[node.channel_dims[0]] is not dynamic_dimension \ - and kernel_shape[node.output_feature_channel] is not dynamic_dimension: - return output_shape[node.channel_dims] // kernel_shape[node.output_feature_channel] - else: - return 1 diff --git a/tools/mo/openvino/tools/mo/front/tf/depth_to_space.py b/tools/mo/openvino/tools/mo/front/tf/depth_to_space.py deleted file mode 100644 index ca914cec748cf6..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/depth_to_space.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 
(C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.depth_to_space import DepthToSpaceOp -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class DepthToSpaceFrontExtractor(FrontExtractorOp): - op = 'DepthToSpace' - enabled = True - - @classmethod - def extract(cls, node): - # update the attributes of the node - block_size = node.pb.attr['block_size'].i - data_format = node.pb.attr['data_format'].s.decode('utf-8') - DepthToSpaceOp.update_node_stat(node, {'block_size': block_size, 'data_format': data_format}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/efficient_det_support_api_v2.0.json b/tools/mo/openvino/tools/mo/front/tf/efficient_det_support_api_v2.0.json deleted file mode 100644 index f1333461f9d0b3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/efficient_det_support_api_v2.0.json +++ /dev/null @@ -1,50 +0,0 @@ -[ - { - "custom_attributes": { - "start_nodes": ["StatefulPartitionedCall/Preprocessor/unstack"], - "end_nodes": ["StatefulPartitionedCall/Preprocessor/stack", - "StatefulPartitionedCall/Preprocessor/stack_1"] - }, - "id": "ObjectDetectionAPIPreprocessor2Replacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP", - "clip_before_nms": false, - "clip_after_nms": true, - "disable_prior_boxes_layers_generator": true - }, - "id": "ObjectDetectionAPISSDPostprocessorReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/Identity", - "StatefulPartitionedCall/Identity_1", - "StatefulPartitionedCall/Identity_2", - "StatefulPartitionedCall/Identity_3", - "StatefulPartitionedCall/Identity_4", - "StatefulPartitionedCall/Identity_5", - "StatefulPartitionedCall/Identity_6", - "StatefulPartitionedCall/Identity_7" - ], - "start_points": [ - "StatefulPartitionedCall/Postprocessor/Reshape_1", - "StatefulPartitionedCall/Postprocessor/scale_logits", - "StatefulPartitionedCall/Postprocessor/Tile", - "StatefulPartitionedCall/Postprocessor/Cast_1" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "outputs": "StatefulPartitionedCall/Identity,StatefulPartitionedCall/Identity_1,StatefulPartitionedCall/Identity_2,StatefulPartitionedCall/Identity_3,StatefulPartitionedCall/Identity_4,StatefulPartitionedCall/Identity_5,StatefulPartitionedCall/Identity_6,StatefulPartitionedCall/Identity_7" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/efficient_det_support_api_v2.4.json b/tools/mo/openvino/tools/mo/front/tf/efficient_det_support_api_v2.4.json deleted file mode 100644 index e1826d8b45fd2b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/efficient_det_support_api_v2.4.json +++ /dev/null @@ -1,50 +0,0 @@ -[ - { - "custom_attributes": { - "start_nodes": ["StatefulPartitionedCall/map/TensorArrayUnstack/TensorListFromTensor"], - "end_nodes": ["StatefulPartitionedCall/map/TensorArrayV2Stack/TensorListStack", - "StatefulPartitionedCall/map/TensorArrayV2Stack_1/TensorListStack"] - }, - "id": "ObjectDetectionAPIPreprocessor2Replacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP", 
- "clip_before_nms": false, - "clip_after_nms": true, - "disable_prior_boxes_layers_generator": true - }, - "id": "ObjectDetectionAPISSDPostprocessorReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/Identity", - "StatefulPartitionedCall/Identity_1", - "StatefulPartitionedCall/Identity_2", - "StatefulPartitionedCall/Identity_3", - "StatefulPartitionedCall/Identity_4", - "StatefulPartitionedCall/Identity_5", - "StatefulPartitionedCall/Identity_6", - "StatefulPartitionedCall/Identity_7" - ], - "start_points": [ - "StatefulPartitionedCall/Postprocessor/Reshape_1", - "StatefulPartitionedCall/Postprocessor/scale_logits", - "StatefulPartitionedCall/Postprocessor/Tile", - "StatefulPartitionedCall/Postprocessor/Cast_1" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "outputs": "StatefulPartitionedCall/Identity,StatefulPartitionedCall/Identity_1,StatefulPartitionedCall/Identity_2,StatefulPartitionedCall/Identity_3,StatefulPartitionedCall/Identity_4,StatefulPartitionedCall/Identity_5,StatefulPartitionedCall/Identity_6,StatefulPartitionedCall/Identity_7" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/einsum_ext.py b/tools/mo/openvino/tools/mo/front/tf/einsum_ext.py deleted file mode 100644 index 3c622e20483275..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/einsum_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.einsum import Einsum -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class EinsumExtractor(FrontExtractorOp): - op = 'Einsum' - enabled = True - - @classmethod - def extract(cls, einsum_node): - einsum_name = einsum_node.soft_get('name', einsum_node.id) - equation = einsum_node.pb.attr['equation'].s.decode('utf-8') - normalized_equation = Einsum.normalize_equation(einsum_name, equation) - Einsum.update_node_stat(einsum_node, {'equation': normalized_equation}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/elementwise_ext.py b/tools/mo/openvino/tools/mo/front/tf/elementwise_ext.py deleted file mode 100644 index 1f244a708a421f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/elementwise_ext.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Add, Mul, Sub, Div, Maximum, Minimum, Pow, LogicalAnd, LogicalOr, Equal, \ - GreaterEqual, Greater, Less, LessEqual, NotEqual, FloorMod, BiasAdd, SquaredDifference, Round, Mod -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor -from openvino.tools.mo.ops.eltwise_n import EltwiseNAdd -from openvino.tools.mo.ops.power import AttributedPower - - -class AddExtractor(FrontExtractorOp): - op = 'Add' - enabled = True - - @classmethod - def extract(cls, node): - Add.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type)}) - return cls.enabled - - -class AddV2Extractor(FrontExtractorOp): - op = 'AddV2' - enabled = True - - @classmethod - def extract(cls, node): - Add.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type)}) - return cls.enabled - - -class AddNExtractor(FrontExtractorOp): - op = 'AddN' - enabled = True - - @classmethod - def 
extract(cls, node): - EltwiseNAdd.update_node_stat(node) - return cls.enabled - - -class BiasAddExtractor(FrontExtractorOp): - op = 'BiasAdd' - enabled = True - - @classmethod - def extract(cls, node): - BiasAdd.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type), - 'data_format': node.pb.attr["data_format"].s.decode()}) - return cls.enabled - - -class MulExtractor(FrontExtractorOp): - op = 'Mul' - enabled = True - - @classmethod - def extract(cls, node): - Mul.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type)}) - return cls.enabled - - -class SubExtractor(FrontExtractorOp): - op = 'Sub' - enabled = True - - @classmethod - def extract(cls, node): - Sub.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type)}) - return cls.enabled - - -class ModExtractor(FrontExtractorOp): - op = 'Mod' - enabled = True - - @classmethod - def extract(cls, node): - Mod.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type)}) - return cls.enabled - - -class DivExtractor(FrontExtractorOp): - op = 'RealDiv' - enabled = True - - @classmethod - def extract(cls, node): - Div.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type)}) - return cls.enabled - - -class SquaredDifferenceExtractor(FrontExtractorOp): - op = 'SquaredDifference' - enabled = True - - @classmethod - def extract(cls, node): - SquaredDifference.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type)}) - return cls.enabled - - -class SqrtExtractor(FrontExtractorOp): - op = 'Sqrt' - enabled = True - - @classmethod - def extract(cls, node): - AttributedPower.update_node_stat(node, {'power': 0.5}) - return cls.enabled - - -class RsqrtExtractor(FrontExtractorOp): - op = 'Rsqrt' - enabled = True - - @classmethod - def extract(cls, node): - AttributedPower.update_node_stat(node, {'power': -0.5}) - return cls.enabled - - -class SquareExtractor(FrontExtractorOp): - op = 'Square' - enabled = True - - @classmethod - def extract(cls, node): - data_type = tf_dtype_extractor(node.pb.attr["T"].type) - AttributedPower.update_node_stat(node, {'power': data_type(2), 'data_type': data_type}) - return cls.enabled - - -class NegExtractor(FrontExtractorOp): - op = 'Neg' - enabled = True - - @classmethod - def extract(cls, node): - AttributedPower.update_node_stat(node, {'scale': -1}) - return cls.enabled - - -class ZerosLike(FrontExtractorOp): - op = 'ZerosLike' - enabled = True - - @classmethod - def extract(cls, node): - AttributedPower.update_node_stat(node, {'scale': 0}) - return cls.enabled - - -class MaximumExtractor(FrontExtractorOp): - op = 'Maximum' - enabled = True - - @classmethod - def extract(cls, node): - Maximum.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type)}) - return cls.enabled - - -class MinimumExtractor(FrontExtractorOp): - op = 'Minimum' - enabled = True - - @classmethod - def extract(cls, node): - Minimum.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type)}) - return cls.enabled - - -class PowExtractor(FrontExtractorOp): - op = 'Pow' - enabled = True - - @classmethod - def extract(cls, node): - Pow.update_node_stat(node, {'data_type': tf_dtype_extractor(node.pb.attr["T"].type)}) - return cls.enabled - - -class LogicalAndFrontExtractor(FrontExtractorOp): - op = 'LogicalAnd' - enabled = True - - @classmethod - def extract(cls, node): - LogicalAnd.update_node_stat(node) - return cls.enabled - - -class 
LogicalOrFrontExtractor(FrontExtractorOp): - op = 'LogicalOr' - enabled = True - - @classmethod - def extract(cls, node): - LogicalOr.update_node_stat(node) - return cls.enabled - - -class EqualExtractor(FrontExtractorOp): - op = 'Equal' - enabled = True - - @classmethod - def extract(cls, node): - Equal.update_node_stat(node) - return cls.enabled - - -class LessEqualExtractor(FrontExtractorOp): - op = 'LessEqual' - enabled = True - - @classmethod - def extract(cls, node): - LessEqual.update_node_stat(node) - return cls.enabled - - -class LessExtractor(FrontExtractorOp): - op = 'Less' - enabled = True - - @classmethod - def extract(cls, node): - Less.update_node_stat(node) - return cls.enabled - - -class GreaterExtractor(FrontExtractorOp): - op = 'Greater' - enabled = True - - @classmethod - def extract(cls, node): - Greater.update_node_stat(node) - return cls.enabled - - -class GreaterEqualExtractor(FrontExtractorOp): - op = 'GreaterEqual' - enabled = True - - @classmethod - def extract(cls, node): - GreaterEqual.update_node_stat(node) - return cls.enabled - - -class NotEqualExtractor(FrontExtractorOp): - op = 'NotEqual' - enabled = True - - @classmethod - def extract(cls, node): - NotEqual.update_node_stat(node) - return cls.enabled - - -class FloorModFrontExtractor(FrontExtractorOp): - op = 'FloorMod' - enabled = True - - @classmethod - def extract(cls, node): - FloorMod.update_node_stat(node) - return cls.enabled - - -class RoundExtractor(FrontExtractorOp): - op = 'Round' - enabled = True - - @classmethod - def extract(cls, node): - Round.update_node_stat(node, {'mode': 'half_to_even'}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/embedding_segments_mean_decomposition.py b/tools/mo/openvino/tools/mo/front/tf/embedding_segments_mean_decomposition.py deleted file mode 100644 index ab151dd5c1735d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/embedding_segments_mean_decomposition.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.ConvertLike import ConvertLike -from openvino.tools.mo.ops.ReduceOps import ReduceSum -from openvino.tools.mo.ops.broadcast import Broadcast -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.elementwise import Div, Equal -from openvino.tools.mo.ops.embedding_bag import EmbeddingSegmentsSum -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.ops.select import Select -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class EmbeddingSegmentsMeanDecomposition(FrontReplacementPattern): - """ - This transformation decomposes EmbeddingSegmentsMean operation into EmbeddingSegmentSum operations taking into - account that summed up embedding vectors for each vector must be normalized appropriately by a coefficient - equal to a number of gathered embedding vectors for each object. If there is no gathered embedding vector - for an object, the coefficient equals one. 
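A plain-NumPy sketch of the coefficient computation just described (toy values, assumed only for illustration); the ASCII scheme that follows shows the same flow in graph terms:

import numpy as np

segment_ids = np.array([0, 0, 2, 2, 2])        # 5 gathered vectors over 4 segments
num_segments = 4                               # segments 1 and 3 get no vectors
summed = np.ones((num_segments, 3))            # stand-in for EmbeddingSegmentsSum output

membership = segment_ids[:, None] == np.arange(num_segments)[None, :]
counts = membership.sum(axis=0)                # [2, 0, 3, 0]
norm_coeff = np.where(counts == 0, 1, counts)  # empty segments keep the default vector
mean = summed / norm_coeff[:, None]            # rows 0 and 2 divided by 2 and 3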
- - Approximate computation scheme (Cast operations omitted) for the normalization coefficients: - - Const(0) - segment_ids -> Unsqueeze(axis=1) -----------------\ | - \ \/ - ---> Equal() --> Select --> ReduceSum(axis=0) --> Norm. Coeff. - / /\ - Range(0, num_segments) -> Unsqueeze(axis=0)------ / | - Const(1) - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.front.tf.embedding_segments_operation_fusing import \ - EmbeddingSegmentsOperationMultipleFeaturesFusing, EmbeddingSegmentsOperationSingleFeatureFusing - return [EmbeddingSegmentsOperationMultipleFeaturesFusing, EmbeddingSegmentsOperationSingleFeatureFusing] - - def find_and_replace_pattern(self, graph: Graph): - for embedding_segments_mean in graph.get_op_nodes(op='EmbeddingSegmentsMean'): - embedding_segments_mean_name = embedding_segments_mean.soft_get('name', - embedding_segments_mean.id) - embedding_table_input = embedding_segments_mean.in_port(0) - segment_ids_input = embedding_segments_mean.in_port(2) - num_segments_input = embedding_segments_mean.in_port(3) - - # TODO: support EmbeddingSegmentsMean with specified weights vector. - # now this case has not appeared in models so far so EmbeddingSegmentsOperation fusion - # transformations do not handle it either - if embedding_segments_mean.is_in_port_connected(5): - return - - # 1. compute indices membership matrix, i.e. which indices belong to some object - # the shape of this matrix is [num_segments, num_indices] - non_norm_range_1_to_num_segments = create_op_with_const_inputs(graph, Range, - {0: int64_array(0), - 2: int64_array(1)}, - {'name': embedding_segments_mean_name + - '/Range1ToNumSegments', - 'output_type': np.int64}) - num_segments_input.get_connection().add_destination(non_norm_range_1_to_num_segments.in_port(1)) - - range_1_to_num_segments = ConvertLike(graph, {'name': embedding_segments_mean_name + - '/Range1ToNumSegmentsNorm'} - ).create_node() - range_1_to_num_segments.in_port(0).connect(non_norm_range_1_to_num_segments.out_port(0)) - num_segments_input.get_connection().add_destination(range_1_to_num_segments.in_port(1)) - - unsqueeze_range_1_to_num_segments = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array(1)}, - {'name': embedding_segments_mean_name + - '/Range1ToNumSegmentsUnsqueeze'}) - unsqueeze_range_1_to_num_segments.in_port(0).connect(range_1_to_num_segments.out_port(0)) - unsqueeze_segment_ids = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array(0)}, - {'name': embedding_segments_mean_name + - '/SegmentIdsUnsqueeze'}) - segment_ids_input.get_connection().add_destination(unsqueeze_segment_ids.in_port(0)) - boolean_membership_matrix = Equal(graph, {'name': embedding_segments_mean_name + - '/BooleanMembershipMatrix'} - ).create_node() - boolean_membership_matrix.in_port(0).connect(unsqueeze_range_1_to_num_segments.out_port(0)) - boolean_membership_matrix.in_port(1).connect(unsqueeze_segment_ids.out_port(0)) - shape_of_membership_matrix = Shape(graph, {'name': embedding_segments_mean_name + - '/ShapeOfMembershipMatrix'} - ).create_node([boolean_membership_matrix]) - one_scalar_constant = Const(graph, {'name': embedding_segments_mean_name + '/OneScalar', - 'value': int64_array([1])}).create_node() - one_constant = Broadcast(graph, {'name': embedding_segments_mean_name + '/One'} - ).create_node([one_scalar_constant, - shape_of_membership_matrix]) - zero_constant = Const(graph, {'name': embedding_segments_mean_name + '/Zero', - 'value': int64_array(0)}).create_node() - membership_matrix = Select(graph, {'name': 
embedding_segments_mean_name + '/MembershipMatrix', - 'auto_broadcast': 'numpy'}).create_node([boolean_membership_matrix, - one_constant, - zero_constant]) - - # 2. compute a number of indices belong to each object from the batch - # it computes the normalization coefficients - num_indices_per_object = create_op_with_const_inputs(graph, ReduceSum, - {1: int64_array(1)}, - {'name': embedding_segments_mean_name + - '/NumIndicesPerObject'}) - num_indices_per_object.in_port(0).connect(membership_matrix.out_port(0)) - - # 3. replace zero coefficient (zero number of indices belong to an object) with one - # because for such object the single default embedding vector is used - where_zero_number = Equal(graph, {'name': embedding_segments_mean_name + - '/WhereZeroIndicesNumber'} - ).create_node([num_indices_per_object, zero_constant]) - normalized_num_indices_per_object = Select(graph, {'name': embedding_segments_mean_name + - '/NormNumIndicesPerObject', - 'auto_broadcast': 'numpy'} - ).create_node([where_zero_number, - one_scalar_constant, - num_indices_per_object]) - - # 4. cast normalized_num_indices_per_object to the same type as embedding vector table - norm_coefficients = ConvertLike(graph, {'name': embedding_segments_mean_name + - '/NormCoefficients'} - ).create_node() - norm_coefficients.in_port(0).connect(normalized_num_indices_per_object.out_port(0)) - embedding_table_input.get_connection().add_destination(norm_coefficients.in_port(1)) - - # 5. replace EmbeddingSegmentMean with EmbeddingSegmentSum - embedding_segments_sum = EmbeddingSegmentsSum(graph, {'name': embedding_segments_mean_name + - '/EmbeddingSegmentsSum'} - ).create_node() - for in_port in embedding_segments_mean.in_ports(): - if embedding_segments_mean.is_in_port_connected(in_port): - embedding_segments_mean.in_port(in_port).get_connection().set_destination( - embedding_segments_sum.in_port(in_port)) - - # 6. 
normalize EmbeddingSegmentSum results by computed coefficients - result_node = Div(graph, {'name': embedding_segments_mean_name + - '/Div'} - ).create_node([embedding_segments_sum, norm_coefficients]) - embedding_segments_mean.out_port(0).get_connection().set_source(result_node.out_port(0)) - - rename_nodes([(embedding_segments_mean, embedding_segments_mean_name + '/AbandonedName'), - (result_node, embedding_segments_mean_name)]) - graph.remove_nodes_from([embedding_segments_mean.id]) diff --git a/tools/mo/openvino/tools/mo/front/tf/embedding_segments_operation_fusing.py b/tools/mo/openvino/tools/mo/front/tf/embedding_segments_operation_fusing.py deleted file mode 100644 index a70219307d8ed4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/embedding_segments_operation_fusing.py +++ /dev/null @@ -1,258 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.embedding_bag import EmbeddingSegmentsMean, EmbeddingSegmentsSum -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.ops.squeeze import Squeeze - - -class EmbeddingSegmentsOperationSingleFeatureFusing(FrontReplacementSubgraph): - """ - The transformation looks for pattern (sub-graph) that performs extraction of embedding vectors from the parameters - table for object feature values, and sum up these embedding vectors for every object or compute their mean value. - Such sub-graph is met in the Wide and Deep model in case of the SINGLE categorical feature. 
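For reference, the semantics this fusion targets can be written as a small NumPy function. This is an illustrative sketch under the author's understanding of EmbeddingSegmentsSum (empty segments filled from the default_index row), not the MO implementation itself; all names below are made up for the example:

import numpy as np

def embedding_segments_sum_ref(emb_table, indices, segment_ids, num_segments, default_index):
    # gather the selected embedding rows and sum them per segment;
    # segments that receive no rows are filled with the default embedding row
    out = np.zeros((num_segments, emb_table.shape[1]), dtype=emb_table.dtype)
    hit = np.zeros(num_segments, dtype=bool)
    for idx, seg in zip(indices, segment_ids):
        out[seg] += emb_table[idx]
        hit[seg] = True
    out[~hit] = emb_table[default_index]
    return out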
- """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('identity_spw', dict(op='Identity')), - ('gather0_1', dict(type='Gather')), - ('gather0_2', dict(type='Gather')), - ('reshape0', dict(op='Reshape')), - ('where0', dict(op='Where')), - ('greaterequal0', dict(op='GreaterEqual')), - ('sparse_fill_empty_rows', dict(op='SparseFillEmptyRows')), - ('unique', dict(op='Unique')), - ('strided_slice', dict(op='StridedSlice')), - ('cast', dict(op='Cast')), - ('gather', dict(type='Gather')), - ('sparse_segment_op', dict(op=lambda op: op in ['SparseSegmentSum', 'SparseSegmentMean'])), - ('reshape', dict(op='Reshape')), - ('tile', dict(type='Tile')), - ('select', dict(op='Select')) - ], - edges=[ - ('identity_spw', 'sparse_fill_empty_rows', {'out': 0, 'in': 2}), - ('gather0_1', 'sparse_fill_empty_rows', {'out': 0, 'in': 0}), - ('gather0_2', 'sparse_fill_empty_rows', {'out': 0, 'in': 1}), - ('reshape0', 'gather0_1', {'out': 0, 'in': 1}), - ('reshape0', 'gather0_2', {'out': 0, 'in': 1}), - ('where0', 'reshape0', {'out': 0, 'in': 0}), - ('greaterequal0', 'where0', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'unique', {'out': 1, 'in': 0}), - ('sparse_fill_empty_rows', 'strided_slice', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'reshape', {'out': 2, 'in': 0}), - ('unique', 'sparse_segment_op', {'out': 1, 'in': 1}), - ('unique', 'gather', {'out': 0, 'in': 1}), - ('strided_slice', 'cast', {'out': 0, 'in': 0}), - ('gather', 'sparse_segment_op', {'out': 0, 'in': 0}), - ('cast', 'sparse_segment_op', {'out': 0, 'in': 2}), - ('sparse_segment_op', 'select', {'out': 0, 'in': 2}), - ('reshape', 'tile', {'out': 0, 'in': 0}), - ('tile', 'select', {'out': 0, 'in': 0}) - ]) - - def replace_sub_graph(self, graph: Graph, match: dict): - identity_spw = match['identity_spw'] - gather0_1 = match['gather0_1'] - gather0_2 = match['gather0_2'] - greaterequal0 = match['greaterequal0'] - sparse_fill_empty_rows = match['sparse_fill_empty_rows'] - gather = match['gather'] - select = match['select'] - where0 = match['where0'] - sparse_segment_op = match['sparse_segment_op'] - output_node_name = select.soft_get('name', select.id) - - log.debug('Found EmbeddingSparseSegmentsSingleFeature pattern after {} with name {}'.format( - sparse_fill_empty_rows.op, - sparse_fill_empty_rows.name)) - - split_for_indices = create_op_with_const_inputs(graph, Split, {1: int64_array(1)}, {'num_splits': 2}) - squeeze_for_indices = create_op_with_const_inputs(graph, Squeeze, {1: int64_array([1])}) - split_for_dense_shape = create_op_with_const_inputs(graph, Split, {1: int64_array(0)}, {'num_splits': 2}) - squeeze_to_scalar = create_op_with_const_inputs(graph, Squeeze, {1: int64_array([0])}) - - # TODO: remove Cast nodes once we start to support EmbeddingSegmentSum (new version) with segment_ids, - # indices, and num_segments of different integer type. 
- # Because the real cases show that it is possible to have it in TensorFlow - cast_indices = Cast(graph, {'name': output_node_name + '/CastIndices', 'dst_type': np.int32}).create_node() - cast_segment_ids = Cast(graph, {'name': output_node_name + '/CastSegmentIds', - 'dst_type': np.int32}).create_node() - cast_default_value = Cast(graph, {'name': output_node_name + '/CastDefaultValue', - 'dst_type': np.int32}).create_node() - cast_num_segments = Cast(graph, {'name': output_node_name + '/CastSegmentsNumber', - 'dst_type': np.int32}).create_node() - if sparse_segment_op.op == 'SparseSegmentSum': - embedding_segments_op = EmbeddingSegmentsSum(graph, {'name': output_node_name}).create_node() - else: - embedding_segments_op = EmbeddingSegmentsMean(graph, {'name': output_node_name}).create_node() - rename_nodes([(select, output_node_name + '/AbandonedName'), (embedding_segments_op, output_node_name)]) - - # connect parameters table - gather.in_port(0).get_connection().set_destination(embedding_segments_op.in_port(0)) - # connect indices values - greaterequal0.in_port(0).get_connection().set_destination(cast_indices.in_port(0)) - embedding_segments_op.in_port(1).connect(cast_indices.out_port(0)) - # split and connect segment ids - gather0_1.in_port(0).get_connection().set_destination(split_for_indices.in_port(0)) - squeeze_for_indices.in_port(0).connect(split_for_indices.out_port(0)) - cast_segment_ids.in_port(0).connect(squeeze_for_indices.out_port(0)) - embedding_segments_op.in_port(2).connect(cast_segment_ids.out_port(0)) - # split and connect number of segments - identity_spw.in_port(0).get_connection().set_destination(split_for_dense_shape.in_port(0)) - squeeze_to_scalar.in_port(0).connect(split_for_dense_shape.out_port(0)) - cast_num_segments.in_port(0).connect(squeeze_to_scalar.out_port(0)) - embedding_segments_op.in_port(3).connect(cast_num_segments.out_port(0)) - # connect default value - sparse_fill_empty_rows.in_port(3).get_connection().set_destination(cast_default_value.in_port(0)) - embedding_segments_op.in_port(4).connect(cast_default_value.out_port(0)) - # no input port for per_sample_weight - - identity_spw.in_port(0).disconnect() - gather0_1.in_port(0).disconnect() - gather0_2.in_port(0).disconnect() - greaterequal0.in_port(0).disconnect() - sparse_fill_empty_rows.in_port(2).disconnect() - gather.in_port(0).disconnect() - - select.out_port(0).get_connection().set_source(embedding_segments_op.out_port(0)) - graph.remove_nodes_from( - [gather0_1.id, gather0_2.id, greaterequal0.id, sparse_fill_empty_rows.id, select.id, where0.id]) - - -class EmbeddingSegmentsOperationMultipleFeaturesFusing(FrontReplacementSubgraph): - """ - The transformation looks for pattern (sub-graph) that performs extraction of embedding vectors from the parameters - table for object feature values, and sum up these embedding vectors for every object or compute their mean value. - Such sub-graph is met in the Wide and Deep model in case of MULTIPLE categorical features. 
- """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('identity_spw', dict(op='Identity')), - ('gather0_1', dict(type='Gather')), - ('gather0_2', dict(type='Gather')), - ('reshape0', dict(op='Reshape')), - ('where0', dict(op='Where')), - ('greaterequal0', dict(op='GreaterEqual')), - ('sparse_fill_empty_rows', dict(op='SparseFillEmptyRows')), - ('unique', dict(op='Unique')), - ('strided_slice', dict(op='StridedSlice')), - ('cast', dict(op='Cast')), - ('gather', dict(type='Gather')), - ('identity', dict(op='Identity')), - ('identity_1', dict(op='Identity')), - ('sparse_segment_op', dict(op=lambda op: op in ['SparseSegmentSum', 'SparseSegmentMean'])), - ('reshape', dict(op='Reshape')), - ('tile', dict(type='Tile')), - ('select', dict(op='Select')) - ], - edges=[ - ('identity_spw', 'sparse_fill_empty_rows', {'out': 0, 'in': 2}), - ('gather0_1', 'sparse_fill_empty_rows', {'out': 0, 'in': 0}), - ('gather0_2', 'sparse_fill_empty_rows', {'out': 0, 'in': 1}), - ('reshape0', 'gather0_1', {'out': 0, 'in': 1}), - ('reshape0', 'gather0_2', {'out': 0, 'in': 1}), - ('where0', 'reshape0', {'out': 0, 'in': 0}), - ('greaterequal0', 'where0', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'unique', {'out': 1, 'in': 0}), - ('sparse_fill_empty_rows', 'strided_slice', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'reshape', {'out': 2, 'in': 0}), - ('unique', 'sparse_segment_op', {'out': 1, 'in': 1}), - ('unique', 'gather', {'out': 0, 'in': 1}), - ('strided_slice', 'cast', {'out': 0, 'in': 0}), - ('gather', 'identity', {'out': 0, 'in': 0}), - ('identity', 'identity_1', {'out': 0, 'in': 0}), - ('identity_1', 'sparse_segment_op', {'out': 0, 'in': 0}), - ('cast', 'sparse_segment_op', {'out': 0, 'in': 2}), - ('sparse_segment_op', 'select', {'out': 0, 'in': 2}), - ('reshape', 'tile', {'out': 0, 'in': 0}), - ('tile', 'select', {'out': 0, 'in': 0}) - ]) - - def replace_sub_graph(self, graph: Graph, match: dict): - identity_spw = match['identity_spw'] - gather0_1 = match['gather0_1'] - gather0_2 = match['gather0_2'] - greaterequal0 = match['greaterequal0'] - sparse_fill_empty_rows = match['sparse_fill_empty_rows'] - gather = match['gather'] - select = match['select'] - where0 = match['where0'] - sparse_segment_op = match['sparse_segment_op'] - output_node_name = select.soft_get('name', select.id) - - log.debug('Found EmbeddingSparseSegmentsMultipleFeatures pattern after {} with name {}'.format( - sparse_fill_empty_rows.op, - sparse_fill_empty_rows.name)) - - split_for_indices = create_op_with_const_inputs(graph, Split, {1: int64_array(1)}, - {'num_splits': 2, - 'name': output_node_name + '/SplitForIndices'}) - squeeze_for_indices = create_op_with_const_inputs(graph, Squeeze, {1: int64_array([1])}) - split_for_dense_shape = create_op_with_const_inputs(graph, Split, {1: int64_array(0)}, - {'num_splits': 2, - 'name': output_node_name + '/SplitForDenseShape'}) - squeeze_to_scalar = create_op_with_const_inputs(graph, Squeeze, {1: int64_array([0])}) - - # TODO: remove Cast nodes once we start to support EmbeddingSegmentSum (new version) with segment_ids, - # indices, and num_segments of different integer type. 
- # Because the real cases show that it is possible to have it in TensorFlow - cast_indices = Cast(graph, {'name': output_node_name + '/CastIndices', 'dst_type': np.int32}).create_node() - cast_segment_ids = Cast(graph, {'name': output_node_name + '/CastSegmentIds', - 'dst_type': np.int32}).create_node() - cast_default_value = Cast(graph, {'name': output_node_name + '/CastDefaultValue', - 'dst_type': np.int32}).create_node() - cast_num_segments = Cast(graph, {'name': output_node_name + '/CastSegmentsNumber', - 'dst_type': np.int32}).create_node() - - if sparse_segment_op.op == 'SparseSegmentSum': - embedding_segments_op = EmbeddingSegmentsSum(graph, {'name': output_node_name}).create_node() - else: - embedding_segments_op = EmbeddingSegmentsMean(graph, {'name': output_node_name}).create_node() - rename_nodes([(select, output_node_name + '/AbandonedName'), (embedding_segments_op, output_node_name)]) - - # connect parameters table - gather.in_port(0).get_connection().set_destination(embedding_segments_op.in_port(0)) - # connect indices values - greaterequal0.in_port(0).get_connection().set_destination(cast_indices.in_port(0)) - embedding_segments_op.in_port(1).connect(cast_indices.out_port(0)) - # split and connect segment ids - gather0_1.in_port(0).get_connection().set_destination(split_for_indices.in_port(0)) - squeeze_for_indices.in_port(0).connect(split_for_indices.out_port(0)) - cast_segment_ids.in_port(0).connect(squeeze_for_indices.out_port(0)) - embedding_segments_op.in_port(2).connect(cast_segment_ids.out_port(0)) - # split and connect number of segments - identity_spw.in_port(0).get_connection().set_destination(split_for_dense_shape.in_port(0)) - squeeze_to_scalar.in_port(0).connect(split_for_dense_shape.out_port(0)) - cast_num_segments.in_port(0).connect(squeeze_to_scalar.out_port(0)) - embedding_segments_op.in_port(3).connect(cast_num_segments.out_port(0)) - # connect default value - sparse_fill_empty_rows.in_port(3).get_connection().set_destination(cast_default_value.in_port(0)) - embedding_segments_op.in_port(4).connect(cast_default_value.out_port(0)) - # no input port for per_sample_weight - - identity_spw.in_port(0).disconnect() - gather0_1.in_port(0).disconnect() - gather0_2.in_port(0).disconnect() - greaterequal0.in_port(0).disconnect() - sparse_fill_empty_rows.in_port(2).disconnect() - gather.in_port(0).disconnect() - - select.out_port(0).get_connection().set_source(embedding_segments_op.out_port(0)) - graph.remove_nodes_from([gather0_1.id, gather0_2.id, greaterequal0.id, sparse_fill_empty_rows.id, - select.id, where0.id]) diff --git a/tools/mo/openvino/tools/mo/front/tf/expand_dims_ext.py b/tools/mo/openvino/tools/mo/front/tf/expand_dims_ext.py deleted file mode 100644 index 8e40ca397774c7..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/expand_dims_ext.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class ExpandDimsExtractor(FrontExtractorOp): - """ - Due to historical reasons the ExpandDims operation in the Model Optimizer has one input with data and the attribute - which specifies the dimension to expand. But in the TensorFlow the ExpandDims operation has 2 inputs where the - second input specifies the dimensions to expand. In the Model Optimizer this operation corresponds to the Unsqueeze. 
- """ - op = 'ExpandDims' - enabled = True - - @classmethod - def extract(cls, node: Node): - Unsqueeze.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/extract_image_patches_ext.py b/tools/mo/openvino/tools/mo/front/tf/extract_image_patches_ext.py deleted file mode 100644 index 9445d08ea73f44..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extract_image_patches_ext.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ExtractImagePatches import ExtractImagePatches -from openvino.tools.mo.front.common.partial_infer.utils import convert_tf_padding_to_str -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_int_list - - -class ExtractImagePatchesExtractor(FrontExtractorOp): - op = 'ExtractImagePatches' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'spatial_dims': int64_array([1, 2]), - 'sizes': tf_int_list(node.pb.attr['ksizes'].list), - 'strides': tf_int_list(node.pb.attr['strides'].list), - 'rates': tf_int_list(node.pb.attr['rates'].list), - 'auto_pad': convert_tf_padding_to_str(node.pb.attr['padding'].s.decode()), - } - - ExtractImagePatches.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/extractor.py b/tools/mo/openvino/tools/mo/front/tf/extractor.py deleted file mode 100644 index eaf4468ee34b44..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extractor.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.tf.extractors.concat import tf_concat_ext -from openvino.tools.mo.front.tf.extractors.fused_bn import tf_fused_bn_extractor -from openvino.tools.mo.front.tf.extractors.native_tf import native_tf_node_extractor -from openvino.tools.mo.front.tf.extractors.pack import tf_pack_ext -from openvino.tools.mo.front.tf.extractors.utils import get_tf_node_port -from openvino.tools.mo.graph.graph import Node - - -def get_tf_edges(node: Node): - """ - By TF/NX node find all inputs and return list of all edges. - Edge direction represents data flow (from source op to this node). - So the resulting list contains all input edges for a given node. - Edge attributes: 'in' is index of input port for a given node, 'out' is an index - of output port of some other node that produces input data for this node. - """ - edge_list = [] - for in_port, src_node_id in enumerate(node.pb.input): - edge_list.append(create_tf_edge(src_node_id, node.id, in_port)) - return edge_list - - -def create_tf_edge(src_node_id: str, dst_node_id: str, in_port: int): - """ - Creates an edge for given nodes and input port. 
- """ - src_node, src_port = get_tf_node_port(src_node_id) - tensor_name = src_node + ":" + str(src_port) - cf_flag = False - if src_node[0] == '^': - src_node = src_node[1:] - cf_flag = True - return (src_node, dst_node_id, { - 'in': in_port, - 'out': src_port, - # debug anchor for a framework name, out port and tensor name - 'fw_tensor_debug_info': [(src_node_id, tensor_name)], - 'in_attrs': ['in', 'control_flow_edge', 'permutation'], - 'out_attrs': ['out', 'permutation'], - 'data_attrs': ['fw_tensor_debug_info'], - 'control_flow_edge': cf_flag - }) - - -def node_pb_arg(pb_extractor: callable): - return lambda node: pb_extractor(node.pb) - - -tf_op_extractors = { - 'TFCustomSubgraphCall': node_pb_arg(lambda pb: None), - 'FusedBatchNorm': node_pb_arg(tf_fused_bn_extractor), - 'FusedBatchNormV2': node_pb_arg(tf_fused_bn_extractor), - 'FusedBatchNormV3': node_pb_arg(tf_fused_bn_extractor), - 'ConcatV2': node_pb_arg(tf_concat_ext), - 'Pack': node_pb_arg(tf_pack_ext), -} - - -def common_tf_fields(node: Node): - return { - 'kind': 'op', - 'name': node.pb.name, - 'op': node.pb.op, - } - - -def tf_op_extractor(node: Node, lowered_keys_map: dict): - # all required attributes for the 'TFCustomSubgraphCall' are set during their initialization - if (node.has('op') and node.op == 'TFCustomSubgraphCall') or (not node.has_valid('pb')): - return True, node.graph.node[node.id] - - result = common_tf_fields(node) - node.graph.node[node.id].update(result) - supported = False - op = result['op'].lower() - if op in lowered_keys_map: - op = lowered_keys_map[op] - assert op in tf_op_extractors - attrs = tf_op_extractors[op](node) - if attrs: - result.update(attrs) - supported = True - new_attrs = native_tf_node_extractor(node.pb) - new_attrs.update(result) - result = new_attrs - return supported, result diff --git a/tools/mo/openvino/tools/mo/front/tf/extractors/__init__.py b/tools/mo/openvino/tools/mo/front/tf/extractors/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extractors/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/tf/extractors/concat.py b/tools/mo/openvino/tools/mo/front/tf/extractors/concat.py deleted file mode 100644 index afef7cb10de481..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extractors/concat.py +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.concat import concat_infer - - -def tf_concat_ext(pb): - return { - 'type': 'Concat', - 'N': pb.attr["N"].i, - 'infer': concat_infer - } diff --git a/tools/mo/openvino/tools/mo/front/tf/extractors/fused_bn.py b/tools/mo/openvino/tools/mo/front/tf/extractors/fused_bn.py deleted file mode 100644 index 8ca251a23cef66..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extractors/fused_bn.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array, reverse_bypass_infer, shape_array -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor - - -def tf_fused_bn_infer(node): - output_shape = mo_array(node.in_node(0).shape) - for port, out_node in node.out_nodes().items(): - out_node.shape = shape_array(output_shape) - - -def tf_fused_bn_extractor(pb): - 
is_training = pb.attr['is_training'].b - if is_training: - log.warning('FusedBatchNorm doesn\'t support is_training=True') - - return { - 'data_format': pb.attr["data_format"].s.decode(), - 'data_type': tf_dtype_extractor(pb.attr["T"].type), - 'eps': pb.attr['epsilon'].f, - 'infer': tf_fused_bn_infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - 'is_training': is_training - } diff --git a/tools/mo/openvino/tools/mo/front/tf/extractors/identity.py b/tools/mo/openvino/tools/mo/front/tf/extractors/identity.py deleted file mode 100644 index 1e136bbab38fc4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extractors/identity.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer - - -def tf_identity_ext(pb): - return { - 'infer': copy_shape_infer - } diff --git a/tools/mo/openvino/tools/mo/front/tf/extractors/native_tf.py b/tools/mo/openvino/tools/mo/front/tf/extractors/native_tf.py deleted file mode 100644 index ce04ea0c0d38c2..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extractors/native_tf.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.tf.partial_infer.tf import tf_native_tf_node_infer - - -def native_tf_node_extractor(pb): - return { - 'infer': tf_native_tf_node_infer, - } diff --git a/tools/mo/openvino/tools/mo/front/tf/extractors/pack.py b/tools/mo/openvino/tools/mo/front/tf/extractors/pack.py deleted file mode 100644 index e8fecf3e5a5296..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extractors/pack.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -def tf_pack_ext(pb): - assert (pb.attr["N"].i == len(pb.input)) - return { - 'axis': pb.attr["axis"].i, - 'N': pb.attr["N"].i, - 'infer': None - } diff --git a/tools/mo/openvino/tools/mo/front/tf/extractors/strided_slice.py b/tools/mo/openvino/tools/mo/front/tf/extractors/strided_slice.py deleted file mode 100644 index ad1b81eed155b2..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extractors/strided_slice.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.strided_slice import StridedSlice - - -def int_to_array_bit_mask(im): - list_repr = list(np.binary_repr(im)) - list_repr.reverse() - list_repr = [int(li) for li in list_repr] - return mo_array(list_repr, dtype=np.int32) - - -class StridedSliceFrontExtractor(FrontExtractorOp): - op = 'StridedSlice' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.pb - bm = int_to_array_bit_mask(pb.attr["begin_mask"].i) - bm = mo_array([1 - b for b in bm], dtype=np.int32) - em = int_to_array_bit_mask(pb.attr["end_mask"].i) - em = mo_array([1 - b for b in em], dtype=np.int32) - attrs = { - 'begin_mask': bm, - 'end_mask': em, - 'ellipsis_mask': int_to_array_bit_mask(pb.attr["ellipsis_mask"].i), - 'new_axis_mask': int_to_array_bit_mask(pb.attr["new_axis_mask"].i), - 'shrink_axis_mask': int_to_array_bit_mask(pb.attr["shrink_axis_mask"].i), - } - - StridedSlice.update_node_stat(node, attrs) - return cls.enabled diff --git 
a/tools/mo/openvino/tools/mo/front/tf/extractors/subgraph_utils.py b/tools/mo/openvino/tools/mo/front/tf/extractors/subgraph_utils.py deleted file mode 100644 index 5f3df8af960bd9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extractors/subgraph_utils.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import copy - -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.front.tf.extractor import tf_op_extractor, tf_op_extractors, create_tf_edge -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor -from openvino.tools.mo.graph.graph import Graph, Node, add_opoutput -from openvino.tools.mo.ops.op import PermuteAttrs - - -def update_body_graph(body_graph: Graph, subgraph_proto: dict, - body_parameter_names: list, body_results: list): - """ - Updates the loop body graph with a sub-graph (for body or condition functions) - :param body_graph: a loop body graph to be updated - :param subgraph_proto: a sub-graph in a protobuf format to be added into the loop body graph - :param body_parameter_names: a (unchanged) list of parameters in the loop body graph - :param body_results: a list of Result nodes that is extended with a list from a sub-graph - """ - # create a map from a node name in original model to a name in a loop body graph assuming - # that names in the original model are unique - # initially, the map contains names for parameters that are common for the body and condition graphs - map_original_name = {} - for idx, pb_node in enumerate(subgraph_proto['input_arg']): - map_original_name[pb_node.name] = body_parameter_names[idx] - - # walk through all nodes (non-parameter and non-result nodes) and add into the loop body graph - for pb_node in subgraph_proto['node_def']: - # create an NX node - id = body_graph.unique_id(pb_node.name) - map_original_name[pb_node.name] = id - body_graph.add_node(id, pb=pb_node, kind='op') - if hasattr(body_graph, 'op_names_statistic') and hasattr(pb_node, 'op'): - body_graph.op_names_statistic[pb_node.op] += 1 - - # add incoming edges based on data_nodes_map - for dst_port, inp in enumerate(pb_node.input): - orig_src_id = inp.split(":")[0] - - # TODO: avoid this temporal workaround for TF 2.4 or higher RNN layers: - # skip control flow dependency - if orig_src_id[0] == '^': - continue - - src_id = map_original_name[orig_src_id] - src_port = 0 if len(inp.split(":")) == 1 else int(inp.split(":")[-1]) - assert (body_graph.has_node(src_id)) - - body_graph.add_edges_from([create_tf_edge(src_id + ":" + str(src_port), id, dst_port)]) - - # create Result nodes in the loop body graph - for output in subgraph_proto['output_arg']: - output_name = subgraph_proto['ret'][output.name] - orig_src_id = output_name.split(":")[0] - src_id = map_original_name[orig_src_id] - src_port = 0 if len(output_name.split(":")) == 1 \ - else int(output_name.split(":")[-1]) - assert body_graph.has_node(src_id), 'The body graph does not contain output with name "{}"'.format( - src_id) - body_results.append(Node(body_graph, add_opoutput(body_graph, src_id, src_port, False))) - - return True - - -def get_graph_proto(external_graph: Graph, graph_id: str, node_with_graph: Node): - graph_name = node_with_graph.pb.attr[graph_id].func.name - node_name = node_with_graph.soft_get('name', node_with_graph.id) - - assert 'library' in external_graph.graph, 'The graph does not contain a library that is required ' \ - 'by node with name "{}".'.format(node_name) - - library_graph = 
external_graph.graph['library'] - - assert graph_name in library_graph, 'The library does not contain a function with name "{}" ' \ - 'that is required by node ' \ - 'with name "{}".'.format(graph_name, node_name) - return library_graph[graph_name] - - -def create_internal_graph(external_graph: Graph): - internal_graph = Graph() - # fill the body graph - for attr_key in external_graph.graph.keys(): - if attr_key != 'library': - internal_graph.graph[attr_key] = copy.deepcopy(external_graph.graph[attr_key]) - else: - # it is sufficient to have a link to the library - internal_graph.graph['library'] = external_graph.graph['library'] - return internal_graph - - -def convert_graph_inputs_to_parameters(internal_graph, internal_graph_proto): - # create Parameter nodes for the body graph - body_parameters = [] - body_parameter_names = [] - for idx, pb_node in enumerate(internal_graph_proto['input_arg']): - param_id = internal_graph.unique_id(pb_node.name) - internal_graph.add_node(param_id, name=param_id, kind='op', op='Parameter', pb=None, shape=None) - parameter_node = Node(internal_graph, pb_node.name) - Parameter.update_node_stat(parameter_node, - {'data_type': tf_dtype_extractor(pb_node.type), - 'permute_attrs': PermuteAttrs().update_attrs(attrs=[('shape', 'output:0')])} - ) - body_parameters.append(parameter_node) - body_parameter_names.append(param_id) - return body_parameters, body_parameter_names diff --git a/tools/mo/openvino/tools/mo/front/tf/extractors/utils.py b/tools/mo/openvino/tools/mo/front/tf/extractors/utils.py deleted file mode 100644 index 9f0f835a0a075d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/extractors/utils.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value -from openvino.tools.mo.front.tf.common import tf_data_type_decode -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def tf_tensor_shape(pb): - return shape_array([dim.size if dim.size >= 0 else dynamic_dimension_value for dim in pb.dim]) - - -def tf_int_list(pb): - return int64_array(pb.i) - - -def tf_dtype_extractor(pb_dtype, default=None): - return tf_data_type_decode[pb_dtype][0] if pb_dtype in tf_data_type_decode else default - - -def tf_data_format_spatial(pb): - if b"DHW" in pb.s: - return [pb.s.index(c) for c in b"DHW"] - return [pb.s.index(c) for c in b"HW"] - - -def tf_data_format_channel(pb): - return [pb.s.index(b'C')] - - -def tf_data_format_batch(pb): - return [pb.s.index(b'N')] - - -def get_tf_node_port(tensor): - delim = ':' - # tensor should have form 'name:port' or just 'name' - name_parts = tensor.split(delim) - if len(name_parts) == 1: - # just 'name', then port is 0 by default - return name_parts[0], 0 - else: - # 'name:port', note name can contain ':' also but port is the last part - # TODO Is 'name' that contains other ':'s considered valid by TF? - return delim.join(name_parts[:-1]), int(name_parts[-1]) - - -def tf_tensor_content(tf_dtype, shape, pb_tensor): - type_helper = tf_data_type_decode[tf_dtype] if tf_dtype in tf_data_type_decode else None - if type_helper is None: - raise Error("Data type is unsupported: {}. 
" + - refer_to_faq_msg(50), tf_dtype) - - decode_err_msg = 'Failed to parse a tensor with Unicode characters. Note that OpenVINO does not support ' \ - 'string literals, so the string constant should be eliminated from the graph.' - if pb_tensor.tensor_content: - value = mo_array(np.frombuffer(pb_tensor.tensor_content, type_helper[0])) - else: - # load typed value - if type_helper[0] != str: - value = mo_array(type_helper[1](pb_tensor), dtype=type_helper[0]) - else: - try: - value = mo_array(type_helper[1](pb_tensor), dtype=type_helper[0]) - except UnicodeDecodeError: - log.error(decode_err_msg, extra={'is_warning': True}) - value = mo_array(type_helper[1](pb_tensor)) - - # Ignore an empty value, if len(shape) > 1 - # For example, value = [] and shape = [1, 1, 0] - # This is needed to reshape this value later and to return reshaped value = [[[]]] - # Otherwise there can be failures during partial inference, because we are storing an empty value with incorrect - # shape - if len(shape) == 0 or (len(shape) == 1 and shape.prod() == 0): - try: - value_length = len(value) - except TypeError: - # case, when value is a scalar - return value - if value_length == 1: - # return scalar if shape is [] otherwise broadcast according to shape - try: - return mo_array(value[0], dtype=type_helper[0]) - except UnicodeDecodeError: - log.error(decode_err_msg, extra={'is_warning': True}) - return mo_array(value[0]) - else: - if len(shape) == 0 and value_length == 0: - # Since TF 2.10 the model freezing can produce constants with non-empty tensor - # but with undefined value [] - # in this case, the tensor is filled with the default value - # that is 0 for numeric types and "" for string - default_value = 0 if type_helper[0] != str else "" - value = mo_array(default_value, dtype=type_helper[0]) - # no shape, return value as is - return value - - if len(value) != shape.prod(): - log.warning("Shape and content size of tensor don't match, shape: {} content size: {}". - format(shape, len(value))) - - if len(value) == 0: - # Since TF 2.10 the model freezing can produce constants with non-empty tensor but with undefined value [] - # In this case, the tensor is filled with the default value that is 0 for numeric types and "" for string - default_value = 0 if type_helper[0] != str else "" - value_flatten = mo_array([default_value], dtype=type_helper[0]) - else: - value_flatten = value.flatten() - - # broadcast semantics according to TensorFlow v1.5 documentation: - # The argument value can be a constant value, or a list of values of type dtype. If value is a list, - # then the length of the list must be less than or equal to the number of elements implied by the shape - # argument (if specified). In the case where the list length is less than the number of elements specified - # by shape, the last element in the list will be used to fill the remaining entries. 
- add_value = value_flatten[-1] - add_length = shape.prod() - len(value_flatten) - value = np.concatenate([value_flatten, np.full([add_length], add_value)]) - - return value.reshape(shape) - - -def check_attr_type(a): - """ - Check type of attribute from TF prototxt message - param: a - attribute from TF prototxt message - return: type of attribute - """ - if a.s: - return 's' - if a.i: - return 'i' - if a.f: - return 'f' - if a.b: - return 'b' - if a.type: - return 'type' - if a.shape and a.shape.dim: - return 'shape' - if a.list: - return 'list' - - -def collect_tf_attrs(attrs): - """ - Function generates map for attributes and parsing functions - param: attrs - TF proto message with attributes - return: mapping attributes and parsing functions ready for use in update_node_stat function - """ - ret_attrs = {} - type_parsers = { - 's': lambda x: x.s, - 'i': lambda x: x.i, - 'f': lambda x: x.f, - 'b': lambda x: x.b, - 'type': lambda x: tf_dtype_extractor(x.type), - 'shape': lambda x: tf_tensor_shape(x.shape), - 'list': lambda x: x.list - } - - for a in attrs: - t = check_attr_type(attrs[a]) - a_l = attrs[a] - while t == 'list': - a_l = type_parsers[t](attrs[a]) - t = check_attr_type(a_l) - - ret_attrs[a] = type_parsers[t](a_l) - - return ret_attrs diff --git a/tools/mo/openvino/tools/mo/front/tf/eye_ext.py b/tools/mo/openvino/tools/mo/front/tf/eye_ext.py deleted file mode 100644 index 650d7fd18a483f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/eye_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.eye import TFEye -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor - - -class EyeExtractor(FrontExtractorOp): - op = 'Eye' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'output_type': tf_dtype_extractor(node.pb.attr["dtype"].type), - } - TFEye.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/eye_tf_to_eye.py b/tools/mo/openvino/tools/mo/front/tf/eye_tf_to_eye.py deleted file mode 100644 index 5c89279dd84a2e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/eye_tf_to_eye.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph, rename_node -from openvino.tools.mo.ops.eye import Eye -from openvino.tools.mo.utils.error import Error - - -class EyeTFToEye(FrontReplacementPattern): - """ - This transformation converts TFEye operation (TensorFlow semantic) to Eye operation (OpenVINO semantic). - Refer to the Op implementation for the operations semantics description. 
- """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for tfeye in graph.get_op_nodes(op='TFEye'): - # save the original node name to use it in the new Eye op instance - original_name = tfeye.soft_get('name', tfeye.id) - tfeye['name'] = original_name + '/to_be_removed' - - if not tfeye.has_valid('output_type'): - raise Error("TFEye should have valid ''output_type'' attribute.") - output_type = tfeye.soft_get('output_type') - - new_eye = Eye(graph, {'output_type': output_type}).create_node() - rename_node(new_eye, original_name) - - # num_rows - tfeye.in_port(0).get_connection().set_destination(new_eye.in_port(0)) - # num_columns - if not tfeye.in_port(1).disconnected: - tfeye.in_port(1).get_connection().set_destination(new_eye.in_port(1)) - # batch_shape - if not tfeye.in_port(2).disconnected: - tfeye.in_port(2).get_connection().set_destination(new_eye.in_port(3)) - - diagonal_index = Const(graph, {'name': original_name + '/diagonal_index', - 'value': 0}).create_node() - diagonal_index.out_port(0).connect(new_eye.in_port(2)) - - tfeye.out_port(0).get_connection().set_source(new_eye.out_port(0)) - graph.remove_node(tfeye.id) diff --git a/tools/mo/openvino/tools/mo/front/tf/fake_const_ext.py b/tools/mo/openvino/tools/mo/front/tf/fake_const_ext.py deleted file mode 100644 index 5b2cbb35fa7c52..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/fake_const_ext.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class FakeConstToConst(FrontReplacementOp): - op = "FakeConst" - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['op'] - if not node.has_valid('value'): - log.debug("No value in FakeConst node {}".format(node.id)) - return - node_value = node.value - extracted_attrs = { - 'data_type': tf_dtype_extractor(node.pb.attr['dtype'].type), - 'shape': int64_array(node_value.shape), - 'value': node_value - } - Const.update_node_stat(node, extracted_attrs) - log.debug('FakeConst op was translated to Const op with shape = {} and value.shape = {}' - ''.format(extracted_attrs['shape'], extracted_attrs['value'].shape)) diff --git a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support.json b/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support.json deleted file mode 100644 index d143ae53000f94..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support.json +++ /dev/null @@ -1,112 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "clip_before_nms": true, - "clip_after_nms": false - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - 
"include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_1/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "FirstStageBoxPredictor/Reshape", - "FirstStageBoxPredictor/Reshape_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": true, - "clip_after_nms": false - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "detection_boxes,detection_scores,num_detections" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.10.json b/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.10.json deleted file mode 100644 index 94e0d871e6a648..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.10.json +++ /dev/null @@ -1,113 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_1/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "concat", - "concat_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "detection_boxes,detection_scores,num_detections" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] 
- }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.13.json b/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.13.json deleted file mode 100644 index 1ce6b0a59a76c9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.13.json +++ /dev/null @@ -1,113 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_2/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "concat", - "concat_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "detection_boxes,detection_scores,num_detections" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.14.json b/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.14.json deleted file mode 100644 index 8439fa05e9f2ff..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.14.json +++ /dev/null @@ -1,113 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - 
"include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_2/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_5/TensorArrayGatherV3" - ], - "start_points": [ - "concat", - "concat_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "detection_boxes,detection_scores,num_detections" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.15.json b/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.15.json deleted file mode 100644 index 3c45df9133e74e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.15.json +++ /dev/null @@ -1,113 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_2/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_5/TensorArrayGatherV3" - ], - "start_points": [ - "concat/concat", - "concat_1/concat", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "detection_boxes,detection_scores,num_detections" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, 
- "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.7.json b/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.7.json deleted file mode 100644 index f6ab3ff3195c36..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v1.7.json +++ /dev/null @@ -1,113 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": true, - "clip_after_nms": false - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_1/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "concat", - "concat_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": true, - "clip_after_nms": false - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "detection_boxes,detection_scores,num_detections" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v2.0.json b/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v2.0.json deleted file mode 100644 index 3a4c575272cdc8..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v2.0.json +++ /dev/null @@ -1,82 +0,0 @@ -[ - { - "custom_attributes": { - "start_nodes": ["StatefulPartitionedCall/Preprocessor/unstack"], - "end_nodes": ["StatefulPartitionedCall/Preprocessor/stack", - "StatefulPartitionedCall/Preprocessor/stack_1"] - }, - "id": "ObjectDetectionAPIPreprocessor2Replacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/stack_3", - "StatefulPartitionedCall/BatchMultiClassNonMaxSuppression/stack_10", - "StatefulPartitionedCall/Shape" - ], - "start_points": [ - 
"StatefulPartitionedCall/concat/concat", - "StatefulPartitionedCall/concat_1/concat", - "StatefulPartitionedCall/GridAnchorGenerator/Identity", - "StatefulPartitionedCall/Cast_1", - "StatefulPartitionedCall/Cast_2", - "StatefulPartitionedCall/Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true, - "background_label_id": 0 - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "Cast_3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "StatefulPartitionedCall/SecondStagePostprocessor/Cast_3" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v2.4.json b/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v2.4.json deleted file mode 100644 index 01d4f2facbbba6..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/faster_rcnn_support_api_v2.4.json +++ /dev/null @@ -1,82 +0,0 @@ -[ - { - "custom_attributes": { - "start_nodes": ["StatefulPartitionedCall/map/TensorArrayUnstack/TensorListFromTensor"], - "end_nodes": ["StatefulPartitionedCall/map/TensorArrayV2Stack/TensorListStack", - "StatefulPartitionedCall/map/TensorArrayV2Stack_1/TensorListStack"] - }, - "id": "ObjectDetectionAPIPreprocessor2Replacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/stack_3", - "StatefulPartitionedCall/BatchMultiClassNonMaxSuppression/stack_10", - "StatefulPartitionedCall/Shape" - ], - "start_points": [ - "StatefulPartitionedCall/concat/concat", - "StatefulPartitionedCall/concat_1/concat", - "StatefulPartitionedCall/GridAnchorGenerator/Identity", - "StatefulPartitionedCall/Cast_1", - "StatefulPartitionedCall/Cast_2", - "StatefulPartitionedCall/Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true, - "background_label_id": 0 - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "Cast_3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "StatefulPartitionedCall/SecondStagePostprocessor/Cast_3" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/fft_ext.py b/tools/mo/openvino/tools/mo/front/tf/fft_ext.py deleted file mode 100644 index 3e80fe208c69b2..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/fft_ext.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.TFFFT import TFFFT - - -class 
FFT1DOpFrontExtractor(FrontExtractorOp): - op = 'FFT' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 1, 'fft_kind': 'DFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class FFT2DOpFrontExtractor(FrontExtractorOp): - op = 'FFT2D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 2, 'fft_kind': 'DFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class FFT3DOpFrontExtractor(FrontExtractorOp): - op = 'FFT3D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 3, 'fft_kind': 'DFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class IFFT1DOpFrontExtractor(FrontExtractorOp): - op = 'IFFT' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 1, 'fft_kind': 'IDFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class IFFT2DOpFrontExtractor(FrontExtractorOp): - op = 'IFFT2D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 2, 'fft_kind': 'IDFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class IFFT3DOpFrontExtractor(FrontExtractorOp): - op = 'IFFT3D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 3, 'fft_kind': 'IDFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class RFFT1DOpFrontExtractor(FrontExtractorOp): - op = 'RFFT' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 1, 'fft_kind': 'RDFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class RFFT2DOpFrontExtractor(FrontExtractorOp): - op = 'RFFT2D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 2, 'fft_kind': 'RDFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class RFFT3DOpFrontExtractor(FrontExtractorOp): - op = 'RFFT3D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 3, 'fft_kind': 'RDFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class IRFFT1DOpFrontExtractor(FrontExtractorOp): - op = 'IRFFT' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 1, 'fft_kind': 'IRDFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class IRFFT2DOpFrontExtractor(FrontExtractorOp): - op = 'IRFFT2D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 2, 'fft_kind': 'IRDFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled - - -class IRFFT3DOpFrontExtractor(FrontExtractorOp): - op = 'IRFFT3D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'num_of_dimensions': 3, 'fft_kind': 'IRDFT'} - TFFFT.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/fifo_queue_v2_ext.py b/tools/mo/openvino/tools/mo/front/tf/fifo_queue_v2_ext.py deleted file mode 100644 index 781ef7067d0e60..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/fifo_queue_v2_ext.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor -from openvino.tools.mo.ops.op import Op - - -class 
FIFOQueueV2Extractor(FrontExtractorOp): - op = 'FIFOQueueV2' - enabled = True - - @classmethod - def extract(cls, node): - shapes = node.pb.attr['shapes'].list.shape - tf_types = node.pb.attr['component_types'].list.type - extracted_types = [] - for t in tf_types: - extracted_types.append(tf_dtype_extractor(t)) - result_shapes = [] - for shape_pb in shapes: - shape = shape_pb.dim - if len(shape) == 3: - result_shapes.append(int64_array([1, shape[0].size, shape[1].size, shape[2].size])) - else: - result_shapes.append(int64_array([dim.size for dim in shape])) - Op.update_node_stat(node, {'shapes': result_shapes, 'types': extracted_types}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/fifo_replacer.py b/tools/mo/openvino/tools/mo/front/tf/fifo_replacer.py deleted file mode 100644 index 561e98328070a9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/fifo_replacer.py +++ /dev/null @@ -1,172 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from collections import defaultdict - -import numpy as np - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph, FrontReplacementPattern -from openvino.tools.mo.front.extractor import add_input_ops -from openvino.tools.mo.front.output_cut import OutputCut -from openvino.tools.mo.front.user_data_repack import UserDataRepack -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_precision, SUPPORTED_DATA_TYPES -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.utils.error import Error - - -class FIFOQueue(FrontReplacementSubgraph): - enabled = True - - def run_before(self): - from openvino.tools.mo.front.override_batch import OverrideBatch - return [OverrideBatch] - - @staticmethod - def pattern(**kwargs): - return dict( - nodes=[ - ('placeholder', dict(op='Parameter', data_type=np.int32)), - ('fifo_queue', dict(op='FIFOQueueV2')), - ('batch_join', dict(op='QueueDequeueUpToV2')), - ('image_batch', dict(op='Identity', data_type=np.float32)) - ], - edges=[ - ('placeholder', 'batch_join', {'out': 0}), - ('fifo_queue', 'batch_join', {'out': 0}), - ('batch_join', 'image_batch', {'out': 0}) - ] - ) - - @staticmethod - def replace_sub_graph(graph: Graph, match: dict, **kwargs): - r""" - Usually graph looks like: - - main_graph - ... Result - | | - image_batch label_batch - \ / - batch_join - / \ - placeholder fifo_queue - - Replacer works for both cases (that's why we have loop - 68 line): - label_batch was marked as output - there is no label_batch node - """ - true_placeholder_shape = match['placeholder'].shape - placeholder_shape = match['fifo_queue'].shapes[0] - placeholder_data_type = match['fifo_queue'].types[0] - # in case OOB conversion batch_size placeholder shape is not required - # so use a shape specified in FIFOQueueV2 shapes list attribute - assert true_placeholder_shape is None or true_placeholder_shape.ndim <= 1 - if true_placeholder_shape is not None and true_placeholder_shape.ndim == 1 and len(true_placeholder_shape) > 1: - log.warning( - 'Placeholder \'{}\' got non 0-dimensional shape {} in FIFOQueue pattern. Placeholder will have the ' - 'same shape after folding the pattern instead of {} shape which is original for the network.' 
- ''.format(match['placeholder'].id, true_placeholder_shape, placeholder_shape)) - placeholder_shape = true_placeholder_shape - placeholder_name = match['fifo_queue'].name - graph.erase_node(match['fifo_queue']) - graph.erase_node(match['placeholder']) - for _, out in match['batch_join'].out_nodes().items(): - if out.id != match['image_batch'].id: - if out.out_node().op == 'Result': - graph.remove_node(out.out_node().id) - graph.remove_node(out.id) - graph.remove_node(match['batch_join'].id) - placeholder = Parameter(graph, {'name': placeholder_name, 'shape': placeholder_shape, - 'data_type': placeholder_data_type}).create_node() - graph.create_edge(placeholder, match['image_batch']) - log.info("FIFOQueueV2 pattern was detected. New shape of placeholder {} is {}. Use -b to set batch size if " - "needed".format(placeholder.id, placeholder['shape'])) - - -class QueueDequeueManyV2(FrontReplacementSubgraph): - """ - Replaces the combination of the FIFOQueueV2 + QueueDequeueManyV2 operations with a number of Placeholders. - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.front.override_batch import OverrideBatch - return [OverrideBatch] - - @staticmethod - def pattern(**kwargs): - return dict( - nodes=[ - ('fifo_queue', dict(op='FIFOQueueV2')), - ('queue_deque', dict(op='QueueDequeueManyV2')), - ], - edges=[ - ('fifo_queue', 'queue_deque', {'out': 0}), - ] - ) - - @staticmethod - def replace_sub_graph(graph: Graph, match: dict, **kwargs): - inputs_dict = {} - for u, v, edge_attrs in graph.out_edges(match['queue_deque'].id, data=True): - out_port = edge_attrs['out'] - shape = match['fifo_queue'].shapes[out_port] - if out_port not in inputs_dict: - input_op = Parameter(graph, {'shape': shape.copy()}) - inputs_dict[out_port] = input_op.create_node([]) - graph.create_edge(inputs_dict[out_port], Node(graph, v), edge_attrs['out'], edge_attrs['in'], edge_attrs) - - graph.remove_node(match['queue_deque'].id) - graph.remove_node(match['fifo_queue'].id) - - -class FIFOQueueDequeueCut(FrontReplacementPattern): - """ - Cuts FIFOQueue -> QueueDequeue pattern in order to enable Out Of the Box (OOB) usage. - Pass runs only if user didn't specify any input names and shapes. - This transformation relies on output shapes and types extracted from QueueDequeue node. - In the meantime, the transformations FIFOQueue and QueueDequeueManyV2 expects output shapes and types extracted - from FIFOQueue node. - """ - enabled = True - graph_condition = [lambda graph: graph.graph['cmd_params'].input is None] - - def run_before(self): - return [OutputCut] - - def run_after(self): - return [UserDataRepack] - - def find_and_replace_pattern(self, graph: Graph): - fifo_qd_shapes = defaultdict(list) - for node in graph.get_op_nodes(): - if node.op not in ["QueueDequeue", "QueueDequeueV2"]: - continue - - new_inputs = "" - fifo_qd_name = node.soft_get('name', node.id) - for port_idx, port in node.out_ports().items(): - if port.disconnected(): - continue - if not np_data_type_to_precision(node.types[port_idx]) in SUPPORTED_DATA_TYPES: - raise Error("Data type {} is not supported for the" - "node {}".format(node.types[port_idx], fifo_qd_name)) - - fifo_qd_shapes[fifo_qd_name].append(dict( - shape=node.shapes[port_idx], - out=port_idx, - data_type=node.types[port_idx] - )) - new_inputs += "{}:{}, ".format(fifo_qd_name, port_idx) - - log.error( - "Found TF {} operation in the model. 
" - "PLEASE NOTE, the model will contain new input(s) ".format(node.op) - + new_inputs + - "created due to automatically triggered pruning transformation for this operation.", - extra={'is_warning': True} - ) - - add_input_ops(graph, fifo_qd_shapes, True) diff --git a/tools/mo/openvino/tools/mo/front/tf/fill_ext.py b/tools/mo/openvino/tools/mo/front/tf/fill_ext.py deleted file mode 100644 index 637883011af8a1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/fill_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.fill import Fill - - -class FillExtractor(FrontExtractorOp): - op = 'Fill' - enabled = True - - @classmethod - def extract(cls, node): - Fill.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/floor_div_decomposition.py b/tools/mo/openvino/tools/mo/front/tf/floor_div_decomposition.py deleted file mode 100644 index d352435f6ba2e4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/floor_div_decomposition.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import Floor -from openvino.tools.mo.ops.elementwise import Div -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph, Node, rename_node - - -class FloorDivDecomposition(FrontReplacementPattern): - r""" - BEFORE: AFTER: - input_0 input_1 input_0 input_1 - \ / \ / - FloorDiv Div - | | - output Floor - | - output - """ - enabled = True - - @staticmethod - def floor_div_replacement(floor_div: Node): - graph = floor_div.graph - name = floor_div.soft_get('name', floor_div.id) - - div = Div(graph, {'name': name + '/Div'}).create_node() - floor = Floor(graph, {'name': name}).create_node() - div.out_port(0).connect(floor.in_port(0)) - - div.in_port(0).connect(floor_div.in_port(0).get_source()) - div.in_port(1).connect(floor_div.in_port(1).get_source()) - floor_div.out_port(0).get_connection().set_source(floor.out_port(0)) - - graph.remove_node(floor_div.id) - rename_node(floor, name) - - def find_and_replace_pattern(self, graph: Graph): - for floor_div in graph.get_op_nodes(op='FloorDiv'): - self.floor_div_replacement(floor_div) diff --git a/tools/mo/openvino/tools/mo/front/tf/floor_ext.py b/tools/mo/openvino/tools/mo/front/tf/floor_ext.py deleted file mode 100644 index ad64fcea20e39f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/floor_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import Floor -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class FloorExtractor(FrontExtractorOp): - op = 'Floor' - enabled = True - - @classmethod - def extract(cls, node): - Floor.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/gather_ext.py b/tools/mo/openvino/tools/mo/front/tf/gather_ext.py deleted file mode 100644 index 1a11b05ac7d84b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/gather_ext.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.gather import Gather, AttributedGather -from openvino.tools.mo.front.extractor import FrontExtractorOp - - 
-class GatherFrontExtractor(FrontExtractorOp): - op = 'Gather' - enabled = True - - @classmethod - def extract(cls, node): - AttributedGather.update_node_stat(node, {'axis': 0}) - return cls.enabled - - -class ResourceGatherFrontExtractor(FrontExtractorOp): - op = 'ResourceGather' - enabled = True - - @classmethod - def extract(cls, node): - AttributedGather.update_node_stat(node, {'axis': 0}) - return cls.enabled - - -class GatherV2FrontExtractor(FrontExtractorOp): - op = 'GatherV2' - enabled = True - - @classmethod - def extract(cls, node): - Gather.update_node_stat(node, {'batch_dims': node.pb.attr['batch_dims'].i}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/gathernd_ext.py b/tools/mo/openvino/tools/mo/front/tf/gathernd_ext.py deleted file mode 100644 index f0672ec5750651..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/gathernd_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.gathernd import GatherND -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class GatherNDFrontExtractor(FrontExtractorOp): - op = 'GatherNd' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'batch_dims': 0, - } - GatherND.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/graph_utils.py b/tools/mo/openvino/tools/mo/front/tf/graph_utils.py deleted file mode 100644 index 723eb9619ded6f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/graph_utils.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from typing import Dict - -import numpy as np - -from openvino.tools.mo.middle.InsertLayoutPropagationTransposes import mark_input_as_in_correct_layout, \ - mark_output_as_in_correct_layout -from openvino.tools.mo.ops.activation_ops import Sigmoid -from openvino.tools.mo.ops.elementwise import Add, Less, Mul -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.ops.crop import Crop -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.softmax import Softmax -from openvino.tools.mo.utils.error import Error - - -def create_op_node_with_second_input(graph: Graph, op: callable, second_input_value: np.array, op_attrs=None, - input_node=None): - operation = op(graph, op_attrs) - node = operation.create_node() - if input_node is not None: - input_node.out_port(0).connect(node.in_port(0)) - second_input_node = Const(graph, {'name': node.name + '/value', 'value': second_input_value}).create_node() - second_input_node.out_port(0).connect(node.in_port(1)) - if graph.stage != 'front': - second_input_node.infer(second_input_node) - return node - - -def create_op_with_const_inputs(graph: Graph, op: callable, port_value_dict: Dict[int, np.array], - op_attrs=None, input_node=None): - operation = op(graph, op_attrs) - node = operation.create_node() - if input_node is not None: - input_node.out_port(0).connect(node.in_port(0)) - - for idx, value in port_value_dict.items(): - node.add_input_port(idx, skip_if_exist=True) - value_input_node = Const(graph, {'name': node.name + '_input_port_' + 
str(idx) + '/value', - 'value': value}).create_node() - value_input_node.out_port(0).connect(node.in_port(idx)) - if graph.stage != 'front': - value_input_node.infer(value_input_node) - return node - - -def add_convolution_to_swap_xy_coordinates(graph: Graph, input_node: Node, coordinates_size: int): - """ - The function add convolution node after the node 'input_node' to swap xy coordinates of the boxes produced - by the node 'input_node'. It is expected that box coordinates are located in the fastest changing dimension of the - 'input_node' output, i.e. the input tensor could be reshaped to [num_boxes, 4] or [num_boxes, 5]. If the size is 5, - then the 0-th element for each of num_boxes blocks is not changed and element 1 is swapped with element 2, element 3 - is swapped with element 4. This is the case when boxes coordinates are produced by the layer "Proposal". The exact - amount of elements in each block is equal to the 'coordinates_size' parameter. - :param graph: graph to operate on. - :param input_node: node producing boxes coordinates. - :param coordinates_size: integer value equal to 4 or 5. - :return convolution node that swaps coordinates. - """ - # swap of input tensor with 4 or 5 numbers describing boxes are supported - assert (coordinates_size in [4, 5]) - - input_reshape_4d_node = create_op_node_with_second_input(graph, Reshape, int64_array([-1, 1, 1, coordinates_size]), - dict(name=input_node.name + '/reshape_4d'), input_node) - mark_input_as_in_correct_layout(input_reshape_4d_node, 0) - # do not mark second input because the reshape works in initial model layout and needs to be transformed to NCHW - mark_output_as_in_correct_layout(input_reshape_4d_node, 0) - - if coordinates_size == 5: - # zero indexed element is not box coordinate ("batch id" in case of Proposal) - conv_filter_data = mo_array(mo_array([[[[1, 0, 0, 0, 0], - [0, 0, 1, 0, 0], - [0, 1, 0, 0, 0], - [0, 0, 0, 0, 1], - [0, 0, 0, 1, 0]]]], - dtype=np.float32)) - else: - conv_filter_data = mo_array(mo_array([[[[0, 1, 0, 0], - [1, 0, 0, 0], - [0, 0, 0, 1], - [0, 0, 1, 0]]]], - dtype=np.float32)) - - conv_filter_data = np.transpose(conv_filter_data, [2, 3, 0, 1]) - - conv_filter_const_op = Const(graph, dict(value=conv_filter_data)) - conv_filter_const_node = conv_filter_const_op.create_node([], dict(name=input_node.name + '/weights')) - - conv_op = Convolution(graph, { - 'bias_addable': True, - 'channel_dims': mo_array([3]), - 'batch_dims': mo_array([0]), - 'input_feature_channel': 0, - 'output_feature_channel': 1, - 'group': 1, - 'layout': 'NHWC', - }) - return conv_op.create_node([input_reshape_4d_node, conv_filter_const_node], dict(name=input_node.name + "/conv")) - - -def add_fake_background_loc(graph: Graph, input_node: Node): - r""" - DetectionOutput layer expects that box coordinates contains coordinates of boxes for the "background" class also, - but in the TensorFlow\* Object Detection API the tensor contains information about real object classes only. - The function copies a slice of the output data of the node 'input_node' and then concats it to the beginning of the - data. The data in this slice is not used by the Detection Output layer so the actual values are not important. This - approach allows the model to be reshape-able and does not introduce many layers. - "background" class box coordinates. - :param graph: graph to operate on. - :param input_node: node producing the boxes coordinates. - :return convolution node that adds slice of data for the "background" class. 
- """ - crop_op = Crop(graph, dict(axis=mo_array([1]), offset=mo_array([0]), dim=mo_array([1]), nchw_layout=True)) - crop_node = crop_op.create_node([input_node], dict(name='crop_locs')) - - concat_op = Concat(graph, dict(axis=1, in_ports_count=2, nchw_layout=True)) - return concat_op.create_node([crop_node, input_node], dict(name=input_node.id + '/locs_with_fake_background')) - - -def add_activation_function_after_node(graph: Graph, node: Node, activation_function: str): - """ - The function adds node with activation function defined by string 'activation_function' which gets input from the - node 'node'. - :param graph: graph to operate on. - :param node: node to add activation after. - :param activation_function: string defining the activation function. These values are read from TensorFlow* object - detection API pipeline configuration file - :return: activation function node. - """ - if activation_function == 'SOFTMAX': - # softmax to be applied to the confidence - softmax_conf_op = Softmax(graph, dict(axis=-1, nchw_layout=True)) - activation_node = softmax_conf_op.create_node([node], dict(name=node.name + '/softmax')) - elif activation_function == 'SIGMOID': - # sigmoid activation function to be applied to the confidence - sigmoid_conf_op = Sigmoid(graph, dict(nchw_layout=True)) - activation_node = sigmoid_conf_op.create_node([node], dict(name=node.name + '/sigmoid')) - elif activation_function == 'IDENTITY': - # in case of Identity do nothing and just use result from the input node - activation_node = node - else: - raise Error('Unknown post-processing activation function "{}".'.format(activation_function)) - return activation_node - - -def add_constant_to_negative_values(node: Node, port_idx: int, added_value: np.array): - """ - This function adds the given values to negative elements of value from the given input port. 
- :param node: node with corrected values in the input port port_idx - :param port_idx: input port index for negative values - :param added_value: the value to add - :return: None - """ - negative_values_source = node.in_port(port_idx).get_source() - negative_values_node = node.in_port(port_idx).get_source().node - negative_values_node_name = negative_values_node.soft_get('name', negative_values_node.id) - - graph = node.graph - - less_node = create_op_with_const_inputs(graph, Less, - {1: mo_array(0, dtype=added_value.dtype)}, - {'name': negative_values_node_name + '/Less'}) - mul_node = create_op_with_const_inputs(graph, Mul, {1: added_value}, {'name': negative_values_node_name + '/Mul'}) - - node.in_port(port_idx).get_connection().set_destination(less_node.in_port(0)) - less_node.out_port(0).connect(mul_node.in_port(0)) - - add_node = Add(graph, {}).create_node() - mul_node.out_port(0).connect(add_node.in_port(1)) - negative_values_source.connect(add_node.in_port(0)) - add_node.out_port(0).connect(node.in_port(port_idx)) diff --git a/tools/mo/openvino/tools/mo/front/tf/identityN_to_identity.py b/tools/mo/openvino/tools/mo/front/tf/identityN_to_identity.py deleted file mode 100644 index cbef3a36ac9a91..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/identityN_to_identity.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.identity import Identity -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph, Node - - -class IdentityN_to_Identity(FrontReplacementPattern): - r""" - Replaces IdentityN op with several Identity ops. - - Example: - input_0 input_1 input_0 input_1 - \ / | | - IdentityN Identity Identity - / \ | | - output_0 output_1 output_0 output_1 - - ATTENTION: not all in/outputs of the IdentityN may survive during ModelOptimizer pipeline. - And it breaks the original operation semantics. - For example, output_1 may be not be used during network output computations. - To preserve this unused in/output ports we disconnect the corresponding out/input port. 
- """ - enabled = True - - @staticmethod - def replace_identityN(node: Node): - graph = node.graph - name = node.soft_get('name', node.id) - - assert node.has_valid('data_types'), 'IdentityN {} has no `data_types` attribute'.format(name) - dtypes = node.data_types - - for idx, port in node.in_ports().items(): - if not node.is_in_port_connected(idx) or not node.is_out_port_connected(idx): - # ATTENTION section in the description above - continue - assert idx < len(dtypes), 'IdentityN {} has inconsistent `data_types` attribute {}'.format(name, dtypes) - identity = Identity(graph, {'name': '{}/{}_port'.format(name, idx), 'data_type': dtypes[idx]}).create_node() - port.get_connection().set_destination(identity.in_port(0)) - node.out_port(idx).get_connection().set_source(identity.out_port(0)) - - # ATTENTION section in the description above - for in_port in node.in_ports().values(): - in_port.disconnect() - for out_port in node.out_ports().values(): - out_port.disconnect() - - def find_and_replace_pattern(self, graph: Graph): - for identityN in graph.get_op_nodes(op='IdentityN'): - self.replace_identityN(identityN) diff --git a/tools/mo/openvino/tools/mo/front/tf/identity_ext.py b/tools/mo/openvino/tools/mo/front/tf/identity_ext.py deleted file mode 100644 index 97e8b50503ac24..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/identity_ext.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.identity import Identity, IdentityN -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor -from openvino.tools.mo.graph.graph import Node - - -class IdentityFrontExtractor(FrontExtractorOp): - op = 'Identity' - enabled = True - - @classmethod - def extract(cls, node: Node): - Identity.update_node_stat(node, { - 'data_type': tf_dtype_extractor(node.pb.attr["T"].type), - }) - return cls.enabled - - -class IdentityNFrontExtractor(FrontExtractorOp): - op = 'IdentityN' - enabled = True - - @classmethod - def extract(cls, node: Node): - dtypes = [tf_dtype_extractor(t) for t in node.pb.attr["T"].list.type] - IdentityN.update_node_stat(node, { - 'data_types': dtypes, - 'in_ports_count': len(dtypes), - 'out_ports_count': len(dtypes), - }) - return cls.enabled - - -class ReadVariableOpFrontExtractor(FrontExtractorOp): - op = 'ReadVariableOp' - enabled = True - - @classmethod - def extract(cls, node: Node): - Identity.update_node_stat(node, { - 'data_type': tf_dtype_extractor(node.pb.attr["T"].type), - }) - return cls.enabled - - -class StopGradientExtractor(FrontExtractorOp): - op = 'StopGradient' - enabled = True - - @classmethod - def extract(cls, node: Node): - Identity.update_node_stat(node, {'op': 'StopGradient'}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/if_ext.py b/tools/mo/openvino/tools/mo/front/tf/if_ext.py deleted file mode 100644 index f0aa3994816e35..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/if_ext.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.If import If -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.front.common.register_custom_ops import check_for_duplicates -from openvino.tools.mo.front.extractor import FrontExtractorOp, extract_node_attrs -from openvino.tools.mo.front.tf.extractor import tf_op_extractor, tf_op_extractors -from 
openvino.tools.mo.front.tf.extractors.subgraph_utils import update_body_graph, convert_graph_inputs_to_parameters, \ - get_graph_proto, create_internal_graph -from openvino.tools.mo.graph.graph import Node, Graph - - -def extract_if(cls, if_node: Node): - If.update_node_stat(if_node, {}) - - # check that required body and condition functions exist in the graph library - main_graph = if_node.graph - then_graph_proto = get_graph_proto(main_graph, 'then_branch', if_node) - else_graph_proto = get_graph_proto(main_graph, 'else_branch', if_node) - - then_graph = create_internal_graph(main_graph) - if_node['then_graph'] = then_graph - - else_graph = create_internal_graph(main_graph) - if_node['else_graph'] = else_graph - - # create Parameter nodes for the then/else graphs - for input_index, (body_graph, body_graph_proto) in enumerate(zip((then_graph, else_graph), (then_graph_proto, - else_graph_proto))): - - body_parameters, body_parameter_names = convert_graph_inputs_to_parameters(body_graph, body_graph_proto) - - # update the If body graph with the body function graph - body_results = [] - update_body_graph(body_graph, body_graph_proto, body_parameter_names, body_results) - - body_graph.stage = 'front' - - # connect external input ports with body parameter nodes except input with condition - for idx in range(0, len(body_parameters)): - If.connect_body_input(if_node, not input_index, idx + 1, body_parameters[idx]) - - # connect body outputs with If operation output ports - for idx in range(len(body_results)): - If.connect_body_output(if_node, not input_index, idx, body_results[idx]) - - # run function to parse body nodes attributes similar to the main graph - extract_node_attrs(body_graph, lambda node: tf_op_extractor(node, check_for_duplicates(tf_op_extractors))) - - return cls.enabled - - -class IfExtractor(FrontExtractorOp): - op = 'If' - enabled = True - - @classmethod - def extract(cls, if_node: Node): - return extract_if(cls, if_node) - - -class StatelessIfExtractor(FrontExtractorOp): - op = 'StatelessIf' - enabled = True - - @classmethod - def extract(cls, if_node: Node): - return extract_if(cls, if_node) diff --git a/tools/mo/openvino/tools/mo/front/tf/loader.py b/tools/mo/openvino/tools/mo/front/tf/loader.py deleted file mode 100644 index 4a7980eb86e00c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/loader.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import logging as log -import os -import re -from packaging.version import parse, Version -from pathlib import Path - -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error, FrameworkError -from openvino.tools.mo.utils.utils import refer_to_faq_msg -from openvino.tools.mo.utils.environment_setup_utils import get_environment_setup # pylint: disable=no-name-in-module,import-error - -# do not print INFO and WARNING messages from TensorFlow -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -try: - import tensorflow.compat.v1 as tf_v1 - import tensorflow as tf - from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 -except ImportError: - import tensorflow as tf_v1 - -# in some environment suppressing through TF_CPP_MIN_LOG_LEVEL does not work -tf_v1.get_logger().setLevel("ERROR") - -from google.protobuf import text_format -from openvino.tools.mo.graph.graph import fill_graph_with_nodes, Graph -from openvino.tools.mo.utils.summarize_graph import summarize_graph - - -def 
freeze_checkpoints(graph_def: tf_v1.GraphDef, checkpoint_dir: str, output_node_names: list): - """ - Loads all the variables in a graph and stores them in a separate dictionary. Freezes output nodes in the graph - :param graph_def: GraphDef object holding the network. - :param checkpoint_dir: path to directory with checkpoint files with values of graph variables. - :param output_node_names: list of output node names. - :return: GraphDef containing a simplified version of the original. - """ - log.debug("Loading checkpoint files from directory: {}".format(checkpoint_dir)) - checkpoint_files = [] - for checkpoint_name in sorted(os.listdir(checkpoint_dir)): - checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name) - if os.path.isfile(checkpoint_path): - checkpoint_files.append(checkpoint_path) - log.debug("File {} will be loaded".format(checkpoint_path)) - else: - log.debug("Path {} is not a file. Skipping") - - if len(checkpoint_files) == 0: - raise Error("There are no checkpoint files in directory: {}".format(checkpoint_dir)) - - tf_v1.import_graph_def(graph_def, name='') - - with tf_v1.Session() as sess: - uninitialized_variables = [str(v, 'utf-8') for v in set(sess.run(tf_v1.report_uninitialized_variables()))] - all_variables = [n.name for n in sess.graph.as_graph_def().node if n.op in ['Variable', 'VariableV2']] - white_list = [v for v in all_variables if v not in uninitialized_variables] - black_list = [v for v in all_variables if v in uninitialized_variables] - output_graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, graph_def, output_node_names, - variable_names_whitelist=white_list, - variable_names_blacklist=black_list) - variable_values = {} - for checkpoint_file in checkpoint_files: - log.debug("Loading {}".format(checkpoint_file)) - with tf_v1.Session() as sess: - var_list = {} - var_to_shape_map = tf_v1.train.load_checkpoint(checkpoint_file).get_variable_to_shape_map() - for key in var_to_shape_map: - try: - tensor = sess.graph.get_operation_by_name(key).outputs[0] - except KeyError: - continue - var_list[key] = tensor - tf_v1.train.Saver(var_list=var_list).restore(sess, checkpoint_file) - for name, tensor in var_list.items(): - variable_values[name] = sess.run(tensor) - return output_graph_def, variable_values - - -def freeze_checkpoint(graph_def, checkpoint, output_node_names): - """ - Replaces all the variables in a graph with constants of the same values. - :param graph_def: GraphDef object holding the network. - :param checkpoint: path to checkpoint file with values of variables. - :param output_node_names: list of output node names - :return: GraphDef containing a simplified version of the original. 
- """ - tf_v1.import_graph_def(graph_def, name="") - - with tf_v1.Session() as sess: - var_list = {} - var_to_shape_map = tf_v1.train.NewCheckpointReader(checkpoint).get_variable_to_shape_map() - for key in var_to_shape_map: - try: - tensor = sess.graph.get_operation_by_name(key).outputs[0] - except KeyError: - continue - var_list[key] = tensor - tf_v1.train.Saver(var_list=var_list).restore(sess, checkpoint) - output_graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, graph_def, output_node_names) - return output_graph_def - - -def read_file_to_graph_def(graph_def: [tf_v1.GraphDef, tf_v1.MetaGraphDef], graph_file_name: str = "", - is_binary: bool = True): - """ - Reads file to protobuf - :param graph_def: GraphDef orr MetaGraphDef object to store the network - :param graph_file_name: path to file with graph - :param is_binary: flag to switch between binary and test protobuf format of graph file - :return: GraphDef or MetaGaphDef containing the network with cleared device info. - """ - try: - if is_binary: - with open(graph_file_name, "rb") as f: - graph_def.ParseFromString(f.read()) - else: - with open(graph_file_name, "r") as f: - text_format.Merge(f.read(), graph_def) - nodes_to_clear_device = graph_def.node if isinstance(graph_def, tf_v1.GraphDef) else graph_def.graph_def.node - for node in nodes_to_clear_device: - node.device = "" - except Exception as e: - raise FrameworkError( - 'TensorFlow cannot read the model file: "{}" is incorrect TensorFlow model file. ' - '\nThe file should contain one of the following TensorFlow graphs:' - '\n1. frozen graph in text or binary format' - '\n2. inference graph for freezing with checkpoint (--input_checkpoint) in text or binary format' - '\n3. meta graph' - '\n\nMake sure that --input_model_is_text is provided for a model in text format. ' - 'By default, a model is interpreted in binary format. Framework error details: {}. ' + - refer_to_faq_msg(43), - graph_file_name, - str(e) - ) from e - return graph_def - - -def get_output_node_names_list(graph_def, user_defined_output_node_names_list: list): - return summarize_graph(graph_def)['outputs'] \ - if user_defined_output_node_names_list is None or len(user_defined_output_node_names_list) == 0 \ - else user_defined_output_node_names_list - - -def deducing_metagraph_path(meta_graph_file: str): - match = re.search(r'^(.*)\.(data-\d*-of-\d*|index|meta)$', meta_graph_file) - if match is not None: - deduced_meta_graph_file = match.group(1) + '.meta' - if not os.path.isfile(deduced_meta_graph_file): - raise Error('\n\nMetaGraph freezing mechanism was enabled. ' - '\n{} file does not represent MetaGraph. ' - '\n{} path to MetaGraph was deduced, but it does not exist' - '\n\nModel with MetaGraph consists of 3-4 files:' - '\n1. model_name.meta' - '\n2. model_name.index' - '\n3. model_name.data-00000-of-00001 (digit part may vary)' - '\n4. checkpoint (optional)'.format(meta_graph_file, deduced_meta_graph_file)) - else: - meta_graph_file = deduced_meta_graph_file - else: - raise Error('\n\nMetaGraph freezing mechanism was enabled. ' - '\n{} file does not represent MetaGraph. ' - '\n\nModel with MetaGraph consists of 3-4 files:' - '\n1. model_name.meta' - '\n2. model_name.index' - '\n3. model_name.data-00000-of-00001 (digit part may vary)' - '\n4. 
checkpoint (optional)' - '\n\nTo load this model, simply run:' - '\npython3 mo_tf.py --input_meta_graph model_name.meta' - ''.format(meta_graph_file)) - return meta_graph_file - - -def freeze_tf2_concrete_function(model, concrete_func, env_setup): - - if "tensorflow" in env_setup and Version(env_setup["tensorflow"]) >= parse("2.2.0"): - frozen_func = convert_variables_to_constants_v2(concrete_func, - lower_control_flow=False, - aggressive_inlining=True) # pylint: disable=E1123 - else: - frozen_func = convert_variables_to_constants_v2(concrete_func, - lower_control_flow=False) # pylint: disable=E1123 - graph_def = frozen_func.graph.as_graph_def(add_shapes=True) - - input_names = [] - if hasattr(model, 'inputs') and model.inputs is not None: - # Extract tensor names order from Keras model - input_names = [tensor.name for tensor in model.inputs] - - # After model freezing output tensor names are changing and recieve "Func/PartitionedCall" prefix, - # so output_names from saved_model cannot be used. Here tensor names from frozen graph are used, - # as TF adds indexed Identity nodes during freezing to each output, so this indexing is used for - # order alignment. - output_names = [tensor.name for tensor in frozen_func.outputs] - - inputs_outputs_order = (input_names, output_names) - - return graph_def, {}, 'tf2', inputs_outputs_order - - -def prepare_graph_def(model): - env_setup = get_environment_setup("tf") - if isinstance(model, tf_v1.GraphDef): - nodes_to_clear_device = model.node - for node in nodes_to_clear_device: - node.device = "" - return model, {}, "tf", None - if isinstance(model, tf.keras.Model): # pylint: disable=no-member - - assert hasattr(model, "inputs") and model.inputs is not None, "Model inputs specification is required." - - model_inputs = [] - for inp in model.inputs: - if isinstance(inp, tf.Tensor): - model_inputs.append(inp) - elif tf.keras.backend.is_keras_tensor(inp): # pylint: disable=no-member - model_inputs.append(inp.type_spec) - else: - raise Error("Unknown input tensor type {}".format(type(input))) - - @tf.function - def tf_function(x): - return model(x) - - conc_func = tf_function.get_concrete_function(model_inputs) - return freeze_tf2_concrete_function(model, conc_func, env_setup) - if Version(env_setup["tensorflow"]) >= parse("2.6.0") and isinstance(model, tf.types.experimental.GenericFunction): - - assert hasattr(model, "input_signature") and model.input_signature is not None, \ - "'input_signature' needs to be set for model conversion." 
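Editorial aside: freeze_tf2_concrete_function above is essentially a thin wrapper over convert_variables_to_constants_v2. A rough standalone sketch for a toy Keras model follows; the model, shapes and names are illustrative, and it assumes TF >= 2.2 so the aggressive_inlining argument is accepted.

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

# Hypothetical tiny model standing in for the user-supplied one.
model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])

@tf.function
def wrapped(x):
    return model(x)

concrete = wrapped.get_concrete_function(tf.TensorSpec([None, 4], tf.float32))
frozen = convert_variables_to_constants_v2(concrete,
                                           lower_control_flow=False,
                                           aggressive_inlining=True)
graph_def = frozen.graph.as_graph_def(add_shapes=True)

# Roughly what inputs_outputs_order captures in the loader: Keras input tensor names
# plus the (renamed) outputs of the frozen function.
print([t.name for t in model.inputs], [t.name for t in frozen.outputs])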
- - conc_func = model.get_concrete_function(*tuple(model.input_signature)) - return freeze_tf2_concrete_function(model, conc_func, env_setup) - raise Exception("Unknown model type {}.".format(type(model))) - - -def saved_model_load(imported, env_setup): - # to get a signature by key throws KeyError for TF 1.x SavedModel format in case TF 2.x installed - concrete_func = imported.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY] - # the aggressive inlining parameter needs to freeze a table of embeddings for Keras Embedding operation - # and a model with Embedding operation cannot properly converted to IR without this function parameter - - return freeze_tf2_concrete_function(imported, concrete_func, env_setup) - - -def load_tf_graph_def(graph_file_name: str = "", is_binary: bool = True, checkpoint: str = "", - model_dir: str = "", saved_model_tags: list = [], meta_graph_file: str = "", - user_output_node_names_list: list = []): - if not isinstance(graph_file_name, str) and graph_file_name is not None: - return prepare_graph_def(graph_file_name) - # As a provisional solution, use a native TF methods to load a model protobuf - graph_def = tf_v1.GraphDef() - if isinstance(graph_file_name, str) and (re.match(r'.*\.(ckpt|meta)$', graph_file_name)): - print('[ WARNING ] The value for the --input_model command line parameter ends with ".ckpt" or ".meta" ' - 'extension.\n' - 'It means that the model is not frozen.\n' - 'To load non frozen model to Model Optimizer run:' - '\n\n1. For "*.ckpt" file:' - '\n- if inference graph is in binary format' - '\npython3 mo_tf.py --input_model "path/to/inference_graph.pb" --input_checkpoint "path/to/*.ckpt"' - '\n- if inference graph is in text format' - '\npython3 mo_tf.py --input_model "path/to/inference_graph.pbtxt" --input_model_is_text ' - '--input_checkpoint "path/to/*.ckpt"' - '\n\n2. For "*.meta" file:' - '\npython3 mo_tf.py --input_meta_graph "path/to/*.meta"') - variables_values = {} - try: - if graph_file_name and not meta_graph_file and not checkpoint: - # frozen graph - return read_file_to_graph_def(graph_def, graph_file_name, is_binary), variables_values, 'tf', None - if graph_file_name and not meta_graph_file and checkpoint: - # inference graph and checkpoint - graph_def = read_file_to_graph_def(graph_def, graph_file_name, is_binary) - outputs = get_output_node_names_list(graph_def, user_output_node_names_list) - if os.path.isfile(checkpoint): - graph_def = freeze_checkpoint(graph_def=graph_def, checkpoint=checkpoint, output_node_names=outputs) - elif os.path.isdir(checkpoint): - graph_def, variables_values = freeze_checkpoints(graph_def=graph_def, checkpoint_dir=checkpoint, - output_node_names=outputs) - # we are sure that checkpoint is existing file or directory due to cli_parser configuration - return graph_def, variables_values, 'tf', None - if not graph_file_name and meta_graph_file: - meta_graph_file = deducing_metagraph_path(meta_graph_file) - input_meta_graph_def = read_file_to_graph_def(tf_v1.MetaGraphDef(), meta_graph_file, is_binary) - # Since version 2.2 TF can fail with internal error while loading graph from .meta file. - # It happens because some operation may has an _output_shapes attribute inconsistent with the GraphDef - # calculated value. 
To avoid this problem we must delete `_output_shapes` attributes from operations - for node in input_meta_graph_def.graph_def.node: - if '_output_shapes' in node.attr: - del node.attr['_output_shapes'] - tf_v1.reset_default_graph() - # pylint: disable=no-member - with tf_v1.Session() as sess: - restorer = tf_v1.train.import_meta_graph(input_meta_graph_def) - restorer.restore(sess, re.sub(r'\.meta$', '', meta_graph_file)) - outputs = get_output_node_names_list(input_meta_graph_def.graph_def, user_output_node_names_list) - graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, input_meta_graph_def.graph_def, - outputs) - return graph_def, variables_values, 'tf', None - if model_dir: - # saved model directory - try: - env_setup = get_environment_setup("tf") - - try: - # Code to extract Keras model. - # tf.keras.models.load_model function throws TypeError,KeyError or IndexError - # for TF 1.x SavedModel format in case TF 1.x installed - imported = tf.keras.models.load_model(model_dir, compile=False) # pylint: disable=no-member - except: - imported = tf.saved_model.load(model_dir, saved_model_tags) # pylint: disable=E1120 - - return saved_model_load(imported, env_setup) - except: - # code to extract GraphDef for TF 1.0 SavedModel format - tags = saved_model_tags if saved_model_tags is not None else [tf_v1.saved_model.tag_constants.SERVING] - with tf_v1.Session() as sess: - meta_graph_def = tf_v1.saved_model.loader.load(sess, tags, model_dir) - outputs = get_output_node_names_list(meta_graph_def.graph_def, user_output_node_names_list) - graph_def = tf_v1.graph_util.convert_variables_to_constants(sess, meta_graph_def.graph_def, outputs) - return graph_def, variables_values, 'tf', None - except Exception as e: - raise FrameworkError('Cannot load input model: {}', e) from e - raise Error("Unknown configuration of input model parameters") - - -def protobuf_attrs(pb: tf_v1.NodeDef): - return {'pb': pb} - - -def protobuf2nx(graph, pb: tf_v1.GraphDef): - fill_graph_with_nodes(graph, pb.node, get_id=lambda pb: pb.name, get_attrs=protobuf_attrs) - - if hasattr(graph, 'op_names_statistic'): - for node_name in graph.nodes: - node = Node(graph, node_name) - node_pb = node.soft_get('pb', None) - if node_pb is not None: - if hasattr(node_pb, 'op'): - graph.op_names_statistic[node_pb.op] += 1 - - # Create a library with auxiliary functions used in TensorFlow 2 operations - if hasattr(pb, 'library') and hasattr(pb.library, 'function'): - graph.graph['library'] = {} - for library_function in pb.library.function: - function_name = library_function.signature.name - graph.graph['library'][function_name] = {} - graph.graph['library'][function_name]['input_arg'] = library_function.signature.input_arg - graph.graph['library'][function_name]['output_arg'] = library_function.signature.output_arg - graph.graph['library'][function_name]['node_def'] = library_function.node_def - graph.graph['library'][function_name]['ret'] = library_function.ret - # initial order of nodes in the GraphDef. It is used to specify order in - # which merged nodes are added to the generated sub-graph GraphDef for the TensorFlow offload feature. - graph.graph['initial_nodes_order'] = [node.name for node in pb.node] - - # Remove data dependency edges. 
This is needed for the TF offload case - for _, attrs in list(graph.nodes(data=True)): - pb = attrs['pb'] - if '_class' in pb.attr: - index = 0 - while index < len(pb.attr['_class'].list.s): - if re.match('^loc:@.*', pb.attr['_class'].list.s[index].decode('utf-8')): - del pb.attr['_class'].list.s[index] - else: - index = index + 1 - - -def variables_to_constants(graph: Graph, variables_values: dict): - """ - Converts `Variable` operations to FakeConst operations with `value` from `variables_values` dictionary - :param graph: graph to operate on - :param variables_values: dictionary with variable names as keys and np.array data as values - """ - for node in graph.get_op_nodes(op='FakeConst'): - node_name = node.name - - if node_name not in variables_values: - log.debug("There is no value for '{}': {} in checkpoint variable values".format(node.op, node_name)) - continue - - node['value'] = variables_values[node_name] diff --git a/tools/mo/openvino/tools/mo/front/tf/log_softmax_ext.py b/tools/mo/openvino/tools/mo/front/tf/log_softmax_ext.py deleted file mode 100644 index b114a0f038f0a3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/log_softmax_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.log_softmax import LogSoftmax - - -class LogSoftmaxExtractor(FrontExtractorOp): - op = 'LogSoftmax' - enabled = True - - @classmethod - def extract(cls, node): - # the default value for the TF LogSoftmax is -1 - axis = -1 - if 'axis' in node.pb.attr: - axis = node.pb.attr['axis'].i - LogSoftmax.update_node_stat(node, {'axis': axis}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/lrn_ext.py b/tools/mo/openvino/tools/mo/front/tf/lrn_ext.py deleted file mode 100644 index 342e866c7338fc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/lrn_ext.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.lrn import AttributedLRN - - -class LRNExtractor(FrontExtractorOp): - """ - TF and IE(CAFFE) parameters in LRN differs in several places : - region (IE) : in TF there is no such parameter, they just use last dimension (feature dimension in case of NHWC) - local-size (IE) : it's the size of 1D vector in Caffe. In TF they have 'depth_radius' that eq - '(local-size * 2) + 1' - alpha (IE) : in Caffe 'alpha' divides on local-size, so we should multiply alpha on local-size - - Caffe ref : http://caffe.berkeleyvision.org/tutorial/layers/lrn.html - TF ref : https://www.tensorflow.org/api_docs/python/tf/nn/local_response_normalization - """ - op = 'LRN' - enabled = True - - @classmethod - def extract(cls, node): - pb = node.pb - AttributedLRN.update_node_stat(node, { - 'alpha': pb.attr['alpha'].f * (2. 
* pb.attr['depth_radius'].i + 1.), - 'beta': pb.attr['beta'].f, - 'bias': pb.attr['bias'].f, - 'local_size': (2 * pb.attr['depth_radius'].i + 1), - }) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support.json b/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support.json deleted file mode 100644 index 33a2aa1981e729..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support.json +++ /dev/null @@ -1,120 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": true, - "clip_after_nms": false - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_1/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "FirstStageBoxPredictor/Reshape", - "FirstStageBoxPredictor/Reshape_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": true, - "clip_after_nms": false - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack_2/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack/TensorArrayGatherV3" - ], - "start_points": [ - "SecondStageBoxPredictor/Reshape", - "SecondStageBoxPredictor/Reshape_1", - "ExpandDims_7", - "ToFloat_6" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "CropAndResize_1" - ], - "start_points": [ - "CropAndResize_1" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIMaskRCNNSigmoidReplacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "outputs": "SecondStageBoxPredictor_1/Conv_3/BiasAdd|SecondStageBoxPredictor_1/Conv_1/BiasAdd" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.11.json b/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.11.json deleted file mode 100644 index 7abcdf96f821e4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.11.json +++ /dev/null @@ -1,121 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": 
"map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_1/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "concat", - "concat_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack_2/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack/TensorArrayGatherV3" - ], - "start_points": [ - "SecondStageBoxPredictor/Reshape", - "SecondStageBoxPredictor/Reshape_1", - "ExpandDims_6", - "ToFloat_6" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "Reshape_10" - ], - "start_points": [ - "CropAndResize_1/CropAndResize" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "masks_node_prefix_name": "SecondStageBoxPredictor" - }, - "id": "ObjectDetectionAPIMaskRCNNSigmoidReplacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "outputs": "SecondStageBoxPredictor_1/Conv_3/BiasAdd|SecondStageBoxPredictor_1/Conv_1/BiasAdd" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.13.json b/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.13.json deleted file mode 100644 index bdd113ea81e3f8..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.13.json +++ /dev/null @@ -1,122 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - 
"end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_2/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "concat", - "concat_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack_2/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack/TensorArrayGatherV3" - ], - "start_points": [ - "SecondStageBoxPredictor/Reshape", - "SecondStageBoxPredictor/Reshape_1", - "ExpandDims_6", - "ToFloat_3" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "Reshape_15" - ], - "start_points": [ - "CropAndResize_1/CropAndResize", - "CropAndResize_1/Shape_1" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "masks_node_prefix_name": "SecondStageBoxPredictor" - }, - "id": "ObjectDetectionAPIMaskRCNNSigmoidReplacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "outputs": "SecondStageBoxPredictor_1/Conv_3/BiasAdd|SecondStageBoxPredictor_1/Conv_1/BiasAdd" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.14.json b/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.14.json deleted file mode 100644 index c825b3eed285a3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.14.json +++ /dev/null @@ -1,122 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_2/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_5/TensorArrayGatherV3" - ], - "start_points": [ - "concat", - "concat_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - 
"BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack_2/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack/TensorArrayGatherV3" - ], - "start_points": [ - "SecondStageBoxPredictor/Reshape", - "SecondStageBoxPredictor/Reshape_1", - "ExpandDims_6", - "Cast_5" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "Reshape_10" - ], - "start_points": [ - "CropAndResize_1/CropAndResize", - "CropAndResize_1/Shape_1" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "masks_node_prefix_name": "SecondStageBoxPredictor" - }, - "id": "ObjectDetectionAPIMaskRCNNSigmoidReplacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "outputs": "SecondStageBoxPredictor_1/Conv_3/BiasAdd|SecondStageBoxPredictor_1/Conv_1/BiasAdd" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.15.json b/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.15.json deleted file mode 100644 index c4db36de05e2cb..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.15.json +++ /dev/null @@ -1,122 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_2/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_5/TensorArrayGatherV3" - ], - "start_points": [ - "concat/concat", - "concat_1/concat", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack_2/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack/TensorArrayGatherV3" - ], - "start_points": [ - "SecondStageBoxPredictor/Reshape", - "SecondStageBoxPredictor/Reshape_1", - "ExpandDims_6", - "Cast_5" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "Reshape_10" - ], - "start_points": [ - 
"CropAndResize_1/CropAndResize", - "CropAndResize_1/Shape_1" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "masks_node_prefix_name": "SecondStageBoxPredictor" - }, - "id": "ObjectDetectionAPIMaskRCNNSigmoidReplacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "outputs": "SecondStageBoxPredictor_1/Conv_3/BiasAdd|SecondStageBoxPredictor_1/Conv_1/BiasAdd" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.7.json b/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.7.json deleted file mode 100644 index c488580d307c90..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v1.7.json +++ /dev/null @@ -1,121 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": true, - "clip_after_nms": false - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_1/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "concat", - "concat_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": true, - "clip_after_nms": false - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack_2/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression_1/map/TensorArrayStack/TensorArrayGatherV3" - ], - "start_points": [ - "SecondStageBoxPredictor/Reshape", - "SecondStageBoxPredictor/Reshape_1", - "ExpandDims_7", - "ToFloat_6" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "CropAndResize_1" - ], - "start_points": [ - "CropAndResize_1" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "masks_node_prefix_name": "SecondStageBoxPredictor" - }, - "id": "ObjectDetectionAPIMaskRCNNSigmoidReplacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "outputs": "SecondStageBoxPredictor_1/Conv_3/BiasAdd|SecondStageBoxPredictor_1/Conv_1/BiasAdd" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - }, - { - "custom_attributes": - { - "replacements": [["mul/y", "first_stage_max_proposals"]] - }, - "id": "ObjectDetectionAPIConstValueOverride", - "match_kind": 
"general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v2.0.json b/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v2.0.json deleted file mode 100644 index fc0432c8c29875..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v2.0.json +++ /dev/null @@ -1,91 +0,0 @@ -[ - { - "custom_attributes": { - "start_nodes": ["StatefulPartitionedCall/Preprocessor/unstack"], - "end_nodes": ["StatefulPartitionedCall/Preprocessor/stack", - "StatefulPartitionedCall/Preprocessor/stack_1"] - }, - "id": "ObjectDetectionAPIPreprocessor2Replacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/stack_3", - "StatefulPartitionedCall/BatchMultiClassNonMaxSuppression/stack_10", - "StatefulPartitionedCall/Shape" - ], - "start_points": [ - "StatefulPartitionedCall/concat/concat", - "StatefulPartitionedCall/concat_1/concat", - "StatefulPartitionedCall/GridAnchorGenerator/Identity", - "StatefulPartitionedCall/Cast_1", - "StatefulPartitionedCall/Cast_2", - "StatefulPartitionedCall/Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true, - "background_label_id": 0 - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/BatchMultiClassNonMaxSuppression_1/stack_8", - "StatefulPartitionedCall/BatchMultiClassNonMaxSuppression_1/stack_6" - ], - "start_points": [ - "StatefulPartitionedCall/Reshape_4", - "StatefulPartitionedCall/Reshape_5", - "StatefulPartitionedCall/ExpandDims_6", - "StatefulPartitionedCall/Cast_5" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/Reshape_10" - ], - "start_points": [ - "StatefulPartitionedCall/CropAndResize_1/CropAndResize", - "StatefulPartitionedCall/CropAndResize_1/Reshape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "masks_node_prefix_name": "StatefulPartitionedCall/mask_rcnn_keras_box_predictor/mask_rcnn_mask_head/" - }, - "id": "ObjectDetectionAPIMaskRCNNSigmoidReplacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "outputs": "StatefulPartitionedCall/mask_rcnn_keras_box_predictor/mask_rcnn_mask_head/MaskPredictor_last_conv2d/BiasAdd,StatefulPartitionedCall/Reshape_13" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v2.4.json b/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v2.4.json deleted file mode 100644 index 3849547e05b38e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/mask_rcnn_support_api_v2.4.json +++ /dev/null @@ -1,91 +0,0 @@ -[ - { - "custom_attributes": { - "start_nodes": ["StatefulPartitionedCall/map/TensorArrayUnstack/TensorListFromTensor"], - "end_nodes": ["StatefulPartitionedCall/map/TensorArrayV2Stack/TensorListStack", - "StatefulPartitionedCall/map/TensorArrayV2Stack_1/TensorListStack"] - }, - "id": 
"ObjectDetectionAPIPreprocessor2Replacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/stack_3", - "StatefulPartitionedCall/BatchMultiClassNonMaxSuppression/stack_10", - "StatefulPartitionedCall/Shape" - ], - "start_points": [ - "StatefulPartitionedCall/concat/concat", - "StatefulPartitionedCall/concat_1/concat", - "StatefulPartitionedCall/GridAnchorGenerator/Identity", - "StatefulPartitionedCall/Cast_1", - "StatefulPartitionedCall/Cast_2", - "StatefulPartitionedCall/Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true, - "background_label_id": 0 - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/BatchMultiClassNonMaxSuppression_1/stack_8", - "StatefulPartitionedCall/BatchMultiClassNonMaxSuppression_1/stack_6" - ], - "start_points": [ - "StatefulPartitionedCall/Reshape_4", - "StatefulPartitionedCall/Reshape_5", - "StatefulPartitionedCall/ExpandDims_6", - "StatefulPartitionedCall/Cast_5" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIMaskRCNNROIPoolingSecondReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/Reshape_10" - ], - "start_points": [ - "StatefulPartitionedCall/CropAndResize_1/CropAndResize", - "StatefulPartitionedCall/CropAndResize_1/Reshape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "masks_node_prefix_name": "StatefulPartitionedCall/mask_rcnn_keras_box_predictor/mask_rcnn_mask_head/" - }, - "id": "ObjectDetectionAPIMaskRCNNSigmoidReplacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "outputs": "StatefulPartitionedCall/mask_rcnn_keras_box_predictor/mask_rcnn_mask_head/MaskPredictor_last_conv2d/BiasAdd,StatefulPartitionedCall/Reshape_13" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/matmul_ext.py b/tools/mo/openvino/tools/mo/front/tf/matmul_ext.py deleted file mode 100644 index 11739ce1d6ad7b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/matmul_ext.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.MatMul import MatMul -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error - - -class MatMulExtractor(FrontExtractorOp): - op = 'MatMul' - enabled = True - - @classmethod - def extract(cls, node: Node): - unsupported_attrs = [] - for attr_name in ['adjoint_a', 'adjoint_b', 'a_is_sparse', 'b_is_sparse']: - if attr_name in node.pb.attr and node.pb.attr[attr_name].b: - unsupported_attrs.append(attr_name) - if len(unsupported_attrs) != 0: - raise Error('MatMul operation {} use unsupported attrs: {}'.format(node.id, unsupported_attrs)) - - MatMul.update_node_stat(node, - { - 'transpose_a': node.pb.attr['transpose_a'].b, - 'transpose_b': node.pb.attr['transpose_b'].b, - }) - 
return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/mvn.py b/tools/mo/openvino/tools/mo/front/tf/mvn.py deleted file mode 100644 index a8415cd7e23069..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/mvn.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.ops.elementwise import Mul, Add -from openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Node, Graph - - -class MVNReplacer(FrontReplacementSubgraph): - enabled = True - - def pattern(self): - log.debug('Enabled MVN replacement') - return dict( - nodes=[ - ('mean', dict(op='ReduceMean')), - ('stop_grad', dict(op='StopGradient')), - ('sqdiff', dict(op='SquaredDifference')), - ('variance', dict(op='ReduceMean')), - ('squeeze_mean', dict(op='Squeeze')), - ('squeeze_variance', dict(op='Squeeze')), - ('fbn', dict(op=lambda op: op in ['FusedBatchNorm', 'FusedBatchNormV2', 'FusedBatchNormV3'])), - ], - edges=[ - ('mean', 'stop_grad', {'in': 0}), - ('stop_grad', 'sqdiff', {'in': 1}), - ('sqdiff', 'variance', {'in': 0}), - ('mean', 'squeeze_mean', {'in': 0}), - ('variance', 'squeeze_variance', {'in': 0}), - ('squeeze_mean', 'fbn', {'in': 3}), - ('squeeze_variance', 'fbn', {'in': 4}), - ]) - - def replace_sub_graph(self, graph: Graph, match: dict): - fbn = match['fbn'] - input = fbn.in_node(0) - log.debug('Found potential MVN pattern after {} with name {}'.format(input.op, input.name)) - if input.id != match['mean'].in_node(0).id or input.id != match['sqdiff'].in_node(0).id: - return - - log.debug('Confirmed MVN pattern after {} with name {}'.format(input.op, input.name)) - - mvn = MVN(graph, dict( - name=fbn.name + '/MVN_', - eps=fbn.eps, - eps_mode='outside_sqrt', - normalize_variance=1 - )) - mvn.attrs['old_infer'] = mvn.attrs['infer'] - mvn.attrs['infer'] = __class__.infer - - mul = Mul(graph, dict(operation='mul', name=fbn.name + '/Mul_')) - add = Add(graph, dict(operation='sum', name=fbn.name + '/Add_')) - - input_gamma = fbn.in_node(1) - input_beta = fbn.in_node(2) - - mean_reduction = match['mean'].in_node(1) - variance_reduction = match['variance'].in_node(1) - - new_subgraph = add.create_node([ - mul.create_node([ - mvn.create_node([input, mean_reduction, variance_reduction]), - input_gamma - ]), - input_beta - ]) - fbn.replace_node(new_subgraph) - - @staticmethod - def infer(node: Node): - axes_1_value = node.in_port(1).data.get_value() - axes_2_value = node.in_port(2).data.get_value() - if axes_1_value is None or axes_2_value is None: - log.warning('Reduction indices for mean and variance for MVN node {} are not constants'.format(node.name)) - return - - if not (all(axes_1_value == axes_2_value)): - log.warning('Reduction indices for mean {} and variance {} do not match'.format( - axes_1_value, - axes_2_value - )) - return - - node.in_port(2).disconnect() - node.old_infer(node) - node.infer = node.old_infer - del node['old_infer'] diff --git a/tools/mo/openvino/tools/mo/front/tf/mvn_unrolled.py b/tools/mo/openvino/tools/mo/front/tf/mvn_unrolled.py deleted file mode 100644 index 985d66a81c03bc..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/mvn_unrolled.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.PowerToEltwises import PowerToEltwises -from 
openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Node, Graph - - -class MVNUnrolled(FrontReplacementSubgraph): - enabled = True - - def run_after(self): - return [PowerToEltwises] - - def pattern(self): - log.debug('Enabled MVN replacement') - return dict( - nodes=[ - ('mean', dict(kind='op', op='ReduceMean')), - ('stop_grad', dict(kind='op', op='StopGradient')), - ('sqdiff', dict(kind='op', op='SquaredDifference')), - ('variance', dict(kind='op', op='ReduceMean')), - ('add', dict(kind='op', op='Add')), - ('pow', dict(kind='op', op='Pow')), - ('sub', dict(kind='op', op='Sub')), - ('truediv', dict(kind='op', op='Div')), - ], - edges=[ - ('mean', 'stop_grad', {'in': 0}), - ('stop_grad', 'sqdiff', {'in': 1}), - ('sqdiff', 'variance', {'in': 0}), - ('mean', 'sub', {'in': 1}), - ('variance', 'add'), - ('add', 'pow', {'in': 0}), - ('pow', 'truediv', {'in': 1}), - ('sub', 'truediv', {'in': 0}), - ]) - - @staticmethod - def replace_sub_graph(graph: Graph, match: dict): - mvn = MVN(graph, dict( - name=match['truediv'].name + '/MVN_', - eps_mode='outside_sqrt', - normalize_variance=1 - )) - mvn.attrs['old_infer'] = mvn.attrs['infer'] - mvn.attrs['infer'] = __class__.infer - - mean_reduction = match['mean'].in_node(1) - variance_reduction = match['variance'].in_node(1) - pow2 = match['pow'].in_node(1) - eps = match['add'].in_node(0 if match['add'].in_node(0).id != match['variance'].id else 1) - - new_subgraph = mvn.create_node([match['mean'].in_node(0), mean_reduction, variance_reduction, pow2, eps]) - - match['truediv'].replace_node(new_subgraph) - - @staticmethod - def infer(node: Node): - axes_1_value = node.in_port(1).data.get_value() - axes_2_value = node.in_port(2).data.get_value() - if axes_1_value is None or axes_2_value is None: - log.warning('Reduction indices for mean and variance for MVN node {} are not constants'.format(node.name)) - return - - if not (all(axes_1_value == axes_2_value)): - log.warning('Reduction indices for mean {} and variance {} do not match'.format( - axes_1_value, - axes_2_value - )) - return - - power_value = node.in_port(3).data.get_value() - eps_value = node.in_port(4).data.get_value() - if power_value is None or eps_value is None: - log.warning('Power or/and epsilon values for MVN node {} are not constants'.format(node.name)) - return - - if power_value != 0.5: - log.warning('Power for MVN node {} ({}) is not equal to 0.5'.format(node.name, power_value)) - return - - node['eps'] = eps_value - - for i in range(2, 5): - node.in_port(i).disconnect() - node.old_infer(node) - node.infer = node.old_infer - del node['old_infer'] diff --git a/tools/mo/openvino/tools/mo/front/tf/nearest_neighbor_upsampling.py b/tools/mo/openvino/tools/mo/front/tf/nearest_neighbor_upsampling.py deleted file mode 100644 index f5bba726f7c443..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/nearest_neighbor_upsampling.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.Pack import Pack -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array, float32_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const - - -class 
NearestNeighborUpsampling(FrontReplacementSubgraph): - enabled = True - - def run_before(self): - return [Pack] - - def pattern(self): - return dict( - nodes=[('op', dict(kind='op')), - ('shape', dict(kind='op', op='ShapeOf')), - ('strided_slice', dict(kind='op', op='StridedSlice')), - ('pack_1', dict(kind='op', op='Pack')), - ('reshape_1', dict(kind='op', op='Reshape')), - ('mul_const', dict(kind='op', op='Const')), - ('mul', dict(kind='op', op='Mul')), - ('pack_2', dict(kind='op', op='Pack')), - ('reshape_2', dict(kind='op', op='Reshape')), - ], - edges=[ - ('op', 'shape'), - ('op', 'reshape_1'), - ('shape', 'strided_slice'), - ('strided_slice', 'pack_1'), - ('strided_slice', 'pack_2'), - ('pack_1', 'reshape_1'), - ('pack_2', 'reshape_2'), - ('reshape_1', 'mul'), - ('mul_const', 'mul'), - ('mul', 'reshape_2'), - ] - ) - - def replace_sub_graph(self, graph: Graph, match: dict): - log.debug('Matched NearestNeighborUpsampling pattern: {}'.format([node.id for node in match.values()])) - try: - input_height = match['pack_1'].in_node(1).value.item() - input_width = match['pack_1'].in_node(3).value.item() - - height_scale = match['mul_const'].shape[-4] - width_scale = match['mul_const'].shape[-2] - except Exception as ex: - log.warning('Failed to determine scaling parameters from the topology. Do not apply pattern.') - return - - reshape2_name = match['reshape_2'].name - resample_op = Interpolate(graph, - {'mode': 'nearest', 'antialias': 0, 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), 'coordinate_transformation_mode': 'half_pixel', - 'nearest_mode': 'round_prefer_floor', 'cube_coeff': -0.75, 'version': 'opset4', - 'name': reshape2_name + '/Resample', 'shape_calculation_mode': 'scales', - 'in_ports_count': 4}) - resample_node = resample_op.create_node([match['op']]) - axes_node = Const(graph, - { - 'name': resample_node.name + '/axes', - 'value': int64_array([2, 3]) if graph.graph['layout'] == 'NCHW' else int64_array([1, 2]) - }).create_node() - sizes_node = Const(graph, {'value': mo_array([input_height * height_scale, input_width * width_scale]), - 'name': resample_node.name + '/target_shape'}).create_node() - scales_node = Const(graph, {'value': float32_array([height_scale, width_scale]), - 'name': resample_node.name + '/scales'}).create_node() - - match['reshape_2'].replace_node(resample_node) - - resample_node.add_input_port(1, skip_if_exist=True) - assert resample_node.in_port(1).disconnected() - sizes_node.out_port(0).connect(resample_node.in_port(1)) - scales_node.out_port(0).connect(resample_node.in_port(2)) - axes_node.out_port(0).connect(resample_node.in_port(3)) - - graph.remove_nodes_from([node.id for node in match.values() if node.id != match['op'].id]) diff --git a/tools/mo/openvino/tools/mo/front/tf/next_iteration_ext.py b/tools/mo/openvino/tools/mo/front/tf/next_iteration_ext.py deleted file mode 100644 index e70b25db3249f5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/next_iteration_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node - - -class NextIterationExtractor(FrontExtractorOp): - op = "NextIteration" - enabled = True - - @classmethod - def extract(cls, node: Node): - node['is_cyclic'] = True - node['infer'] = copy_shape_infer - return cls.enabled diff --git 
a/tools/mo/openvino/tools/mo/front/tf/non_max_suppression_ext.py b/tools/mo/openvino/tools/mo/front/tf/non_max_suppression_ext.py deleted file mode 100644 index 18b1c5c3f6eed6..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/non_max_suppression_ext.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.non_max_suppression import NonMaxSuppression -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class NonMaxSuppressionV2Extractor(FrontExtractorOp): - op = 'NonMaxSuppressionV2' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'sort_result_descending': 1, 'box_encoding': 'corner', 'output_type': np.int32} - NonMaxSuppression.update_node_stat(node, attrs) - return cls.enabled - - -class NonMaxSuppressionV3Extractor(FrontExtractorOp): - op = 'NonMaxSuppressionV3' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {'sort_result_descending': 1, 'box_encoding': 'corner', 'output_type': np.int32} - NonMaxSuppression.update_node_stat(node, attrs) - return cls.enabled - - -class NonMaxSuppressionV4Extractor(FrontExtractorOp): - op = 'NonMaxSuppressionV4' - enabled = True - - @classmethod - def extract(cls, node): - pad_to_max_output_size = node.pb.attr["pad_to_max_output_size:"].b - if not pad_to_max_output_size: - log.warning('The attribute "pad_to_max_output_size" of node {} is equal to False which is not supported. ' - 'Forcing it to be equal to True'.format(node.soft_get('name'))) - attrs = {'sort_result_descending': 1, 'box_encoding': 'corner', 'output_type': np.int32} - NonMaxSuppression.update_node_stat(node, attrs) - return cls.enabled - - -class NonMaxSuppressionV5Extractor(FrontExtractorOp): - op = 'NonMaxSuppressionV5' - enabled = True - - @classmethod - def extract(cls, node): - pad_to_max_output_size = node.pb.attr["pad_to_max_output_size:"].b - if not pad_to_max_output_size: - log.warning('The attribute "pad_to_max_output_size" of node {} is equal to False which is not supported. ' - 'Forcing it to be equal to True'.format(node.soft_get('name'))) - attrs = {'sort_result_descending': 1, 'box_encoding': 'corner', 'output_type': np.int32} - NonMaxSuppression.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/non_max_suppression_normalize.py b/tools/mo/openvino/tools/mo/front/tf/non_max_suppression_normalize.py deleted file mode 100644 index 57164a184f0248..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/non_max_suppression_normalize.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.crop import Crop -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class TFNonMaxSuppressionNormalize(FrontReplacementSubgraph): - """ - The inputs and outputs format of the TF implementation of the NMS layer is different from the OpenVINO - implementation and supports just one batch and image class. 
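Editorial aside: the shape normalization described in this docstring can be pictured with plain NumPy (hypothetical sizes, illustration only, not the removed transformation itself).

import numpy as np

# Hypothetical TF-layout NMS inputs: 5 candidate boxes, single implicit batch/class.
tf_boxes = np.zeros((5, 4), dtype=np.float32)   # [num_boxes, 4]
tf_scores = np.zeros((5,), dtype=np.float32)    # [num_boxes]

# The transformation inserts Unsqueeze([0]) / Reshape([1, 1, -1]) to reach the OV layout:
ov_boxes = tf_boxes[np.newaxis, ...]            # [num_batches=1, num_boxes, 4]
ov_scores = tf_scores.reshape(1, 1, -1)         # [num_batches=1, num_classes=1, num_boxes]

assert ov_boxes.shape == (1, 5, 4) and ov_scores.shape == (1, 1, 5)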
This transformation converts inputs and outputs to - match the OpenVINO implementation. - - TF inputs: boxes = [num_boxes, 4] - scores = [num_boxes] - outputs: box_indices [selected_boxes_count] - box_scores [selected_boxes_count] - valid_outputs selected_boxes_count - - OV inputs: boxes = [num_batches, num_boxes, 4] - scores = [num_batches, num_classes, num_boxes] - outputs: selected_indices [num_selected_indices, 3] where each element is [batch_index, class_index, box_index] - selected_scores [num_selected_indices, 3] where each element is [batch_index, class_index, box_score] - valid_outputs num_selected_indices - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.front.non_max_suppression_normalize import NonMaxSuppressionNormalize - return [NonMaxSuppressionNormalize] - - def find_and_replace_pattern(self, graph: Graph): - for nms in graph.get_op_nodes(op='NonMaxSuppression'): - # prepare inputs to the NonMaximumSuppression Node - unsqueeze_boxes = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]), - {'name': nms.soft_get('name') + '/Unsqueeze_0'}) - nms.in_port(0).get_connection().insert_node(unsqueeze_boxes) - - unsqueeze_box_scores = create_op_node_with_second_input(graph, Reshape, int64_array([1, 1, -1]), - {'name': nms.soft_get('name') + '/Unsqueeze_1'}) - nms.in_port(1).get_connection().insert_node(unsqueeze_box_scores) - - nms_name = nms.soft_get('name', nms.id) - - # prepare output #0 - crop_box_indices_name = nms_name + '/Crop_boxes_' - crop_box_indices = Crop(graph, {'name': crop_box_indices_name, 'axis': int64_array([1]), - 'offset': int64_array([2]), 'dim': int64_array([1])}).create_node() - nms.out_port(0).get_connection().insert_node(crop_box_indices) - squeeze_output_boxes = create_op_node_with_second_input(graph, Squeeze, int64_array([1]), - {'name': crop_box_indices_name + '/Squeeze'}) - crop_box_indices.out_port(0).get_connection().insert_node(squeeze_output_boxes) - - num_of_outputs = len([port for port in nms.out_ports().values() if not port.disconnected()]) - - if num_of_outputs == 1: - continue - - # prepare output #1 - crop_score_indices_name = nms_name + '/Crop_scores_' - crop_score_indices = Crop(graph, {'name': crop_score_indices_name, 'axis': int64_array([1]), - 'offset': int64_array([2]), 'dim': int64_array([1])}).create_node() - nms.out_port(1).get_connection().insert_node(crop_score_indices) - squeeze_output_scores = create_op_node_with_second_input(graph, Squeeze, int64_array([1]), - {'name': crop_score_indices_name + '/Squeeze'}) - crop_score_indices.out_port(0).get_connection().insert_node(squeeze_output_scores) diff --git a/tools/mo/openvino/tools/mo/front/tf/noop.py b/tools/mo/openvino/tools/mo/front/tf/noop.py deleted file mode 100644 index 9a945af23fd79e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/noop.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import networkx as nx - -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.error import Error - - -class NoOpElimination(FrontReplacementOp): - """ - NoOp does nothing and it has no data flow edges. - It operates only with control flow edges. 
- """ - op = "NoOp" - enabled = True - - def replace_sub_graph(self, graph: Graph, match: dict): - node = match['op'] - in_edges = node.in_edges() - out_edges = node.out_edges() - if len(in_edges) == 0 and len(out_edges) == 0: - graph.remove_node(node.id) - log.debug('NoOp op was removed {}'.format(node.id)) - else: - raise Error('NoOp node {} contains data flow edges'.format(node.id)) diff --git a/tools/mo/openvino/tools/mo/front/tf/one_hot_ext.py b/tools/mo/openvino/tools/mo/front/tf/one_hot_ext.py deleted file mode 100644 index 6b4519b80efe2d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/one_hot_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.one_hot import OneHot -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor - - -class OneHotFrontExtractor(FrontExtractorOp): - op = 'OneHot' - enabled = True - - @classmethod - def extract(cls, node): - OneHot.update_node_stat(node, {'axis': node.pb.attr['axis'].i, - 'data_type': tf_dtype_extractor(node.pb.attr["T"].type, np.float32)}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/pad_ext.py b/tools/mo/openvino/tools/mo/front/tf/pad_ext.py deleted file mode 100644 index a42affcdbc111c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/pad_ext.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.pad import TFPad - - -class PadFrontExtractor(FrontExtractorOp): - op = 'Pad' - enabled = True - - @classmethod - def extract(cls, node): - TFPad.update_node_stat(node) - return cls.enabled - - -class PadV2FrontExtractor(FrontExtractorOp): - op = 'PadV2' - enabled = True - - @classmethod - def extract(cls, node): - TFPad.update_node_stat(node) - return cls.enabled - - -class MirrorPadFrontExtractor(FrontExtractorOp): - op = 'MirrorPad' - enabled = True - - @classmethod - def extract(cls, node): - TFPad.update_node_stat(node, {'mode': node.pb.attr['mode'].s.decode('utf-8').lower()}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/pad_tf_to_pad.py b/tools/mo/openvino/tools/mo/front/tf/pad_tf_to_pad.py deleted file mode 100644 index 1eeb69d513dd7a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/pad_tf_to_pad.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ConvertLike import ConvertLike -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_node -from openvino.tools.mo.ops.pad import Pad -from openvino.tools.mo.ops.squeeze import Squeeze - - -class PadTFToPad(FrontReplacementPattern): - """ - This transformation converts TFPad operation (TensorFlow semantic) to Pad operation (OpenVINO semantic). - Refer to the Op implementation for the operations semantics description. 
- """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for tfpad in graph.get_op_nodes(op='TFPad'): - # save the original node name to use it in the new Pad op instance - original_name = tfpad.soft_get('name', tfpad.id) - tfpad['name'] = original_name + '/to_be_removed' - - new_pad = Pad(graph, {'mode': tfpad.soft_get('mode', None), }).create_node() - rename_node(new_pad, original_name) - - tfpad.in_port(0).get_connection().set_destination(new_pad.in_port(0)) - - if tfpad.soft_get('mode') == 'constant': - # the input with fill value is an optional third input in TF - if not tfpad.in_port(2).disconnected(): - tfpad.in_port(2).get_connection().set_destination(new_pad.in_port(3)) - - # convert TF representation of the pads as [N, 2] to MO representation: [N] and [N] - transposed_pads = create_op_with_const_inputs(graph, Transpose, {1: int64_array([1, 0])}) - tfpad.in_port(1).get_connection().set_destination(transposed_pads.in_port(0)) - split_pads = create_op_with_const_inputs(graph, Split, {1: int64_array(0)}, {'num_splits': 2}) - transposed_pads.out_port(0).connect(split_pads.in_port(0)) - for port_ind in range(2): - split_pads.add_output_port(port_ind, skip_if_exist=True) - new_pad.in_port(port_ind + 1).connect(split_pads.out_port(port_ind)) - new_pad.in_port(port_ind + 1).get_connection().insert_node( - create_op_with_const_inputs(graph, Squeeze, {1: int64_array([0])})) - - tfpad.out_port(0).get_connection().set_source(new_pad.out_port(0)) - graph.remove_node(tfpad.id) diff --git a/tools/mo/openvino/tools/mo/front/tf/partial_infer/__init__.py b/tools/mo/openvino/tools/mo/front/tf/partial_infer/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/partial_infer/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/front/tf/partial_infer/tf.py b/tools/mo/openvino/tools/mo/front/tf/partial_infer/tf.py deleted file mode 100644 index 194e8f64d96548..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/partial_infer/tf.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import os -from re import match - -import numpy as np - -# do not print INFO and WARNING messages from TensorFlow -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -try: - import tensorflow.compat.v1 as tf_v1 -except ImportError: - import tensorflow as tf_v1 - -#in some environment suppressing through TF_CPP_MIN_LOG_LEVEL does not work -tf_v1.get_logger().setLevel("ERROR") - -from google.protobuf import text_format -from tensorflow.python.eager.context import graph_mode # pylint: disable=no-name-in-module,import-error - -from openvino.tools.mo.front.extractor import node_defs_to_str -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape, get_tf_node_port -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.graph import node_incoming_neighbourhood, node_outcoming_neighbourhood -from openvino.tools.mo.front.common.partial_infer.utils import mo_array - -def tf_native_tf_node_infer(node: Node): - """ - The infer function should be used to infer shape and data type of the TF operation not supported by IE. - :param node: node to infer. - :return: None - """ - log.info('Called "tf_native_tf_node_infer" for node "{}"'.format(node.id)) - - # create a sub-graph only to make inference. 
The sub-graph contains desired node and it's inputs neighbourhood of - # depth 10. The number 10 is quite big to be sure that determine_data_type function will be able to identify the - # data type of input tensors, but not too huge to contain the whole graph. - # Also the sub-graph contains names of the output nodes of the node to perform native infer. - nodes_to_extract = node_incoming_neighbourhood(node.graph, node.id, 10) + node_outcoming_neighbourhood(node.graph, - node.id, 1) - tmp_graph = node.graph.create_sub_graph_copy(nodes_to_extract) - - tmp_node_attrs = tmp_graph.node[node.id] - tmp_node = Node(tmp_graph, node.id) - - # node attributes that are required by 'infer_subgraph_output_nodes' function - lists_to_init = ['input_nodes_names', 'output_tensors_names', 'nodes_order', 'internal_output_node_name', - 'real_input_dims'] - - for item in lists_to_init: - tmp_node_attrs[item] = list() - tmp_node_attrs['pbs'] = {tmp_node.name: tmp_node.pb} - tmp_node_attrs['nodes_order'].append(tmp_node.id) - for ind in range(len(tmp_node.out_edges())): - tmp_node_attrs['output_tensors_names'].append(tmp_node.id + ":" + str(ind)) - - with graph_mode(): - tf_subgraph_infer(tmp_node) - - # the shape and value has been inferred and saved to the tmp_node's out nodes attribute. Let's copy it back! - for tmp_out_port, tmp_out_node in tmp_node.out_nodes().items(): - if tmp_out_node.value is not None: - node.out_node(tmp_out_port).value = mo_array(tmp_out_node.value) - if tmp_out_node.shape is not None: - node.out_node(tmp_out_port).shape = mo_array(tmp_out_node.shape) - if tmp_out_node.data_type is not None: - node.out_node(tmp_out_port).data_type = tmp_out_node.data_type - # lets cleanup the temporary graph - tmp_graph.clear() - - -def generate_feed_dict(graph: tf_v1.Graph, node: Node): - """ - The first value in the return tuple is True if all inputs for the node has constant values. - The second returned value is mapping of placeholder tensor to the numpy arrays with the values for these - placeholders. - :param graph: the TensorFlow Graph to generate feed dictionary to. - :param node: the node which represents TensorFlow sub-graph of operations. - :return: pair where the first element is a flag that specifies that all node inputs are constants and a dictionary - where key is the input Tensor object and the value is the tensor value. - """ - all_constants = True - feed_dict = dict() - for in_data_node_name, edge_attrs in node.get_inputs(): - if 'control_flow_edge' in edge_attrs and edge_attrs['control_flow_edge']: - continue - value = node.in_node(edge_attrs['in']).value - if value is None: - all_constants = False - placeholder_pb = node['pbs'][edge_attrs['placeholder_name']] - value = np.ones(shape=tf_tensor_shape(placeholder_pb.attr['shape'].shape), - dtype=tf_dtype_extractor(placeholder_pb.attr['dtype'].type)) - feed_dict[graph.get_tensor_by_name(edge_attrs['placeholder_name'] + ":0")] = value - return all_constants, feed_dict - - -def get_subgraph_output_tensors(node: Node): - """ - Infer output shapes of the node. The function uses TF to infer the values of output tensors and then getting tensor - shape. - TODO: try to not infer values but just infer the output tensors shapes. - :param node: sub-graph node to infer. - :return: pair where the first element is a flag that specifies that all node inputs are constants and a dictionary - where key is the output port and the value is the tensor value. 
- """ - result = dict() - graph_def = tf_v1.GraphDef() - text_format.Merge(node_defs_to_str(node), graph_def) - tf_v1.reset_default_graph() - graph = tf_v1.Graph() - sess = tf_v1.Session(graph=graph) - with graph.as_default(): # pylint: disable=not-context-manager - with sess.as_default(): # pylint: disable=not-context-manager - tf_v1.import_graph_def(graph_def, name='') - all_constants, feed_dict = generate_feed_dict(graph, node) - for out_port, out_tensor_name in enumerate(node['output_tensors_names']): - if not match(r'.*:\d+', out_tensor_name): - out_tensor_name = out_tensor_name + ":" + str(out_port) - result_tensor = sess.run(graph.get_tensor_by_name(out_tensor_name), feed_dict=feed_dict) - result[out_port] = result_tensor - return all_constants, result - - -def tf_subgraph_infer(node: Node): - """ - Infer output shapes of the node using TF to infer the values of output tensors and then getting tensor shapes. - If all inputs of the node are constants then the node's attribute 'value' is updated also. - :param node: sub-graph node to infer. The function updates 'shape' and 'data_type' attributes of the node. - :return: None - """ - # TODO: try to not infer values but just infer the output tensors shapes. - add_placeholders_to_subgraph(node) - - all_constants, output_tensors = get_subgraph_output_tensors(node) - for out_port, tensor_value in output_tensors.items(): - out_node = node.out_node(out_port) - out_node.shape = mo_array([dim for dim in tensor_value.shape]) - out_node.data_type = tensor_value.dtype - log.debug("Inferred shape of the output tensor with index '{}' of the node '{}': '{}'".format(str(out_port), - node.name, - out_node.shape)) - if all_constants: - out_node.value = tensor_value - - -def add_node_def_to_subgraph(subgraph_node: Node, node_def: tf_v1.NodeDef, name: str = None, position: int = 0, - is_input: bool = False): - """ - Adds NodeDef definition of the node to the internal structures of the sub-graph's_node object that represents a - sub-graph of operations. - :param subgraph_node: the node that represents sub-graph where new node should be added. - :param node_def: the NodeDef (TF operation, variable or constant) to be added to the sub-graph. - :param name: name how to save added node. Default value is None which means take name from the NodeDef. - :param position: position in the GraphDef where to put the NodeDef. Default value is 0. - :param is_input: flag that specifies whether the node is input for the sub-graph. Default value is False. - :return: None - """ - name = name or node_def.name - assert (name not in subgraph_node['pbs'].keys()) - if is_input: - subgraph_node['input_nodes_names'].append(name) - subgraph_node['pbs'][node_def.name] = node_def - subgraph_node['nodes_order'].insert(position, name) - - -def determine_data_type(node: Node): - """ - Tries to determine data type of the node. The input node could be either data or op node. If we don't know the data - type of the node then we recursively check the first parent of the node. - :param node: node to determine data type. - :return: data type of the node output in the numpy format. 
- """ - if node.has_and_set('data_type'): - return node.data_type - if node.has_and_set('kind') and node.kind == 'op': - if node.has_and_set('pb'): - if 'dtype' in node.pb.attr: - return tf_dtype_extractor(node.pb.attr['dtype'].type) - if 'T' in node.pb.attr: - return tf_dtype_extractor(node.pb.attr['T'].type) - if node.has_and_set('kind') and node.kind == 'data': - if 'value' in node and node.value is not None: - return node.value.dtype - if len(node.in_nodes()) != 0: # try to guess data type from the first parent - return determine_data_type(node.in_node(0)) - log.error('Failed to determine data type for node "{}"'.format(node.name)) - return None - - -def add_placeholders_to_subgraph(node: Node): - """ - Adds placeholders to the node's list of protobufs based on input nodes to the subgraph (the value of - 'internal_input_node_name' property). - The function also updates input tensors for nodes which consume output of nodes that were replaced with - placeholders. - :param node: the node to add placeholders to. - :return: None - """ - inputs_replacements = list() - for index, (in_data_node, edge_attrs) in enumerate(node.get_sorted_inputs()): - if 'control_flow_edge' in edge_attrs and edge_attrs['control_flow_edge']: - continue - - if 'internal_input_node_name' in edge_attrs.keys(): - input_tensor_name = edge_attrs['internal_input_node_name'] - else: - input_tensor_name = node['pb'].input[index] - - input_node_name, port = get_tf_node_port(input_tensor_name) - - placeholder_name = placeholder_name_for_node(input_node_name, port) - edge_attrs['placeholder_name'] = placeholder_name - in_node = node.in_node(index) - - assert in_node.shape is not None - - if placeholder_name not in node['pbs'].keys(): - placeholder = tf_v1.placeholder(determine_data_type(in_node), in_node.shape, placeholder_name) - inputs_replacements.append((input_tensor_name, placeholder_name)) - add_node_def_to_subgraph(node, placeholder.op.node_def, is_input=True) - log.debug("Added placeholder with name '{}'".format(placeholder_name)) - - # update initial input names to a transposed ones - for old_input_tensor_name, new_name in inputs_replacements: - update_input_in_pbs(node, old_input_tensor_name, new_name) - - -def update_input_in_pbs(node: Node, old_input_tensor_name: str, new_input_name: str): - """ - The function replaces all inputs with name 'old_input_tensor_name' with a - new input with name 'new_input_name'. This transformation is applied - for all NodeDef objects in the 'pbs' list. 
- """ - log.debug("update_input_in_pbs: replace input '%s' with input '%s'" % (old_input_tensor_name, new_input_name)) - old_input_tensor_name_without_port = old_input_tensor_name.split(":")[0] - for pb in node['pbs'].values(): - if hasattr(pb, 'input'): - for ind in range(len(pb.input)): - if pb.input[ind] == old_input_tensor_name or pb.input[ind] == old_input_tensor_name_without_port: - pb.input[ind] = new_input_name - log.debug("Replacing input '{}' of the node '{}' with placeholder '{}'".format(ind, pb.name, - new_input_name)) - - -def placeholder_name_for_node(node_name: str, output_port: int): - return node_name + "_port_" + str(output_port) + "_ie_placeholder" diff --git a/tools/mo/openvino/tools/mo/front/tf/placeholder_ext.py b/tools/mo/openvino/tools/mo/front/tf/placeholder_ext.py deleted file mode 100644 index 01bb83f00baf21..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/placeholder_ext.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape -from openvino.tools.mo.ops.op import PermuteAttrs -from openvino.tools.mo.ops.parameter import Parameter - - -class PlaceholderFrontExtractor(FrontExtractorOp): - op = 'Placeholder' - enabled = True - - @classmethod - def extract(cls, node): - shape = shape_array([]) - # Extract output shape from `shape` attribute - extracted_shape = tf_tensor_shape(node.pb.attr["shape"].shape) - if len(extracted_shape) != 0: - shape = extracted_shape - else: - # Extract output shape from `_output_shapes` attribute if it is possible - extracted_output_shapes = node.pb.attr["_output_shapes"].list.shape - if len(extracted_output_shapes) == 1: # check if attribute not empty - extracted_output_shapes = tf_tensor_shape(extracted_output_shapes[0]) - - # Check equality of extracted shapes. We know some cases then Placeholder operation has empty `shape` - # attribute value and non-empty `_output_shapes` attribute value and need co handle and support it. - if len(extracted_output_shapes) > len(extracted_shape): - log.warning('Extracted shapes for Placeholder operation {} have different lengths: `shape` {} and ' - '`_output_shapes` {}. 
Please, check if model is consistent'.format( - node.pb.name, extracted_shape, extracted_output_shapes)) - if len(extracted_output_shapes) != 0: - shape = extracted_output_shapes - - attrs = { - 'data_type': tf_dtype_extractor(node.pb.attr["dtype"].type), - 'shape': shape, - 'permute_attrs': PermuteAttrs().update_attrs(attrs=[('shape', 'output:0')]) - } - if node.pb.attr["shape"].shape.unknown_rank: - attrs['shape'] = None - Parameter.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/placeholder_with_default_ext.py b/tools/mo/openvino/tools/mo/front/tf/placeholder_with_default_ext.py deleted file mode 100644 index 0d718e7936a244..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/placeholder_with_default_ext.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer, copy_value -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor, tf_tensor_shape -from openvino.tools.mo.ops.op import Op - - -class PlaceholderWithDefaultExtractor(FrontExtractorOp): - op = 'PlaceholderWithDefault' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'data_type': tf_dtype_extractor(node.pb.attr["dtype"].type), - 'shape': tf_tensor_shape(node.pb.attr["shape"].shape), - 'identity': True, - 'infer': lambda node: copy_shape_infer(node, value_infer=copy_value), - } - Op.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/pooling_ext.py b/tools/mo/openvino/tools/mo/front/tf/pooling_ext.py deleted file mode 100644 index 9f224cd433047c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/pooling_ext.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import convert_tf_padding_to_str -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_data_format_spatial, tf_int_list -from openvino.tools.mo.ops.pooling import Pooling, PoolingV2 - - -class AvgPoolFrontExtractor(FrontExtractorOp): - op = 'AvgPool' - enabled = True - - @classmethod - def extract(cls, node): - attrs = create_pooling_attrs(node, 'avg') - attrs.update({'op': __class__.op}) - # update the attributes of the node - Pooling.update_node_stat(node, attrs) - return cls.enabled - - -class MaxPoolFrontExtractor(FrontExtractorOp): - op = 'MaxPool' - enabled = True - - @classmethod - def extract(cls, node): - attrs = create_pooling_attrs(node, 'max') - attrs.update({'op': __class__.op}) - # update the attributes of the node - Pooling.update_node_stat(node, attrs) - return cls.enabled - - -class MaxPool3DFrontExtractor(FrontExtractorOp): - op = 'MaxPool3D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = create_pooling_attrs(node, 'max') - attrs.update({'op': __class__.op}) - # update the attributes of the node - Pooling.update_node_stat(node, attrs) - return cls.enabled - - -class AvgPool3DFrontExtractor(FrontExtractorOp): - op = 'AvgPool3D' - enabled = True - - @classmethod - def extract(cls, node): - attrs = create_pooling_attrs(node, 'avg') - attrs.update({'op': __class__.op}) - # update the attributes of the node - Pooling.update_node_stat(node, attrs) - return cls.enabled - - -class 
AvgPoolV2FrontExtractor(FrontExtractorOp): - op = 'AvgPoolV2' - enabled = True - - @classmethod - def extract(cls, node): - attrs = create_pooling_attrs(node, 'avg') - PoolingV2.update_node_stat(node, attrs) - return cls.enabled - - -class MaxPoolV2FrontExtractor(FrontExtractorOp): - op = 'MaxPoolV2' - enabled = True - - @classmethod - def extract(cls, node): - attrs = create_pooling_attrs(node, 'max') - PoolingV2.update_node_stat(node, attrs) - return cls.enabled - - -def create_pooling_attrs(node, pool_method): - data_format = node.pb.attr["data_format"] - - attrs = { - 'auto_pad': convert_tf_padding_to_str(node.pb.attr['padding'].s.decode()), - 'window': tf_int_list(node.pb.attr['ksize'].list), - 'spatial_dims': tf_data_format_spatial(data_format), - 'pad': None, # will be inferred when input shape is known - 'stride': tf_int_list(node.pb.attr['strides'].list), - 'pad_spatial_shape': None, - 'output_spatial_shape': None, - 'pool_method': pool_method, - 'layout': data_format.s.decode(), - 'exclude_pad': True, - } - return attrs diff --git a/tools/mo/openvino/tools/mo/front/tf/prelu.py b/tools/mo/openvino/tools/mo/front/tf/prelu.py deleted file mode 100644 index 2c9d6246effdb2..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/prelu.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.PowerToEltwises import PowerToEltwises -from openvino.tools.mo.ops.prelu import PReLU -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.pattern_match import check_node_usages_out_of_match - - -class PReLUPatternFuse(FrontReplacementSubgraph): - enabled = True - - def run_before(self): - return [PowerToEltwises] - - def pattern(self): - return dict( - nodes=[('op', dict(kind='op')), - ('pos_relu', dict(kind='op', op='ReLU')), - ('neg', dict(kind='op', op='AttributedPower', scale=-1, power=1, shift=0)), - ('neg_relu', dict(kind='op', op='ReLU')), - ('neg_1', dict(kind='op', op='AttributedPower', scale=-1, power=1, shift=0)), - ('mul', dict(kind='op', op='Mul')), - ('add', dict(kind='op', op='Add')), - ], - edges=[ - ('op', 'pos_relu'), - ('op', 'neg'), - ('pos_relu', 'add'), - ('neg', 'neg_relu'), - ('neg_relu', 'neg_1'), - ('neg_1', 'mul'), - ('mul', 'add') - ] - ) - - def replace_sub_graph(self, graph: Graph, match: dict): - consumers = [n for n in match if n not in ['mul', 'op', 'add'] and not check_node_usages_out_of_match(match, n)] - if consumers: - log.warning('PReLU pattern was detected. Non pattern consumers of nodes: "{}" were found. 
Won\'t replace' - ''.format(', '.join([match[n].id for n in consumers]))) - return - gamma = match['mul'].in_node(0) if match['mul'].in_node(1).id == match['neg_1'].id else match['mul'].in_node(1) - prelu_node = PReLU(graph, {'name': '{}/PReLU'.format(match['add'].id)}).create_node([match['op'], gamma]) - match['add'].replace_node(prelu_node) - log.debug('PReLU pattern starting from "{}" was collapsed to "{}"'.format(match['op'].id, prelu_node.id)) - - -class PReLUWithAbsPatternFuse(FrontReplacementSubgraph): - enabled = True - - def pattern(self): - return dict( - nodes=[('op', dict(kind='op')), - ('relu', dict(kind='op', op='ReLU')), - ('abs', dict(kind='op', op='Abs')), - ('sub', dict(kind='op', op='Sub')), - ('mul', dict(kind='op', op='Mul')), - ('mul_1', dict(kind='op', op='Mul')), - ('add', dict(kind='op', op='Add')), - ], - edges=[ - ('op', 'relu'), - ('op', 'abs'), - ('op', 'sub'), - ('abs', 'sub'), - ('sub', 'mul'), - ('mul', 'mul_1'), - ('relu', 'add'), - ('mul_1', 'add'), - ] - ) - - def replace_sub_graph(self, graph: Graph, match: dict): - consumers = [n for n in match if - n not in ['mul', 'mul_1', 'op', 'add', 'abs', 'sub'] and not check_node_usages_out_of_match(match, - n)] - if consumers: - log.warning('PReLUWithAbs pattern was detected. Non pattern consumers of nodes: "{}" were found. Won\'t ' - 'replace '.format(', '.join([match[n].id for n in consumers]))) - return - gamma = match['mul'].in_node(0) if match['mul'].in_node(1).id == match['sub'].id else match['mul'].in_node(1) - prelu_node = PReLU(graph, {'name': '{}/PReLU'.format(match['add'].id)}).create_node([match['op'], gamma]) - match['add'].replace_node(prelu_node) - log.debug('PReLUWithAbs pattern starting from "{}" was collapsed to "{}"'.format(match['op'].id, prelu_node.id)) diff --git a/tools/mo/openvino/tools/mo/front/tf/random_uniform_ext.py b/tools/mo/openvino/tools/mo/front/tf/random_uniform_ext.py deleted file mode 100644 index 1c4919e3aba7ac..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/random_uniform_ext.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.random_uniform import AttributedRandomUniform -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor - - -class RandomUniformExtractor(FrontExtractorOp): - op = 'RandomUniform' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'output_type': tf_dtype_extractor(node.pb.attr["dtype"].type), - 'global_seed': node.pb.attr['seed'].i, - 'op_seed': node.pb.attr['seed2'].i - } - AttributedRandomUniform.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/random_uniform_int_ext.py b/tools/mo/openvino/tools/mo/front/tf/random_uniform_int_ext.py deleted file mode 100644 index 5a335022472a6d..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/random_uniform_int_ext.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.random_uniform import RandomUniform -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor - - -class RandomUniformIntExtractor(FrontExtractorOp): - op = 'RandomUniformInt' - enabled = True - - @classmethod - def extract(cls, node): - attrs = { - 'output_type': tf_dtype_extractor(node.pb.attr["Tout"].type), - 
'global_seed': node.pb.attr['seed'].i, - 'op_seed': node.pb.attr['seed2'].i - } - RandomUniform.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/range_ext.py b/tools/mo/openvino/tools/mo/front/tf/range_ext.py deleted file mode 100644 index df390b442ea6d0..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/range_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor -from openvino.tools.mo.graph.graph import Node - - -class RangeFrontExtractor(FrontExtractorOp): - op = 'Range' - enabled = True - - @classmethod - def extract(cls, node: Node): - Range.update_node_stat(node, {'output_type': tf_dtype_extractor(node.pb.attr['Tidx'].type)}) - return cls.enabled - diff --git a/tools/mo/openvino/tools/mo/front/tf/reduce_ext.py b/tools/mo/openvino/tools/mo/front/tf/reduce_ext.py deleted file mode 100644 index 81f7403ffbed68..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/reduce_ext.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ReduceOps import ReduceProd, ReduceAnd, ReduceMax, ReduceMean, ReduceSum, ReduceL2, \ - ReduceMin, ReduceLogicalOr -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node - - -class AllFrontExtractor(FrontExtractorOp): - op = 'All' - enabled = True - - @classmethod - def extract(cls, node: Node): - keep_dims = node.pb.attr['keep_dims'].b - ReduceAnd.update_node_stat(node, {'keep_dims': keep_dims}) - return cls.enabled - - -class AnyExtractor(FrontExtractorOp): - op = 'Any' - enabled = True - - @classmethod - def extract(cls, node): - ReduceLogicalOr.update_node_stat(node, {'keep_dims': node.pb.attr['keep_dims'].b}) - return cls.enabled - - -class MaxFrontExtractor(FrontExtractorOp): - op = 'Max' - enabled = True - - @classmethod - def extract(cls, node: Node): - ReduceMax.update_node_stat(node, {'keep_dims': node.pb.attr['keep_dims'].b}) - return cls.enabled - - -class MinFrontExtractor(FrontExtractorOp): - op = 'Min' - enabled = True - - @classmethod - def extract(cls, node: Node): - ReduceMin.update_node_stat(node, {'keep_dims': node.pb.attr['keep_dims'].b}) - return cls.enabled - - -class MeanExtractor(FrontExtractorOp): - op = 'Mean' - enabled = True - - @classmethod - def extract(cls, node: Node): - ReduceMean.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b}) - return cls.enabled - - -class ProdFrontExtractor(FrontExtractorOp): - op = 'Prod' - enabled = True - - @classmethod - def extract(cls, node: Node): - ReduceProd.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b}) - return cls.enabled - - -class SumFrontExtractor(FrontExtractorOp): - op = 'Sum' - enabled = True - - @classmethod - def extract(cls, node: Node): - ReduceSum.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b}) - return cls.enabled - - -class EuclideanNormFrontExtractor(FrontExtractorOp): - op = 'EuclideanNorm' - enabled = True - - @classmethod - def extract(cls, node: Node): - ReduceL2.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/register_custom_ops.py 
b/tools/mo/openvino/tools/mo/front/tf/register_custom_ops.py deleted file mode 100644 index 1e2be280b06390..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/register_custom_ops.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementOp, FrontReplacementPattern, FrontReplacementSubgraph -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileSubGraph, FrontReplacementFromConfigFileOp, \ - FrontReplacementFromConfigFileGeneral - - -def get_front_classes(): - front_classes = [FrontExtractorOp, FrontReplacementOp, FrontReplacementPattern, FrontReplacementSubgraph, - FrontReplacementFromConfigFileSubGraph, FrontReplacementFromConfigFileOp, - FrontReplacementFromConfigFileGeneral] - return front_classes diff --git a/tools/mo/openvino/tools/mo/front/tf/replacement.py b/tools/mo/openvino/tools/mo/front/tf/replacement.py deleted file mode 100644 index 62069a1ea80006..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/replacement.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.front.common.custom_replacement_registry import CustomReplacementRegistry -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph, FrontReplacementPattern -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatcher, SubgraphMatch -from openvino.tools.mo.front.tf.custom_subgraph_call import merge_nodes -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils import class_registration -from openvino.tools.mo.utils.graph import is_connected_component -from openvino.tools.mo.utils.replacement_pattern import ReplacementPattern - - -class FrontReplacementFromConfigFileGeneral(FrontReplacementPattern): - """ - Translates graph to transform with the configuration files with custom attributes - """ - replacement_id = "" - - def __init__(self): - super().__init__() - - def transform_graph(self, graph, replacement_descriptions): - raise Exception('Function "transform_graph" must be overridden in the sub-class') - - def find_and_replace_pattern(self, graph: Graph): - replacement_descriptions = CustomReplacementRegistry().get_custom_replacement_description(self.replacement_id) - if replacement_descriptions is None or len(replacement_descriptions) < 1: - log.info("Failed to find custom replacement description with id '{}'".format(self.replacement_id)) - return - for desc in replacement_descriptions: - if 'custom_attributes' in desc._replacement_desc: - self.transform_graph(graph, desc._replacement_desc['custom_attributes']) - else: - log.info("Failed to find \'custom_attributes\' in replacement description with id '{}'".format( - self.replacement_id)) - - registered_ops = {} - registered_cls = [] - - @classmethod - def class_type(cls): - return class_registration.ClassType.FRONT_REPLACER - - -ReplacementPattern.excluded_replacers.append(FrontReplacementFromConfigFileGeneral) - - -class FrontReplacementFromConfigFileSubGraph(FrontReplacementSubgraph): - """ - Replace sub-graph defined in the configuration files with a sub-graph of operations. 
- """ - replacement_id = "" - - def __init__(self): - super().__init__() - - def nodes_to_remove(self, graph: Graph, match: SubgraphMatch): - return match.matched_nodes_names() - - def find_and_replace_pattern(self, graph: Graph): - replacement_descriptions = CustomReplacementRegistry().get_custom_replacement_description(self.replacement_id) - if replacement_descriptions is None: - log.info("Failed to find custom replacement description with id '{}'".format(self.replacement_id)) - return - # there are a list of custom replacements descriptions that have the same replacement id - for replacement_description in replacement_descriptions: - sub_graph_matcher = SubgraphMatcher(replacement_description) - matched_instances = list(sub_graph_matcher.matched_sub_graph_instances(graph)) - if not len(matched_instances): - log.error("Failed to match nodes from custom replacement description with id '{}':\nIt means model and " - "custom replacement description are incompatible.\nTry to correct custom replacement " - "description according to documentation with respect to model node names" - "".format(self.replacement_id)) - for match in matched_instances: - if not is_connected_component(graph, match.matched_nodes_names()): - log.warning("The following nodes don't form connected sub-graph: {}".format( - match.matched_nodes_names())) - # graph.dump_graph_for_graphviz(match.matched_nodes_names()) - self.replace_sub_graph(graph, match) - - registered_ops = {} - registered_cls = [] - - @classmethod - def class_type(cls): - return class_registration.ClassType.FRONT_REPLACER - - -ReplacementPattern.excluded_replacers.append(FrontReplacementFromConfigFileSubGraph) - - -class FrontReplacementFromConfigFileOp(FrontReplacementFromConfigFileSubGraph): - """ - Replace sub-graph defined in the configuration file with as single operation. - """ - replacement_id = "" - - def __init__(self): - super().__init__() - - def input_edges_match(self, # pylint: disable=method-hidden - graph: Graph, - match: SubgraphMatch, - new_sub_graph: dict): - """ - Function that generates matching of sub-graph input edges to a new sub-graph input edges. It works in case when - the sub-graph is replaced with a single custom-layer node. - :param graph: networkX graph to operate on. - :param match: object describing matched sub-graph. - :param new_sub_graph: dictionary of Nodes objects that forms new sub-graph. - :return: object describing edges matching. - """ - input_edges_match = dict() - inputs_count = match.inputs_count() - for sub_graph_input_port in range(inputs_count): - # just create single edge for each input port of the sub-graph - input_node, input_port = match.input_nodes(sub_graph_input_port)[0] - input_edges_match[(input_node.id, input_port)] = (new_sub_graph['new_node'].id, sub_graph_input_port) - return input_edges_match - - def output_edges_match(self, # pylint: disable=method-hidden - graph: Graph, - match: SubgraphMatch, - new_sub_graph: dict): - """ - Function that generates matching of sub-graph output edges to a new sub-graph output edges. It works in case - when the sub-graph is replaced with a single custom-layer node. - :param graph: networkX graph to operate on. - :param match: object describing matched sub-graph. - :param new_sub_graph: dictionary of Nodes objects that forms new sub-graph. - :return: object describing edges matching. 
- """ - output_edges_match = dict() - outputs_count = match.outputs_count() - # prepare output_edges_match based on custom replacement configuration file - for sub_graph_output_port in range(outputs_count): - output_node, output_port = match.output_node(sub_graph_output_port) - output_edges_match[(output_node.id, output_port)] = (new_sub_graph['new_node'].id, sub_graph_output_port) - return output_edges_match - - def generate_sub_graph(self, graph: Graph, match: SubgraphMatch): - replacement_desc = match.custom_replacement_desc - op = Op.get_op_class_by_name(replacement_desc.op)(graph, match.custom_replacement_desc.custom_attributes) - op.default_backend_attrs = list(match.custom_replacement_desc.custom_attributes.keys()) - if 'infer' not in op.attrs: - # update OV attrs - op.substitute_ie_attrs(op.attrs) - node = merge_nodes(graph, match.matched_nodes_names(), replacement_desc.get_inputs_description(), - replacement_desc.get_outputs_description()) - node.name = graph.unique_id(op.attrs['type']) - node_attrs = graph.node[node.id] - # copy attributes which are defined in the custom operation - for key in op.attrs.keys(): - if key not in ['name', 'op']: - node_attrs[key] = op.attrs[key] - # functions below should return nothing because 'merge_nodes' already created input/output edges - self.input_edges_match = lambda gr, ma, new_sub_graph: dict() # pylint: disable=method-hidden - self.output_edges_match = lambda gr, ma, new_sub_graph: dict() # pylint: disable=method-hidden - else: - node = op.add_node(name=op.attrs['type'] + '_') - node.type = op.attrs['type'] - return {'new_node': node} - - registered_ops = {} - registered_cls = [] - - @classmethod - def class_type(cls): - return class_registration.ClassType.FRONT_REPLACER - - -ReplacementPattern.excluded_replacers.append(FrontReplacementFromConfigFileOp) diff --git a/tools/mo/openvino/tools/mo/front/tf/reshape_related_ext.py b/tools/mo/openvino/tools/mo/front/tf/reshape_related_ext.py deleted file mode 100644 index 985070cd7dddc9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/reshape_related_ext.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.rank import Rank -from openvino.tools.mo.ops.size import Size -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_int_list, tf_dtype_extractor -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.squeeze import Squeeze - - -class RankFrontExtractor(FrontExtractorOp): - op = 'Rank' - enabled = True - - @classmethod - def extract(cls, node: Node): - Rank.update_node_stat(node, {'output_type': np.int32}) - return cls.enabled - - -class ReshapeExtractor(FrontExtractorOp): - op = 'Reshape' - enabled = True - - @classmethod - def extract(cls, node: Node): - Reshape.update_node_stat(node, {'special_zero': False}) - return cls.enabled - - -class ShapeExtractor(FrontExtractorOp): - op = 'Shape' - enabled = True - - @classmethod - def extract(cls, node: Node): - Shape.update_node_stat(node, {'output_type': tf_dtype_extractor(node.pb.attr['out_type'].type, np.int32)}) - return cls.enabled - - -class SizeFrontExtractor(FrontExtractorOp): - op = 'Size' - enabled = True - - @classmethod - def extract(cls, node): - Size.update_node_stat(node, {'output_type': 
tf_dtype_extractor(node.pb.attr['out_type'].type, np.int32)}) - return cls.enabled - - -class SqueezeExtractor(FrontExtractorOp): - op = 'Squeeze' - enabled = True - - @classmethod - def extract(cls, node: Node): - Squeeze.update_node_stat(node, {'squeeze_dims': tf_int_list(node.pb.attr['squeeze_dims'].list)}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/resize_bilinear.py b/tools/mo/openvino/tools/mo/front/tf/resize_bilinear.py deleted file mode 100644 index 8112489d6e7428..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/resize_bilinear.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.TFResize import TFResize -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.front.tf.extractors.utils import tf_dtype_extractor - - -class ResizeBilinearFrontExtractor(FrontExtractorOp): - op = 'ResizeBilinear' - enabled = True - - @classmethod - def extract(cls, node): - align_corners = False - if 'align_corners' in node.pb.attr: - align_corners = node.pb.attr['align_corners'].b - - half_pixel_centers = False - if 'half_pixel_centers' in node.pb.attr: - half_pixel_centers = node.pb.attr['half_pixel_centers'].b - - attrs = { - 'align_corners': align_corners, - 'half_pixel_centers': half_pixel_centers, - 'mode': 'linear', - 'data_type': tf_dtype_extractor(node.pb.attr["T"].type), - } - TFResize.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/resize_nearest_neighbor.py b/tools/mo/openvino/tools/mo/front/tf/resize_nearest_neighbor.py deleted file mode 100644 index a9be8e7aa50cc1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/resize_nearest_neighbor.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.TFResize import TFResize -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ResizeNearestNeighborFrontExtractor(FrontExtractorOp): - op = 'ResizeNearestNeighbor' - enabled = True - - @classmethod - def extract(cls, node): - align_corners = False - if 'align_corners' in node.pb.attr: - align_corners = node.pb.attr['align_corners'].b - - half_pixel_centers = False - if 'half_pixel_centers' in node.pb.attr: - half_pixel_centers = node.pb.attr['half_pixel_centers'].b - - attrs = { - 'align_corners': align_corners, - 'half_pixel_centers': half_pixel_centers, - 'mode': 'nearest' - } - TFResize.update_node_stat(node, attrs) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/retinanet.json b/tools/mo/openvino/tools/mo/front/tf/retinanet.json deleted file mode 100644 index 0254fe9d61964a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/retinanet.json +++ /dev/null @@ -1,30 +0,0 @@ -[ - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CORNER", - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP", - "confidence_threshold": 0.05, - "top_k": 6000, - "keep_top_k": 300, - "variance": [0.2, 0.2, 0.2, 0.2] - }, - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "id": "RetinaNetFilteredDetectionsReplacement", - "instances": { - "end_points": [ - "filtered_detections/map/TensorArrayStack/TensorArrayGatherV3", - "filtered_detections/map/TensorArrayStack_1/TensorArrayGatherV3", - "filtered_detections/map/TensorArrayStack_2/TensorArrayGatherV3" - ], - "start_points": [ - 
"regression/concat", - "classification/concat", - "anchors/concat", - "clipped_boxes/Shape" - ] - }, - "match_kind": "points" - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/reverse_sequence.py b/tools/mo/openvino/tools/mo/front/tf/reverse_sequence.py deleted file mode 100644 index 94b40bb6bc5777..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/reverse_sequence.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.reverse_sequence import ReverseSequence -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ReverseSequenceFrontExtractor(FrontExtractorOp): - op = 'ReverseSequence' - enabled = True - - @classmethod - def extract(cls, node): - if node.has_valid('seq_dim'): - return - - ReverseSequence.update_node_stat(node, { - 'seq_axis': node.pb.attr['seq_dim'].i, - 'batch_axis': node.pb.attr['batch_dim'].i, - }) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/reverse_v2.py b/tools/mo/openvino/tools/mo/front/tf/reverse_v2.py deleted file mode 100644 index 9ae3c412331008..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/reverse_v2.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.Reverse import Reverse -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ReverseV2FrontExtractor(FrontExtractorOp): - op = 'ReverseV2' - enabled = True - - @classmethod - def extract(cls, node): - Reverse.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/rfcn_support.json b/tools/mo/openvino/tools/mo/front/tf/rfcn_support.json deleted file mode 100644 index e69be6ee1c56b1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/rfcn_support.json +++ /dev/null @@ -1,106 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "clip_before_nms": true, - "clip_after_nms": false, - "do_not_swap_proposals": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_1/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "FirstStageBoxPredictor/Reshape", - "FirstStageBoxPredictor/Reshape_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": true, - "clip_after_nms": false, - "swap_proposals": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": 
"BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "detection_boxes" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/rfcn_support_api_v1.10.json b/tools/mo/openvino/tools/mo/front/tf/rfcn_support_api_v1.10.json deleted file mode 100644 index b630bc9c9b26db..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/rfcn_support_api_v1.10.json +++ /dev/null @@ -1,145 +0,0 @@ -[ - { - "custom_attributes": {}, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_1/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "FirstStageBoxPredictor/Reshape", - "FirstStageBoxPredictor/Reshape_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": {}, - "id": "ObjectDetectionAPIPSROIPoolingReplacement", - "inputs": [ - [ - { - "node": "Shape$", - "port": 0 - }, - { - "node": "TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ], - [ - { - "node": "TensorArrayUnstack_1/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - }, - { - "node": "TensorArrayUnstack_1/Shape$", - "port": 0 - } - ] - ], - "instances": [ - "SecondStageBoxPredictor/map/", - "SecondStageBoxPredictor/map_1/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "detection_boxes" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/rfcn_support_api_v1.13.json b/tools/mo/openvino/tools/mo/front/tf/rfcn_support_api_v1.13.json deleted file mode 100644 index 5c014ae51357ab..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/rfcn_support_api_v1.13.json +++ /dev/null @@ -1,145 +0,0 @@ -[ - { - "custom_attributes": {}, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - 
"port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - "map_2/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_4/TensorArrayGatherV3" - ], - "start_points": [ - "FirstStageBoxPredictor/Reshape", - "FirstStageBoxPredictor/Reshape_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": {}, - "id": "ObjectDetectionAPIPSROIPoolingReplacement", - "inputs": [ - [ - { - "node": "Shape$", - "port": 0 - }, - { - "node": "TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ], - [ - { - "node": "TensorArrayUnstack_1/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - }, - { - "node": "TensorArrayUnstack_1/Shape$", - "port": 0 - } - ] - ], - "instances": [ - "SecondStageBoxPredictor/map/", - "SecondStageBoxPredictor/map_1/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "detection_boxes" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/rfcn_support_api_v1.14.json b/tools/mo/openvino/tools/mo/front/tf/rfcn_support_api_v1.14.json deleted file mode 100644 index 1b147c79cee82a..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/rfcn_support_api_v1.14.json +++ /dev/null @@ -1,145 +0,0 @@ -[ - { - "custom_attributes": {}, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "operation_to_add": "Proposal", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIProposalReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "map/TensorArrayStack/TensorArrayGatherV3", - 
"map_2/TensorArrayStack/TensorArrayGatherV3", - "BatchMultiClassNonMaxSuppression/map/TensorArrayStack_5/TensorArrayGatherV3" - ], - "start_points": [ - "FirstStageBoxPredictor/Reshape", - "FirstStageBoxPredictor/Reshape_1", - "GridAnchorGenerator/Identity", - "Shape" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPIDetectionOutputReplacement", - "inputs": [ - [ - { - "node": "Reshape$", - "port": 0 - } - ], - [ - { - "node": "Reshape_1$", - "port": 0 - } - ], - [ - { - "node": "ExpandDims$", - "port": 0 - } - ] - ], - "instances": [ - ".*SecondStagePostprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "BatchMultiClassNonMaxSuppression/map/TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": {}, - "id": "ObjectDetectionAPIPSROIPoolingReplacement", - "inputs": [ - [ - { - "node": "Shape$", - "port": 0 - }, - { - "node": "TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ], - [ - { - "node": "TensorArrayUnstack_1/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - }, - { - "node": "TensorArrayUnstack_1/Shape$", - "port": 0 - } - ] - ], - "instances": [ - "SecondStageBoxPredictor/map/", - "SecondStageBoxPredictor/map_1/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "TensorArrayStack/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "outputs": "detection_boxes" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/roll_ext.py b/tools/mo/openvino/tools/mo/front/tf/roll_ext.py deleted file mode 100644 index de52db2272cc78..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/roll_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.roll import Roll -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class RollExtractor(FrontExtractorOp): - op = 'Roll' - enabled = True - - @classmethod - def extract(cls, node): - Roll.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/scatter_nd_ext.py b/tools/mo/openvino/tools/mo/front/tf/scatter_nd_ext.py deleted file mode 100644 index a9539afbf4f0ef..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/scatter_nd_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.scatternd import TFScatterND -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class ScatterNDExtractor(FrontExtractorOp): - op = 'ScatterNd' - enabled = True - - @classmethod - def extract(cls, node): - TFScatterND.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/select_ext.py b/tools/mo/openvino/tools/mo/front/tf/select_ext.py deleted file mode 100644 index 711e2524db835b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/select_ext.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.select import Select -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node - - -class SelectExtractor(FrontExtractorOp): - op = 'Select' - enabled = True 
- - @classmethod - def extract(cls, node: Node): - Select.update_node_stat(node, {'format': 'tf',}) - return cls.enabled - - -class SelectV2Extractor(FrontExtractorOp): - op = 'SelectV2' - enabled = True - - @classmethod - def extract(cls, node: Node): - Select.update_node_stat(node, {'format': 'tf'}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/sign_ext.py b/tools/mo/openvino/tools/mo/front/tf/sign_ext.py deleted file mode 100644 index f162bc17f46c1f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/sign_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import Sign -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SignExtractor(FrontExtractorOp): - op = 'Sign' - enabled = True - - @classmethod - def extract(cls, node): - Sign.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/slice_ext.py b/tools/mo/openvino/tools/mo/front/tf/slice_ext.py deleted file mode 100644 index 6ae0bbd3701626..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/slice_ext.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.slice import TFSlice - - -class SliceExtractor(FrontExtractorOp): - op = 'Slice' - enabled = True - - @classmethod - def extract(cls, node: Node): - TFSlice.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/softmax_ext.py b/tools/mo/openvino/tools/mo/front/tf/softmax_ext.py deleted file mode 100644 index 5679cdfe1fac01..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/softmax_ext.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.softmax import Softmax - - -class SoftmaxExtractor(FrontExtractorOp): - op = 'Softmax' - enabled = True - - @classmethod - def extract(cls, node): - # the default value for the TF Softmax is -1 - axis = -1 - if 'axis' in node.pb.attr: - axis = node.pb.attr['axis'].i - Softmax.update_node_stat(node, {'axis': axis}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/softplus_ext.py b/tools/mo/openvino/tools/mo/front/tf/softplus_ext.py deleted file mode 100644 index 83e969404f4a2b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/softplus_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.activation_ops import SoftPlus - - -class SoftPlusExtractor(FrontExtractorOp): - op = 'Softplus' - enabled = True - - @classmethod - def extract(cls, node): - SoftPlus.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/space_to_batch.py b/tools/mo/openvino/tools/mo/front/tf/space_to_batch.py deleted file mode 100644 index d5c7b6f79f9721..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/space_to_batch.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from 
openvino.tools.mo.ops.elementwise import Sub -from openvino.tools.mo.ops.rank import Rank -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs, create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.pad import Pad -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class BatchToSpaceNormalizer(FrontReplacementPattern): - """ - This transformation converts BatchToSpace, SpaceToBatch operations (TensorFlow semantic) - to BatchToSpace, SpaceToBatch operations (OpenVINO semantic). - Refer to the Op implementation for the operations semantics description. - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.front.rank_decomposer import RankDecomposer - return [RankDecomposer] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='SpaceToBatch') + graph.get_op_nodes(op='BatchToSpace'): - node.add_input_port(3, skip_if_exist=True) - - # convert TF representation of the pads/crops as [N, 2] to OV representation: [N] and [N] - transposed_pads = create_op_with_const_inputs(graph, Transpose, {1: int64_array([1, 0])}) - node.in_port(2).get_connection().set_destination(transposed_pads.in_port(0)) - split_pads = create_op_with_const_inputs(graph, Split, {1: int64_array(0)}, {'num_splits': 2}) - transposed_pads.out_port(0).connect(split_pads.in_port(0)) - for port_ind in range(2): - node.in_port(port_ind + 2).connect(split_pads.out_port(port_ind)) - node.in_port(port_ind + 2).get_connection().insert_node( - create_op_with_const_inputs(graph, Squeeze, {1: int64_array([0])})) - - # add zeros/ones to related inputs to align it with data input - in0_rank = Rank(graph, {'name': node.name + '/rank_0'}).create_node() - in1_shape = Shape(graph, {'name': node.name + '/rank_1'}).create_node() - - diff_size = Sub(graph, {'name': node.name + '/sub_0'}).create_node() - diff = Sub(graph, {'name': node.name + '/sub_1'}).create_node() - const_begin = Const(graph, {'value': int64_array([1])}).create_node() - const_pad_val = Const(graph, {'value': int64_array(1)}).create_node() - - block_shape = Pad(graph, {'name': node.name + '/aligned_block_shape', 'mode': 'constant'}).create_node() - - # in case of SpaceToBatch begin = pads_begin, end = pads_end - # in case of BatchToSpace begin = crops_begin, end = crops_end - new_begin_name = '/aligned_pads_begin' - new_end_name = '/aligned_pads_end' - if node.type == 'BatchToSpace': - new_begin_name = '/aligned_crops_begin' - new_end_name = '/aligned_crops_end' - - begin = Pad(graph, {'name': node.name + new_begin_name, 'mode': 'constant'}).create_node() - end = Pad(graph, {'name': node.name + new_end_name, 'mode': 'constant'}).create_node() - - in0_rank_1d = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]), - {'name': node.name + '/1d_rank_of_0'}, in0_rank) - - node.in_port(0).get_source().connect(in0_rank.in_port(0)) - node.in_port(1).get_source().connect(in1_shape.in_port(0)) - in0_rank_1d.out_port(0).connect(diff_size.in_port(0)) - in1_shape.out_port(0).connect(diff_size.in_port(1)) - 
diff_size.out_port(0).connect(diff.in_port(0)) - const_begin.out_port(0).connect(diff.in_port(1)) - const_pad_val.out_port(0).connect(block_shape.in_port(3)) - - inputs_array = [block_shape, begin, end] - for idx, input_to_node in enumerate(inputs_array): - name_of_input_to_node = input_to_node.name - node.in_port(idx + 1).get_connection().set_destination(input_to_node.in_port(0)) - const_begin.out_port(0).connect(input_to_node.in_port(1)) - diff.out_port(0).connect(input_to_node.in_port(2)) - input_to_node.out_port(0).connect(node.in_port(idx + 1)) - convert = Cast(graph, {'name': name_of_input_to_node + '/i64', 'dst_type': np.int64}).create_node() - input_to_node.in_port(0).get_connection().insert_node(convert) diff --git a/tools/mo/openvino/tools/mo/front/tf/space_to_batch_ext.py b/tools/mo/openvino/tools/mo/front/tf/space_to_batch_ext.py deleted file mode 100644 index 5cfa28b6afb19e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/space_to_batch_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.space_to_batch import SpaceToBatch - - -class SpaceToBatchFrontExtractor(FrontExtractorOp): - op = 'SpaceToBatchND' - enabled = True - - @classmethod - def extract(cls, node): - SpaceToBatch.update_node_stat(node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/space_to_depth_ext.py b/tools/mo/openvino/tools/mo/front/tf/space_to_depth_ext.py deleted file mode 100644 index 6e99271c404d11..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/space_to_depth_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.space_to_depth import SpaceToDepth -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SpaceToDepthFrontExtractor(FrontExtractorOp): - op = 'SpaceToDepth' - enabled = True - - @classmethod - def extract(cls, node): - # update the attributes of the node - block_size = node.pb.attr['block_size'].i - data_format = node.pb.attr['data_format'].s.decode('utf-8') - SpaceToDepth.update_node_stat(node, {'block_size': block_size, 'data_format': data_format}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/sparse_fill_empty_rows_ext.py b/tools/mo/openvino/tools/mo/front/tf/sparse_fill_empty_rows_ext.py deleted file mode 100644 index 9ff68256f2d888..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/sparse_fill_empty_rows_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.sparse_fill_empty_rows import SparseFillEmptyRows -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SparseFillEmptyRowsFrontExtractor(FrontExtractorOp): - op = 'SparseFillEmptyRows' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {} - - SparseFillEmptyRows.update_node_stat(node, attrs) - - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/sparse_segment_mean_ext.py b/tools/mo/openvino/tools/mo/front/tf/sparse_segment_mean_ext.py deleted file mode 100644 index 232f85791bfc32..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/sparse_segment_mean_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.sparse_segment_mean import 
SparseSegmentMean -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SparseSegmentMeanFrontExtractor(FrontExtractorOp): - op = 'SparseSegmentMean' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {} - - SparseSegmentMean.update_node_stat(node, attrs) - - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/sparse_segment_sqrtn_ext.py b/tools/mo/openvino/tools/mo/front/tf/sparse_segment_sqrtn_ext.py deleted file mode 100644 index 8e53bf8637df6f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/sparse_segment_sqrtn_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.sparse_segment_sqrtn import SparseSegmentSqrtN -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SparseSegmentSqrtNFrontExtractor(FrontExtractorOp): - op = 'SparseSegmentSqrtN' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {} - - SparseSegmentSqrtN.update_node_stat(node, attrs) - - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/sparse_segment_sum_ext.py b/tools/mo/openvino/tools/mo/front/tf/sparse_segment_sum_ext.py deleted file mode 100644 index 593d426fed53cb..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/sparse_segment_sum_ext.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.sparse_segment_sum import SparseSegmentSum -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class SparseSegmentSumFrontExtractor(FrontExtractorOp): - op = 'SparseSegmentSum' - enabled = True - - @classmethod - def extract(cls, node): - attrs = {} - - SparseSegmentSum.update_node_stat(node, attrs) - - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/sparse_to_dense_replacer.py b/tools/mo/openvino/tools/mo/front/tf/sparse_to_dense_replacer.py deleted file mode 100644 index 6a4857ab547da4..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/sparse_to_dense_replacer.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.replacement import FrontReplacementOp -from openvino.tools.mo.graph.graph import Node, Graph, rename_nodes -from openvino.tools.mo.ops.broadcast import Broadcast -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.scatternd import ScatterNDUpdate - - -class SparseToDenseReplacer(FrontReplacementOp): - """ - This replacer substitutes TensorFlow SparseToDense operation with Broadcast -> ScatterND chain. - The Broadcast operation creates a tensor filled with default value and of required shape. - The ScatterND operation updates the created tensor with required values at required locations. 
- """ - op = "SparseToDense" - enabled = True - - def run_after(self): - from openvino.tools.mo.front.tf.CTCGreedyDecoderReplacement import CTCGreedyDecoderReplacement - from openvino.tools.mo.front.tf.CTCLossReplacement import CTCLossReplacement - return [CTCGreedyDecoderReplacement, CTCLossReplacement] - - def replace_op(self, graph: Graph, node: Node): - node_name = node.soft_get('name', node.id) - - # broadcast default value to required shape - broadcast_node = Broadcast(graph, {'name': node_name + '/Broadcast_'}).create_node() - node.in_port(1).get_connection().set_destination(broadcast_node.in_port(1)) - if not node.in_port(3).disconnected(): - node.in_port(3).get_connection().set_destination(broadcast_node.in_port(0)) - else: - broadcast_node.in_port(0).connect(Const(graph, {'name': broadcast_node.name + '/FillValue_', - 'value': np.float32(0)} - ).create_node().out_port(0)) - - # update broadcasted tensor with required values at required locations - scatternd_node = ScatterNDUpdate(graph, {'name': node_name + '/ScatterNDUpdate_'}).create_node() - scatternd_node.in_port(0).connect(broadcast_node.out_port(0)) - node.in_port(0).get_connection().set_destination(scatternd_node.in_port(1)) - node.in_port(2).get_connection().set_destination(scatternd_node.in_port(2)) - - rename_nodes([(node, node_name + "/AbandonedName"), (scatternd_node, node_name)]) - - return [scatternd_node.id] diff --git a/tools/mo/openvino/tools/mo/front/tf/split_ext.py b/tools/mo/openvino/tools/mo/front/tf/split_ext.py deleted file mode 100644 index 6f5ded238c7376..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/split_ext.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.split import VariadicSplit, Split, AttributedSplit -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node - - -class SplitVExtractor(FrontExtractorOp): - op = 'SplitV' - enabled = True - - @classmethod - def extract(cls, node: Node): - VariadicSplit.update_node_stat(node, {'out_ports_count': node.pb.attr['num_split'].i, - 'swap_axis_and_split_size_inputs': True}) - return cls.enabled - - -class UnpackExtractor(FrontExtractorOp): - op = 'Unpack' - enabled = True - - @classmethod - def extract(cls, node: Node): - pb = node.pb - AttributedSplit.update_node_stat(node, - { - 'axis': pb.attr['axis'].i, - 'num_splits': pb.attr['num'].i, - 'squeeze_axis': True, - }) - return cls.enabled - - -class SplitExtractor(FrontExtractorOp): - op = 'Split' - enabled = True - - @classmethod - def extract(cls, node: Node): - pb = node.pb - Split.update_node_stat(node, { - 'num_splits': pb.attr['num_split'].i, - 'input_port': 1, - }) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/ssd_support.json b/tools/mo/openvino/tools/mo/front/tf/ssd_support.json deleted file mode 100644 index 3c728ec956119c..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ssd_support.json +++ /dev/null @@ -1,64 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": 
"map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP", - "clip_before_nms": true, - "clip_after_nms": false - }, - "id": "ObjectDetectionAPISSDPostprocessorReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "detection_boxes", - "detection_scores", - "num_detections" - ], - "start_points": [ - "Postprocessor/Shape", - "Postprocessor/Slice", - "Postprocessor/ExpandDims", - "Postprocessor/Reshape_1", - "Postprocessor/ToFloat" - ] - }, - "match_kind": "points" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v1.14.json b/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v1.14.json deleted file mode 100644 index 5fee821300e0c0..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v1.14.json +++ /dev/null @@ -1,64 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPISSDPostprocessorReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "detection_boxes", - "detection_scores", - "num_detections" - ], - "start_points": [ - "Postprocessor/Shape", - "Postprocessor/scale_logits", - "Postprocessor/Tile", - "Postprocessor/Reshape_1", - "Postprocessor/Cast" - ] - }, - "match_kind": "points" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v1.15.json b/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v1.15.json deleted file mode 100644 index b3e76e94099e34..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v1.15.json +++ /dev/null @@ -1,64 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPISSDPostprocessorReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "detection_boxes", - "detection_scores", - "num_detections" - ], - 
"start_points": [ - "Postprocessor/Shape", - "Postprocessor/scale_logits", - "Postprocessor/Tile", - "Postprocessor/Reshape_1", - "Postprocessor/Cast_1" - ] - }, - "match_kind": "points" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v2.0.json b/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v2.0.json deleted file mode 100644 index 8c62fdd3340e77..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v2.0.json +++ /dev/null @@ -1,49 +0,0 @@ -[ - { - "custom_attributes": { - "start_nodes": ["StatefulPartitionedCall/Preprocessor/unstack"], - "end_nodes": ["StatefulPartitionedCall/Preprocessor/stack"] - }, - "id": "ObjectDetectionAPIPreprocessor2Replacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP", - "clip_before_nms": false, - "clip_after_nms": true, - "disable_prior_boxes_layers_generator": true - }, - "id": "ObjectDetectionAPISSDPostprocessorReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/Identity", - "StatefulPartitionedCall/Identity_1", - "StatefulPartitionedCall/Identity_2", - "StatefulPartitionedCall/Identity_3", - "StatefulPartitionedCall/Identity_4", - "StatefulPartitionedCall/Identity_5", - "StatefulPartitionedCall/Identity_6", - "StatefulPartitionedCall/Identity_7" - ], - "start_points": [ - "StatefulPartitionedCall/Postprocessor/Reshape_1", - "StatefulPartitionedCall/Postprocessor/scale_logits", - "StatefulPartitionedCall/Postprocessor/Tile", - "StatefulPartitionedCall/Postprocessor/Cast_1" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "outputs": "StatefulPartitionedCall/Identity,StatefulPartitionedCall/Identity_1,StatefulPartitionedCall/Identity_2,StatefulPartitionedCall/Identity_3,StatefulPartitionedCall/Identity_4,StatefulPartitionedCall/Identity_5,StatefulPartitionedCall/Identity_6,StatefulPartitionedCall/Identity_7" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v2.4.json b/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v2.4.json deleted file mode 100644 index 8ccb3d0de7c5bf..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ssd_support_api_v2.4.json +++ /dev/null @@ -1,52 +0,0 @@ -[ - { - "custom_attributes": { - "start_nodes": ["StatefulPartitionedCall/map/TensorArrayUnstack/TensorListFromTensor", - "StatefulPartitionedCall/map/Shape"], - "end_nodes": ["StatefulPartitionedCall/map/TensorArrayV2Stack/TensorListStack", - "StatefulPartitionedCall/map/TensorArrayV2Stack_1/TensorListStack"] - }, - "id": "ObjectDetectionAPIPreprocessor2Replacement", - "match_kind": "general" - }, - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP", - "clip_before_nms": false, - "clip_after_nms": true, - "disable_prior_boxes_layers_generator": true - }, - "id": "ObjectDetectionAPISSDPostprocessorReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "StatefulPartitionedCall/Identity", - "StatefulPartitionedCall/Identity_1", - "StatefulPartitionedCall/Identity_2", - "StatefulPartitionedCall/Identity_3", - "StatefulPartitionedCall/Identity_4", - 
"StatefulPartitionedCall/Identity_5", - "StatefulPartitionedCall/Identity_6", - "StatefulPartitionedCall/Identity_7" - ], - "start_points": [ - "StatefulPartitionedCall/Postprocessor/raw_box_encodings", - "StatefulPartitionedCall/Postprocessor/scale_logits", - "StatefulPartitionedCall/Postprocessor/Tile", - "StatefulPartitionedCall/Postprocessor/Cast_1", - "StatefulPartitionedCall/Postprocessor/Cast" - ] - }, - "match_kind": "points" - }, - { - "custom_attributes": { - "outputs": "StatefulPartitionedCall/Identity,StatefulPartitionedCall/Identity_1,StatefulPartitionedCall/Identity_2,StatefulPartitionedCall/Identity_3,StatefulPartitionedCall/Identity_4,StatefulPartitionedCall/Identity_5,StatefulPartitionedCall/Identity_6,StatefulPartitionedCall/Identity_7" - }, - "id": "ObjectDetectionAPIOutputReplacement", - "match_kind": "general" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/ssd_toolbox_detection_output.json b/tools/mo/openvino/tools/mo/front/tf/ssd_toolbox_detection_output.json deleted file mode 100644 index 2407e02d96bef1..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ssd_toolbox_detection_output.json +++ /dev/null @@ -1,28 +0,0 @@ -[ - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "confidence_threshold": 0.01, - "keep_top_k": 200, - "nms_threshold": 0.45, - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP" - }, - "id": "SSDToolboxDetectionOutput", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "SSD/concat_reshape_softmax/mbox_loc_final", - "SSD/concat_reshape_softmax/mbox_conf_final", - "SSD/fc7_priorbox" - ], - "start_points": [ - "SSD/concat_reshape_softmax/mbox_loc_final", - "SSD/concat_reshape_softmax/mbox_conf_final", - "SSD/fc7_priorbox" - ] - }, - "match_kind": "points" - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/ssd_toolbox_multihead_detection_output.json b/tools/mo/openvino/tools/mo/front/tf/ssd_toolbox_multihead_detection_output.json deleted file mode 100644 index bc5674dd521526..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ssd_toolbox_multihead_detection_output.json +++ /dev/null @@ -1,28 +0,0 @@ -[ - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "confidence_threshold": 0.01, - "keep_top_k": 200, - "nms_threshold": 0.45, - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP" - }, - "id": "SSDToolboxDetectionOutput", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "SSD/concat_reshape_softmax/mbox_loc_final", - "SSD/concat_reshape_softmax/mbox_conf_final", - "SSD/concat_reshape_softmax/mbox_priorbox" - ], - "start_points": [ - "SSD/concat_reshape_softmax/mbox_loc_final", - "SSD/concat_reshape_softmax/mbox_conf_final", - "SSD/concat_reshape_softmax/mbox_priorbox" - ] - }, - "match_kind": "points" - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/ssd_v2_support.json b/tools/mo/openvino/tools/mo/front/tf/ssd_v2_support.json deleted file mode 100644 index 70c40f0f709b98..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/ssd_v2_support.json +++ /dev/null @@ -1,64 +0,0 @@ -[ - { - "custom_attributes": { - }, - "id": "ObjectDetectionAPIPreprocessorReplacement", - "inputs": [ - [ - { - "node": "map/Shape$", - "port": 0 - }, - { - "node": "map/TensorArrayUnstack/Shape$", - "port": 0 - }, 
- { - "node": "map/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3$", - "port": 2 - } - ] - ], - "instances": [ - ".*Preprocessor/" - ], - "match_kind": "scope", - "outputs": [ - { - "node": "sub$", - "port": 0 - }, - { - "node": "map/TensorArrayStack_1/TensorArrayGatherV3$", - "port": 0 - } - ] - }, - { - "custom_attributes": { - "code_type": "caffe.PriorBoxParameter.CENTER_SIZE", - "pad_mode": "caffe.ResizeParameter.CONSTANT", - "resize_mode": "caffe.ResizeParameter.WARP", - "clip_before_nms": false, - "clip_after_nms": true - }, - "id": "ObjectDetectionAPISSDPostprocessorReplacement", - "include_inputs_to_sub_graph": true, - "include_outputs_to_sub_graph": true, - "instances": { - "end_points": [ - "detection_boxes", - "detection_scores", - "num_detections" - ], - "start_points": [ - "Postprocessor/Shape", - "Postprocessor/scale_logits", - "Postprocessor/Tile", - "Postprocessor/Reshape_1", - "Postprocessor/ToFloat" - ] - }, - "match_kind": "points" - } -] diff --git a/tools/mo/openvino/tools/mo/front/tf/swap_deconv_inputs.py b/tools/mo/openvino/tools/mo/front/tf/swap_deconv_inputs.py deleted file mode 100644 index 2dca7e883a4775..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/swap_deconv_inputs.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.graph.graph import Graph - - -class SwapDeconvInputs(FrontReplacementSubgraph): - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(swap_0_and_2_inputs=True): - shape_src = node.in_port(0).get_source() - node.in_port(0).disconnect() - - node.in_port(2).get_connection().set_destination(node.in_port(0)) - shape_src.connect(node.in_port(2)) - node['swap_0_and_2_inputs'] = False diff --git a/tools/mo/openvino/tools/mo/front/tf/swish_ext.py b/tools/mo/openvino/tools/mo/front/tf/swish_ext.py deleted file mode 100644 index bd1b5cb7dd0a8f..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/swish_ext.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.activation_ops import Swish -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.graph.graph import Node - - -class SwishExtractor(FrontExtractorOp): - op = 'swish_f32' - enabled = True - - @classmethod - def extract(cls, node: Node): - Swish.update_node_stat(node, {}) - return cls.enabled - diff --git a/tools/mo/openvino/tools/mo/front/tf/tensorflow_custom_operations_config_update.py b/tools/mo/openvino/tools/mo/front/tf/tensorflow_custom_operations_config_update.py deleted file mode 100644 index 9f2539a0767690..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/tensorflow_custom_operations_config_update.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import json - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.custom_replacement_config import parse_custom_replacement_config_file -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class TensorflowCustomOperationsConfigUpdate(FrontReplacementPattern): - enabled = True - graph_condition = [lambda graph: 
graph.graph['cmd_params'].tensorflow_custom_operations_config_update is not None] - - def run_before(self): - return [] - - def run_after(self): - from openvino.tools.mo.front.freeze_placeholder_value import FreezePlaceholderValue - return [FreezePlaceholderValue] - - @staticmethod - def save_custom_replacement_config_file(descriptions: list, file_name: str): - """ - Save custom layer(s) description(s) to the file. - :param file_name: file to save description information to. - :param descriptions: list with instances of the CustomLayerDescriptor classes. - :return: True if operation is successful. - """ - try: - json.dump([replacement_desc.get_config_file_representation() for replacement_desc in descriptions], - open(file_name, "w"), indent=4, sort_keys=True) - except Exception as ex: - raise Error("failed to update configuration file {}: {}".format(file_name, str(ex))) - - def find_and_replace_pattern(self, graph: Graph): - argv = graph.graph['cmd_params'] - file_name = argv.tensorflow_custom_operations_config_update - - data = parse_custom_replacement_config_file(file_name) - if data is None: - raise Error("Cannot update the file '{}' because it is broken. ".format(file_name) + refer_to_faq_msg(73)) - - for replacement_desc in data: - replacement_desc.update_custom_replacement_attributes(graph) - - self.save_custom_replacement_config_file(data, file_name) diff --git a/tools/mo/openvino/tools/mo/front/tf/tile_ext.py b/tools/mo/openvino/tools/mo/front/tf/tile_ext.py deleted file mode 100644 index 9eac84939f9f26..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/tile_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.tile import Tile - - -class TileExtractor(FrontExtractorOp): - op = 'Tile' - enabled = True - - @classmethod - def extract(cls, node): - Tile.update_node_stat(node, {}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/topk_ext.py b/tools/mo/openvino/tools/mo/front/tf/topk_ext.py deleted file mode 100644 index 5b06a097228f26..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/topk_ext.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.topk import TopK -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class TopKExtractor(FrontExtractorOp): - op = 'TopK' - enabled = True - - @classmethod - def extract(cls, node): - sort = 'value' if node.pb.attr['sorted'] else 'none' - TopK.update_node_stat(node, {'mode': 'max', 'axis': -1, 'sort': sort, 'k': node.pb.attr['k'].i, - 'index_element_type': np.int32}) - - return cls.enabled - - -class TopKV2Extractor(FrontExtractorOp): - op = 'TopKV2' - enabled = True - - @classmethod - def extract(cls, node): - sort = 'value' if node.pb.attr['sorted'] else 'none' - TopK.update_node_stat(node, {'mode': 'max', 'axis': -1, 'sort': sort, 'index_element_type': np.int32}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/transpose_ext.py b/tools/mo/openvino/tools/mo/front/tf/transpose_ext.py deleted file mode 100644 index b2baf696c5ce18..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/transpose_ext.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.transpose import Transpose -from 
openvino.tools.mo.front.extractor import FrontExtractorOp - - -class TransposeFrontExtractorTF(FrontExtractorOp): - op = 'Transpose' - enabled = True - - @classmethod - def extract(cls, node): - Transpose.update_node_stat(node, {'order': None}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/transposed_mvn_unrolled.py b/tools/mo/openvino/tools/mo/front/tf/transposed_mvn_unrolled.py deleted file mode 100644 index beab52745940ee..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/transposed_mvn_unrolled.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.PowerToEltwises import PowerToEltwises -from openvino.tools.mo.front.tf.mvn_unrolled import MVNUnrolled -from openvino.tools.mo.ops.elementwise import Add, Mul -from openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.replacement import FrontReplacementSubgraph -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape - - -def check_applicability(match: dict) -> bool: - mean = match['mean'] - mean_reduction = mean.in_port(1).get_connection().get_source().node - variance_reduction = match['variance'].in_port(1).get_connection().get_source().node - pow2 = match['pow']['power'] - add = match['add'] - variance = match['variance'] - eps_port_num = 0 if add.in_port(0).get_connection().get_source().node.id != variance.id else 1 - eps = add.in_port(eps_port_num).get_connection().get_source().node - - new_name = match['division'].name + '/MVN/MVN_T_' - - if not (mean_reduction.has_valid('value') and variance_reduction.has_valid('value')): - log.debug('Reduction indices for mean and variance for MVN node {} are not constants'.format(new_name)) - return False - - if not (all(mean_reduction.value == variance_reduction.value)): - log.debug('Reduction indices for mean {} and variance {} do not match.'.format( - mean_reduction.value, - variance_reduction.value - )) - return False - - if not eps.has_valid('value'): - log.debug('epsilon value for MVN node {} is not constant'.format(new_name)) - return False - - if pow2 != 0.5: - log.debug('Power for MVN node {} ({}) is not equal to 0.5'.format(new_name, pow2)) - return False - - return True - - -class TransposedMVNUnrolled(FrontReplacementSubgraph): - """ - This transformation looks for mean value normalization (across selected channels) implemented using simple - operations and replaces found pattern with a sequence Reshape, Transpose, MVN, Transpose, Reshape, Mul, Add. 
- - Here we assume that - 1) the input of 'transpose' is in NHWC layout and is a 4D-tensor - 2) the constant for 'transpose' is equal to [0, 3, 1, 2] - 3) the shape for 'reshape' is [N, C1, C2, H, W] - 4) reduction indices for 'mean' and 'variance' are [2, 3, 4] - 5) the shape of 'reshape2' is equal to [N, C, H, W] - 6) the constant for 'transpose2' is [0, 2, 3, 1] - - Found pattern will be replaced with - nodes=[ - ('new_reshape', dict(kind='op', op='Reshape')), - ('first_permute', dict(kind='op', op='Transpose')), - ('mvn_node', dict(kind='op', op='MVN')), - ('second_permute', dict(kind='op', op='Transpose')), - ('new_reshape2', dict(kind='op', op='Reshape')), - ('new_mul', dict(kind='op', op='Mul')), - ('new_add_2', dict(kind='op', op='Add')) - ], - edges=[ - ('new_reshape', 'first_permute', {'in': 0}), - ('first_permute', 'mvn_node', {'in': 0}), - ('mvn_node', 'second_permute', {'in': 0}), - ('second_permute', 'new_reshape2', {'in': 0}), - ('new_reshape2', 'new_mul', {'in': 0}), - ('new_mul', 'new_add_2', {'in': 0}), - ] - - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.front.tf.mvn import MVNReplacer - return [MVNReplacer, MVNUnrolled, PowerToEltwises] - - def pattern(self): - log.debug('Enabled Transposed MVN replacement') - return dict( - nodes=[ - ('transpose', dict(kind='op', op='Transpose')), - ('reshape', dict(kind='op', op='Reshape')), - ('mean', dict(kind='op', op='ReduceMean')), - ('stop_grad', dict(kind='op', op='StopGradient')), - ('sqdiff', dict(kind='op', op='SquaredDifference')), - ('variance', dict(kind='op', op='ReduceMean')), - ('add', dict(kind='op', op='Add')), - ('pow', dict(kind='op', op='AttributedPower')), - ('sub', dict(kind='op', op='Sub')), - ('division', dict(kind='op', op='Div')), - ('reshape2', dict(kind='op', op='Reshape')), - ('reshape3', dict(kind='op', op='Reshape')), - ('reshape4', dict(kind='op', op='Reshape')), - ('gamma_identity', dict(kind='op', op='Identity')), - ('mul', dict(kind='op', op='Mul')), - ('beta_identity', dict(kind='op', op='Identity')), - ('add2', dict(kind='op', op='Add')), - ('transpose2', dict(kind='op', op='Transpose')), - ], - edges=[ - ('transpose', 'reshape'), - ('reshape', 'mean'), - ('reshape', 'sub', {'in': 0}), - ('reshape', 'sqdiff', {'in': 0}), - ('mean', 'stop_grad', {'in': 0}), - ('stop_grad', 'sqdiff', {'in': 1}), - ('sqdiff', 'variance', {'in': 0}), - ('mean', 'sub', {'in': 1}), - ('variance', 'add'), - ('add', 'pow', {'in': 0}), - ('pow', 'division', {'in': 1}), - ('sub', 'division', {'in': 0}), - ('division', 'reshape2'), - ('reshape2', 'mul', {'in': 0}), - ('reshape3', 'mul', {'in': 1}), - ('gamma_identity', 'reshape3'), - ('mul', 'add2', {'in': 0}), - ('reshape4', 'add2', {'in': 1}), - ('beta_identity', 'reshape4'), - ('add2', 'transpose2'), - ] - ) - - def replace_sub_graph(self, graph: Graph, match: dict): - if not check_applicability(match): - return - - reshape = match['reshape'] - div_name = match['division'].name - - input_shape = Shape(graph, dict(name=div_name + '/shape/MVN_T_')).create_node() - shape_of_reshape = reshape.in_port(1).get_connection().get_source().node.value - c1, c2 = shape_of_reshape[1], shape_of_reshape[2] - c = c1 * c2 - - new_reshape = create_op_node_with_second_input(graph, Reshape, int64_array([0, 0, 0, c1, c2]), - dict(name=div_name + '/first_reshape/MVN_T_')) - permute_order = int64_array([0, 1, 2, 4, 3]) - first_permute = create_op_node_with_second_input(graph, Transpose, permute_order, - dict(name=div_name + '/first_permute/MVN_T_'), new_reshape) - - 
add = match['add'] - variance = match['variance'] - eps_port_num = 0 if add.in_port(0).get_connection().get_source().node.id != variance.id else 1 - eps = add.in_port(eps_port_num).get_connection().get_source().node - mvn_node = create_op_with_const_inputs(graph, MVN, {1: int64_array([1, 2, 3])}, - dict(name=div_name + '/MVN/MVN_T_', - eps=eps.value, normalize_variance=1, - eps_mode='inside_sqrt')) - first_permute.out_port(0).connect(mvn_node.in_port(0)) - - second_permute = create_op_node_with_second_input(graph, Transpose, permute_order, - dict(name=div_name + '/second_permute/MVN_T_'), mvn_node) - new_reshape2 = Reshape(graph, dict(name=div_name + '/second_reshape/MVN_T_')).create_node() - second_permute.out_port(0).connect(new_reshape2.in_port(0)) - gamma_val = np.reshape(match['gamma_identity'].in_port(0).get_connection().get_source().node.value, - int64_array([1, 1, 1, c])) - new_mul = create_op_node_with_second_input(graph, Mul, gamma_val, - dict(name=match['mul'].name + '/MVN_T_'), new_reshape2) - beta_val = np.reshape(match['beta_identity'].in_port(0).get_connection().get_source().node.value, - int64_array([1, 1, 1, c])) - new_add2 = create_op_node_with_second_input(graph, Add, beta_val, - dict(name=match['add2'].name + '/MVN_T_'), new_mul) - - transpose_connection = match['transpose'].in_port(0).get_connection() - before_transpose = transpose_connection.get_source().node - transpose_connection.set_destination(new_reshape.in_port(0)) - input_shape.out_port(0).connect(new_reshape2.in_port(1)) - before_transpose.out_port(0).connect(input_shape.in_port(0)) - match['transpose2'].out_port(0).get_connection().set_source(new_add2.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/front/tf/unique_ext.py b/tools/mo/openvino/tools/mo/front/tf/unique_ext.py deleted file mode 100644 index c74fbcb70f23d2..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/unique_ext.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.unique import Unique -from openvino.tools.mo.front.extractor import FrontExtractorOp - - -class UniqueFrontExtractor(FrontExtractorOp): - op = 'Unique' - enabled = True - - @classmethod - def extract(cls, node): - # TensorFlow Unique operation always returns two outputs: unique elements and indices - # The unique elements in the output are not sorted - attrs = { - 'sorted': 'false', - 'return_inverse': 'true', - 'return_counts': 'false' - } - - Unique.update_node_stat(node, attrs) - - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/variable_ext.py b/tools/mo/openvino/tools/mo/front/tf/variable_ext.py deleted file mode 100644 index cf27d737a74114..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/variable_ext.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import FrontExtractorOp -from openvino.tools.mo.ops.op import Op - - -class VariableExtractor(FrontExtractorOp): - op = 'Variable' - enabled = True - - @classmethod - def extract(cls, node): - Op.update_node_stat(node, {'op': 'FakeConst'}) - return cls.enabled - - -class VariableV2Extractor(FrontExtractorOp): - op = 'VariableV2' - enabled = True - - @classmethod - def extract(cls, node): - Op.update_node_stat(node, {'op': 'FakeConst'}) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/variables_values_freezing.py 
b/tools/mo/openvino/tools/mo/front/tf/variables_values_freezing.py deleted file mode 100644 index 9deeb6456810b9..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/variables_values_freezing.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.loader import variables_to_constants -from openvino.tools.mo.graph.graph import Graph - - -class VariablesToConstants(FrontReplacementPattern): - enabled = True - force_clean_up = True - graph_condition = [lambda graph: graph.graph['variables_values']] - - def run_after(self): - from openvino.tools.mo.front.input_cut import InputCut - return [InputCut] - - def run_before(self): - from openvino.tools.mo.front.freeze_placeholder_value import FreezePlaceholderValue - return [FreezePlaceholderValue] - - def find_and_replace_pattern(self, graph: Graph): - variables_to_constants(graph, graph.graph['variables_values']) - del graph.graph['variables_values'] diff --git a/tools/mo/openvino/tools/mo/front/tf/while_ext.py b/tools/mo/openvino/tools/mo/front/tf/while_ext.py deleted file mode 100644 index 81deccfc117041..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/while_ext.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.loop import Loop -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.front.common.register_custom_ops import check_for_duplicates -from openvino.tools.mo.front.extractor import extract_node_attrs, FrontExtractorOp -from openvino.tools.mo.front.tf.extractor import tf_op_extractor, tf_op_extractors, create_tf_edge -from openvino.tools.mo.front.tf.extractors.subgraph_utils import update_body_graph, convert_graph_inputs_to_parameters, \ - get_graph_proto, create_internal_graph -from openvino.tools.mo.graph.graph import add_opoutput, Graph, Node - - -class WhileExtractor(FrontExtractorOp): - """ - The While operation is a variation of the while_loop primitive from TensorFlow 2 Python API. - While can have stateful operations in the body and condition graphs that does not influence on inference so - the logic for handling While and StatelessWhile (see below) is the same. 
- """ - op = 'While' - enabled = True - - @classmethod - def extract(cls, loop_node): - Loop.update_node_stat(loop_node, {}) - - # check that required body and condition functions exist in the graph library - main_graph = loop_node.graph - body_graph_proto = get_graph_proto(main_graph, 'body', loop_node) - cond_graph_proto = get_graph_proto(main_graph, 'cond', loop_node) - - body_graph = create_internal_graph(main_graph) - loop_node['body'] = body_graph - # create Parameter nodes for the body graph - body_parameters, body_parameter_names = convert_graph_inputs_to_parameters(body_graph, body_graph_proto) - - # update the loop body graph with the body function graph - body_results = [] - update_body_graph(body_graph, body_graph_proto, body_parameter_names, body_results) - - # update the loop body graph with the condition function graph - update_body_graph(body_graph, cond_graph_proto, body_parameter_names, body_results) - - # add 'internal_layer_id' attribute which is a must have attribute for the loop body node - for idx, body_node in enumerate(body_graph.get_op_nodes()): - body_node['internal_layer_id'] = idx - - body_graph.stage = 'front' - - # Currently, - # Loop Inputs Order: - # 0 - current iteration - # 1 - trip count - # 2.. - "loop carried" dependencies variables - # - # Body Inputs Order: - # 0 - current iteration - # 1 - trip count - # 2.. - "loop carried" dependencies variables - # - # Body Outputs Order: - # 0 - current iteration - # 1 - trip count - # 2.. - "loop carried" dependencies variables - # - # Loop Outputs Order: - # 0 - current iteration - # 1 - trip count - # 2.. - "loop carried" dependencies variables - # - # so inputs must be reordered and execution condition must be created in the front transformation - # to be aligned with the specification - - # connect external input ports with body parameter nodes except current iteration - # since it must be disconnected from external port - for idx in range(1, len(body_parameters)): - Loop.connect_body_input(loop_node, idx, body_parameters[idx]) - - # mark current iteration input Parameter node and execution condition Result node - Loop.mark_current_iteration_parameter_node(loop_node, body_parameters[0]) - Loop.mark_execution_condition_result_node(loop_node, body_results[-1]) - - # connect back edges in the body except current iteration - for idx in range(1, len(body_parameters)): - Loop.add_back_edge(loop_node, body_parameters[idx], body_results[idx]) - - # connect body outputs with Loop operation output ports except the execution condition result - for idx in range(len(body_results) - 1): - Loop.connect_body_output(loop_node, idx, body_results[idx]) - - # run function to parse body nodes attributes similar to the main graph - extract_node_attrs(body_graph, lambda node: tf_op_extractor(node, check_for_duplicates(tf_op_extractors))) - return cls.enabled - - -class StatelessWhileExtractor(FrontExtractorOp): - """ - The StatelessWhile operation is a variation of the while_loop primitive from TensorFlow 2 Python API. - StatelessWhile does not have stateful operations in the body and condition graphs. 
- """ - op = 'StatelessWhile' - enabled = True - - @classmethod - def extract(cls, loop_node): - WhileExtractor.extract(loop_node) - return cls.enabled diff --git a/tools/mo/openvino/tools/mo/front/tf/yolo_v1.json b/tools/mo/openvino/tools/mo/front/tf/yolo_v1.json deleted file mode 100644 index 78f2e97c8a3acd..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/yolo_v1.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "id": "TFYOLO", - "match_kind": "general", - "custom_attributes": { - "classes": 20, - "coords": 4, - "num": 3, - "do_softmax": 0 - } - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/yolo_v1_tiny.json b/tools/mo/openvino/tools/mo/front/tf/yolo_v1_tiny.json deleted file mode 100644 index a14d5166e2f7ea..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/yolo_v1_tiny.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "id": "TFYOLO", - "match_kind": "general", - "custom_attributes": { - "classes": 20, - "coords": 4, - "num": 2, - "do_softmax": 0 - } - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/yolo_v2.json b/tools/mo/openvino/tools/mo/front/tf/yolo_v2.json deleted file mode 100644 index b77df24f2b1a21..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/yolo_v2.json +++ /dev/null @@ -1,13 +0,0 @@ -[ - { - "id": "TFYOLO", - "match_kind": "general", - "custom_attributes": { - "anchors": [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828], - "classes": 80, - "coords": 4, - "num": 5, - "do_softmax": 1 - } - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/yolo_v2_tiny.json b/tools/mo/openvino/tools/mo/front/tf/yolo_v2_tiny.json deleted file mode 100644 index b77df24f2b1a21..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/yolo_v2_tiny.json +++ /dev/null @@ -1,13 +0,0 @@ -[ - { - "id": "TFYOLO", - "match_kind": "general", - "custom_attributes": { - "anchors": [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828], - "classes": 80, - "coords": 4, - "num": 5, - "do_softmax": 1 - } - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/yolo_v2_tiny_voc.json b/tools/mo/openvino/tools/mo/front/tf/yolo_v2_tiny_voc.json deleted file mode 100644 index e2c3cd6a435ad5..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/yolo_v2_tiny_voc.json +++ /dev/null @@ -1,13 +0,0 @@ -[ - { - "id": "TFYOLO", - "match_kind": "general", - "custom_attributes": { - "anchors": [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52], - "classes": 20, - "coords": 4, - "num": 5, - "do_softmax": 1 - } - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/yolo_v2_voc.json b/tools/mo/openvino/tools/mo/front/tf/yolo_v2_voc.json deleted file mode 100644 index 6be299dd1120cd..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/yolo_v2_voc.json +++ /dev/null @@ -1,13 +0,0 @@ -[ - { - "id": "TFYOLO", - "match_kind": "general", - "custom_attributes": { - "anchors": [1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071], - "classes": 20, - "coords": 4, - "num": 5, - "do_softmax": 1 - } - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/yolo_v3.json b/tools/mo/openvino/tools/mo/front/tf/yolo_v3.json deleted file mode 100644 index 3d94074221649b..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/yolo_v3.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "id": "TFYOLOV3", - "match_kind": 
"general", - "custom_attributes": { - "classes": 80, - "anchors": [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326], - "coords": 4, - "num": 9, - "masks":[[6, 7, 8], [3, 4, 5], [0, 1, 2]], - "entry_points": ["detector/yolo-v3/Reshape", "detector/yolo-v3/Reshape_4", "detector/yolo-v3/Reshape_8"] - } - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/yolo_v3_tiny.json b/tools/mo/openvino/tools/mo/front/tf/yolo_v3_tiny.json deleted file mode 100644 index 0c0d8718235ede..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/yolo_v3_tiny.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "id": "TFYOLOV3", - "match_kind": "general", - "custom_attributes": { - "classes": 80, - "anchors": [10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319], - "coords": 4, - "num": 6, - "masks": [[3, 4, 5], [0, 1, 2]], - "entry_points": ["detector/yolo-v3-tiny/Reshape", "detector/yolo-v3-tiny/Reshape_4"] - } - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/tf/yolo_v3_voc.json b/tools/mo/openvino/tools/mo/front/tf/yolo_v3_voc.json deleted file mode 100644 index b8abcc0c6a67c6..00000000000000 --- a/tools/mo/openvino/tools/mo/front/tf/yolo_v3_voc.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "id": "TFYOLOV3", - "match_kind": "general", - "custom_attributes": { - "classes": 20, - "anchors": [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326], - "coords": 4, - "num": 9, - "masks":[[6, 7, 8], [3, 4, 5], [0, 1, 2]], - "entry_points": ["detector/yolo-v3/Reshape", "detector/yolo-v3/Reshape_4", "detector/yolo-v3/Reshape_8"] - } - } -] \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/front/transformations_config.py b/tools/mo/openvino/tools/mo/front/transformations_config.py deleted file mode 100644 index ea75b66820a33e..00000000000000 --- a/tools/mo/openvino/tools/mo/front/transformations_config.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.custom_replacement_registry import CustomReplacementRegistry -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.tf.replacement import FrontReplacementFromConfigFileOp -from openvino.tools.mo.graph.graph import Graph - - -class TransformationsConfig(FrontReplacementPattern): - enabled = True - # do not run this transformation recursively otherwise transformations which are enabled with a configuration file - # will be registered multiple times - run_not_recursively = True - graph_condition = [lambda graph: graph.graph['cmd_params'].transformations_config is not None] - - def run_before(self): - from openvino.tools.mo.front.pass_separator import FrontStart - return [FrontStart] - - def run_after(self): - from openvino.tools.mo.load.loader import LoadFinish - return [LoadFinish] - - def find_and_replace_pattern(self, graph: Graph): - argv = graph.graph['cmd_params'] - transformations_config = argv.transformations_config - registry = CustomReplacementRegistry() - registry.add_custom_replacement_description_from_config(transformations_config) - - # automatically generate sub-classes for custom replacements that replace sub-graph with a single node - for replacement_desc in registry.get_all_replacements_descriptions(): - if replacement_desc.has('op'): - transform = type('FrontReplacementFromConfigFileOp' + replacement_desc.op, - (FrontReplacementFromConfigFileOp,), - {'replacement_id': 
replacement_desc.id}) - transform().find_and_replace_pattern(graph) diff --git a/tools/mo/openvino/tools/mo/front/user_data_repack.py b/tools/mo/openvino/tools/mo/front/user_data_repack.py deleted file mode 100644 index 6819084418a1c3..00000000000000 --- a/tools/mo/openvino/tools/mo/front/user_data_repack.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.replacement import FrontReplacementPattern -from openvino.tools.mo.front.extractor import user_data_repack -from openvino.tools.mo.graph.graph import Graph - - -class UserDataRepack(FrontReplacementPattern): - enabled = True - run_not_recursively = True - - def run_after(self): - return [] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - argv = graph.graph['cmd_params'] - - packed_user_shapes, packed_outputs, freeze_placeholder = user_data_repack( - graph, argv.placeholder_shapes, argv.placeholder_data_types, - argv.output, argv.freeze_placeholder_with_value) - - # save packed user shapes in arguments since nodes names and their ports - # will be required to compose placeholder names with custom types - # for MOCLegacyTransformations - argv.packed_user_shapes = packed_user_shapes - - graph.graph['user_shapes'] = packed_user_shapes - graph.graph['packed_outputs'] = packed_outputs - graph.graph['freeze_placeholder'] = freeze_placeholder - - if argv.inputs_list is not None and isinstance(argv.inputs_list, list) and len(argv.inputs_list) > 0: - graph.inputs_order = argv.inputs_list - if argv.output is not None and isinstance(argv.output, list) and len(argv.output) > 0: - graph.outputs_order = argv.output - - inputs = list(packed_user_shapes.keys()) \ - if packed_user_shapes is not None and isinstance(packed_user_shapes, dict) else None - graph.graph['inputs'] = inputs # save user defined inputs for other extensions diff --git a/tools/mo/openvino/tools/mo/graph/__init__.py b/tools/mo/openvino/tools/mo/graph/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/graph/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/graph/connection.py b/tools/mo/openvino/tools/mo/graph/connection.py deleted file mode 100644 index 348948baabee5c..00000000000000 --- a/tools/mo/openvino/tools/mo/graph/connection.py +++ /dev/null @@ -1,332 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from collections import namedtuple -from copy import deepcopy - -from openvino.tools.mo.utils.error import Error - - -class Connection: - def __init__(self, graph, source, destinations: list, control_flow=False): - self.graph = graph - self.source = source - self.destinations = destinations - self.control_flow = control_flow - self.data = namedtuple('Data', ['get_value', 'get_shape']) - self.data.get_value = self._get_value - self.data.get_shape = self._get_shape - - def _get_value(self): - if self.graph.stage == 'front': - return None - return self.source.node.out_node(self.source.idx, control_flow=self.control_flow).value - - def _get_shape(self): - if self.graph.stage == 'front': - return None - return self.source.node.out_node(self.source.idx, control_flow=self.control_flow).shape - - @staticmethod - def _get_new_tensor_debug_info(attributes_save_mode: str, source_attrs: dict, dest_attrs: dict): - source_debug_info 
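The TransformationsConfig pass removed just above registers the replacement descriptions from a transformations config file and then builds one replacement class per 'op' entry at runtime with type(). A standalone sketch of that dynamic-subclass mechanism, using made-up class and description names rather than the real MO types:

```python
# Generate one subclass per replacement description with type(), the same
# mechanism TransformationsConfig uses above. ReplacementBase is illustrative.
class ReplacementBase:
    replacement_id = None

    def apply(self, graph):
        print(f"applying {self.replacement_id} to {graph}")


descriptions = [{"op": "TFYOLO", "id": "TFYOLO"}, {"op": "TFYOLOV3", "id": "TFYOLOV3"}]

for desc in descriptions:
    transform_cls = type("ReplacementFromConfig" + desc["op"],
                         (ReplacementBase,),
                         {"replacement_id": desc["id"]})
    transform_cls().apply("dummy-graph")
```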
= [] - attr_name = 'fw_tensor_debug_info' - if attr_name in source_attrs: - source_debug_info = source_attrs[attr_name] - dest_debug_info = [] - if attr_name in dest_attrs: - dest_debug_info = dest_attrs[attr_name] - if attributes_save_mode == "merge": - return source_debug_info + dest_debug_info - elif attributes_save_mode == "source": - return source_debug_info - else: - return dest_debug_info - - @staticmethod - def _update_tensor_debug_info(attrs: dict, new_value: list): - if new_value is not None and len(new_value) > 0: - attrs['fw_tensor_debug_info'] = new_value - else: - if 'fw_tensor_debug_info' in attrs: - del attrs['fw_tensor_debug_info'] - - def get_source(self): - return self.source - - def get_destination(self): - if self.destinations and len(self.destinations) > 1: - raise Error("Connection has more than one destination: {}".format(len(self.destinations))) - return self.destinations[0] if self.destinations else None - - def get_destinations(self): - return self.destinations - - def set_source(self, port, attributes_save_mode=None): - # In this method we are changing source for a connection with given port. - # See detailed example below. - # - # SOURCE - Op1(out_port:0) - # - # | Op4(in_port:0) - # DESTINATIONS - | Op3(in_port:0) - # | Op2(in_port:0) - # - # NEW PORT - Op5(out_port:0) - # - # ,--->Op4(in_port:0) - # CONNECTION ,--->Op3(in_port:0) - # Op1(out_port:0)--->Op2(in_port:0) - # - # When we set source for connection we disconnect existing source and reconnect all consumers to - # the new given port with type='out'. - # - # UPDATED CONNECTION ,--->Op4(in_port:0) - # ,--->Op3(in_port:0) - # Op5(out_port:0)--->Op2(in_port:0) - # - # attributes_save_mode defines which attributes with tensor debug information should be - # transferred to resulting connection. - # 'source' - attributes are transferred from the outgoing edge (front phase) or - # outgoing data node (middle/back phase) of the source of resulting connection. - # 'dest' - attributes are transferred from the incoming edge (front phase) or - # incoming data node (middle/back phase) of the destination of resulting connection. - # 'merge' - attributes from source and destination are merged. - - if port.type == 'in': - raise Error("Wrong port type in set_source method. Should be 'out' but given 'in'") - - if self.control_flow is True: - raise Error("Cannot operate with connection with control_flow=True") - - if attributes_save_mode is None: - attributes_save_mode = "merge" - if self.source is not None: - scr_node = self.source.node - - # Force "source" mode for "Parameter" source node, which preserves tensor names for - # source node in connection. 
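_get_new_tensor_debug_info above reduces the two candidate 'fw_tensor_debug_info' lists according to attributes_save_mode ('merge', 'source' or 'dest'). The policy in isolation, as a hedged standalone sketch with illustrative tensor names:

```python
# Minimal sketch of the attributes_save_mode policy described above
# (merge / source / dest); the lists stand in for 'fw_tensor_debug_info' entries.
def pick_debug_info(mode, source_info, dest_info):
    if mode == "merge":
        return source_info + dest_info
    if mode == "source":
        return source_info
    return dest_info  # "dest"


print(pick_debug_info("merge", [("conv1", "conv1:0")], [("relu1", "relu1:0")]))
print(pick_debug_info("source", [("conv1", "conv1:0")], [("relu1", "relu1:0")]))
```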
- if scr_node.soft_get("type") == "Parameter": - attributes_save_mode = "source" - - if self.graph.stage == 'front': - scr_node = port.node - - source_fw_names = [] - for dst_port in port.get_connection().destinations: - edge_attrs, u, v, key = dst_port.get_in_edge_attrs(data=True) - for attr in edge_attrs: - if attr == "fw_tensor_debug_info": - source_fw_names += edge_attrs[attr] - # remove duplicates - source_fw_names = list(set(source_fw_names)) - if not source_fw_names: - attrs = {} - else: - attrs = {'fw_tensor_debug_info': source_fw_names} - - # Reconnecting all destinations as consumers to the source port preserving edge attrs - for dst_port in self.destinations: - edge_attrs, u, v, key = dst_port.get_in_edge_attrs(data=True) - if u is not None: - edge_attrs['out'] = port.idx - - new_tensor_info = self._get_new_tensor_debug_info(attributes_save_mode, attrs, edge_attrs) - self._update_tensor_debug_info(edge_attrs, new_tensor_info) - - self.graph.remove_edge(u, v, key=key) - self.graph.add_edge(scr_node.id, v, **edge_attrs) - else: - if attributes_save_mode == "dest": - attrs = {} - self.graph.create_edge(scr_node, dst_port.node, port.idx, dst_port.idx, edge_attrs=attrs) - else: - # Create out data node if not exists and mark node with need_shape_inference = True - # In case if data node exists just use it. - port._create_data_if_necessary() - port_out_data = port.node.out_node(port.idx) - - attrs = {} - if self.source is not None and self.source.idx in self.source.node.out_nodes(): - source_out_data = self.source.node.out_node(self.source.idx) - # Copy attrs from source_out_data to port_out_data - attrs = deepcopy(source_out_data.attrs()) - if attributes_save_mode != "source": - # Remove debug info - if 'fw_tensor_debug_info' in source_out_data.attrs(): - del self.graph.node[source_out_data.id]['fw_tensor_debug_info'] - # Copy attrs to new data node - for attr in attrs: - if attr != 'fw_tensor_debug_info': - port_out_data[attr] = attrs[attr] - - new_tensor_info = self._get_new_tensor_debug_info(attributes_save_mode, port_out_data.attrs(), attrs) - self._update_tensor_debug_info(port_out_data.attrs(), new_tensor_info) - - for dst_port in self.destinations: - edge_attrs, u, v, key = dst_port.get_in_edge_attrs(data=True) - if u is not None: - self.graph.remove_edge(u, v, key=key) - self.graph.add_edge(port_out_data.id, v, **edge_attrs) - else: - self.graph.add_edge(port_out_data.id, dst_port.node.id, **{'in': dst_port.idx}) - - def set_destination(self, port, attributes_save_mode=None): - # In this method we are changing destination for a connection with given port with type 'in'. - # This method requires exactly one destination or empty destinations list. - # See detailed example below. - # - # SOURCE - Op1(out_port:0) - # - # DESTINATIONS - Op2(in_port:0) - # - # NEW PORT - Op3(in_port:0) - # - # CONNECTION - # Op1(out_port:0)--->Op2(in_port:0) - # - # When we set destination for connection we disconnect destination port if exists and connect source to - # the new given port with type='in'. - # - # UPDATED CONNECTION - # - # Op1(out_port:0)--->Op3(in_port:0) - # - # attributes_save_mode defines which attributes with tensor debug information should be - # transferred to resulting connection. - # 'source' - attributes are transferred from the outgoing edge (front phase) or - # outgoing data node (middle/back phase) of the source of resulting connection. 
- # 'dest' - attributes are transferred from the incoming edge (front phase) or - # incoming data node (middle/back phase) of the destination of resulting connection. - # 'merge' - attributes from source and destination are merged. - - def check_and_remove_edge(): - if self.destinations: - for destination in self.destinations: - edge_attrs, u, v, key = destination.get_in_edge_attrs(data=True) - if u is None: - raise Error( - "Broken Connection object! Destination (node:{}) is not connected to source.".format( - destination.node.name)) - destination.disconnect() - return edge_attrs, key - return {}, None - - if self.destinations and len(self.destinations) > 1: - raise Error("set_destination applicable only for connections that has exactly one destination or " - "when there is no destinations") - - if port.type == 'out': - raise Error("Wrong port type in set_destination method. Should be 'in' but given 'out'") - - if self.control_flow is True: - raise Error("Cannot operate with connection with control_flow=True") - - if attributes_save_mode is None: - attributes_save_mode = "merge" - if self.source is not None: - scr_node = self.source.node - - # Force "source" mode for "Parameter" source node, which preserves tensor names for - # source node in connection. - if scr_node.soft_get("type") == "Parameter": - attributes_save_mode = "source" - - if self.graph.stage == 'front': - if self.source is not None: - node = self.source.node - source_attrs, _ = check_and_remove_edge() - dest_attrs = port.get_in_edge_attrs() or {} - - edge_attrs = {} - new_tensor_info = self._get_new_tensor_debug_info(attributes_save_mode, source_attrs, dest_attrs) - self._update_tensor_debug_info(edge_attrs, new_tensor_info) - - self.graph.create_edge(node, port.node, out_port=self.source.idx, in_port=port.idx, - edge_attrs=edge_attrs) - self.destinations = [port] - else: - # create out node if not exists and mark node with need_shape_inference = True - # in case if data node exists just use it as is - if self.source is not None: - data_node = self.source._create_data_if_necessary() - edge_attrs, key = check_and_remove_edge() - edge_attrs.update({'in': port.idx}) - - dest_attrs = {} - if port.idx in port.node.in_nodes(): - dest_attrs = port.node.in_node(port.idx).attrs() - - new_tensor_info = self._get_new_tensor_debug_info(attributes_save_mode, data_node.attrs(), dest_attrs) - self._update_tensor_debug_info(data_node.attrs(), new_tensor_info) - - self.graph.add_edge(data_node.id, port.node.id, key=key, **edge_attrs) - self.destinations = [port] - - def add_destination(self, port): - # In this method we are adding destination port with type 'in' for a connection. - # See detailed example below. - # - # SOURCE - Op1(out_port:0) - # - # DESTINATIONS - Op2(in_port:0) - # - # NEW PORT - Op3(in_port:0) - # - # CONNECTION - # Op1(out_port:0)--->Op2(in_port:0) - # - # When we set destination for connection we disconnect destination port if exists and connect source to - # the new given port with type='in'. 
- # - # UPDATED CONNECTION - # ,-->Op3(in_port:0) - # Op1(out_port:0)--->Op2(in_port:0) - # - - if self.control_flow is True: - raise Error("Cannot operate with connection with control_flow=True") - - if self.source is None: - raise Error("Can not add destination for connection without source port!") - - if self.graph.stage == 'front': - node = self.source.node - self.graph.create_edge(node, port.node, out_port=self.source.idx, in_port=port.idx) - else: - data_node = self.source._create_data_if_necessary() - self.graph.add_edge(data_node.id, port.node.id, **{'in': port.idx}) - - self.destinations.append(port) - - def remove(self): - # This method deletes all edges in connection. After that connection is not more accessible. - # See detailed example below. - # - # SOURCE - Op1(out_port:0) - # - # | Op4(in_port:0) - # DESTINATIONS - | Op3(in_port:0) - # | Op2(in_port:0) - # - # ,--->Op4(in_port:0) - # CONNECTION ,--->Op3(in_port:0) - # Op1(out_port:0)--->Op2(in_port:0) - # - # After removing edges connection will be empty - # - # REMOVED CONNECTION - # Op5(out_port:0) Op4(in_port:0) Op2(in_port:0) Op3(in_port:0) - # - - if self.destinations: - for dst_port in self.destinations: - dst_port.disconnect() - self.source = None - self.destinations = [] - - def insert_node(self, new_node, attributes_save_mode: str = "merge"): - assert len(new_node.out_ports()) == 1, 'The node {} has several output ports'.format(new_node.soft_get('name')) - source_port = self.get_source() - self.set_source(new_node.out_port(0), attributes_save_mode) - source_port.connect(new_node.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/graph/graph.py b/tools/mo/openvino/tools/mo/graph/graph.py deleted file mode 100644 index db0f97efebed23..00000000000000 --- a/tools/mo/openvino/tools/mo/graph/graph.py +++ /dev/null @@ -1,1348 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import collections -import logging as log -from copy import deepcopy -from typing import List - -import networkx as nx -import numpy as np - -from openvino.tools.mo.graph.port import Port -from openvino.tools.mo.middle.passes.eliminate import mark_output_reachable_nodes, shape_inference, mark_undead_nodes, \ - mark_const_producer_nodes, eliminate_dead_nodes, add_constant_operations -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg, deprecated_api, shrink_str_value - - -def dict_to_ordered_dict(d: dict, func=lambda t: t): - return collections.OrderedDict(sorted(d.items(), key=lambda t: func(t[0]))) - - -class Node: - def __init__(self, graph, node: str): - assert node in graph, "Attempt to access node {} that not in graph".format(node) - - super(Node, self).__setattr__('graph', graph) - super(Node, self).__setattr__('node', node) # obsolete - super(Node, self).__setattr__('id', node) - - def __str__(self, max_length: int = 100): - node_dict = self.graph.node[self.id] - print_dict = {k: v if k != 'value' else shrink_str_value(v, max_symbols=max_length) for k, v in - node_dict.items()} - return str(print_dict) - - def __setattr__(self, k, v): - # you can assign only existing attributes - attrs = self.graph.node[self.node] - if not k in attrs: - raise AttributeError("Attribute {} missing in {} node".format(k, self.name)) - if k == 'version' and attrs.get(k, v) != v: - raise AttributeError("Attribute 'version' cannot be updated in {} node".format(self.name)) - - attrs[k] = v - - def __getattr__(self, k): - return self.graph.node[self.node][k] - - def 
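set_source, set_destination and add_destination above all perform the same basic rewiring on the underlying networkx MultiDiGraph: detach an edge and re-create it against a different port while keeping its attributes. A toy analogue of the set_source case (moving every consumer of Op1:0 onto Op5:0); the node names and attributes are illustrative and this is not the removed Connection API:

```python
import networkx as nx

g = nx.MultiDiGraph()
g.add_nodes_from(["Op1", "Op2", "Op3", "Op4", "Op5"])
for dst in ["Op2", "Op3", "Op4"]:
    g.add_edge("Op1", dst, out=0, **{"in": 0})

# Reconnect every consumer of Op1's output 0 to Op5's output 0,
# preserving the edge attributes of each consumer edge.
for u, v, key, attrs in list(g.in_edges(["Op2", "Op3", "Op4"], keys=True, data=True)):
    g.remove_edge(u, v, key=key)
    g.add_edge("Op5", v, **attrs)

print(list(g.edges(data=True)))
```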
__getitem__(self, k): - return self.graph.node[self.node][k] - - def __setitem__(self, k, v): - if k == 'version' and self.graph.node[self.node].get(k, v) != v: - raise AttributeError("Attribute 'version' cannot be updated in {} node".format(self.name)) - self.graph.node[self.node][k] = v - - def __contains__(self, k): - return self.has(k) - - def __eq__(self, other): - return ( - self.__class__ == other.__class__ and - self.graph == other.graph and - self.id == other.id - ) - - def __hash__(self): - return hash((self.graph, self.id)) - - def __delitem__(self, k): - del self.graph.node[self.node][k] - - def add_input_port(self, idx, skip_if_exist=False, **kwargs): - if not self.has_valid('_in_ports'): - Node(self.graph, self.id)['_in_ports'] = {} - control_flow = kwargs['control_flow'] if kwargs.get('control_flow') is not None else False - if skip_if_exist is False and idx in self.in_ports(control_flow=control_flow): - raise Error("Input port with {} index already exists for {} node.".format(idx, self.name)) - self._in_ports.update({idx: kwargs}) - - def delete_input_ports(self, idx_set, skip_if_absent=False): - if len(idx_set) == 0: - return # there is nothing to delete - for idx in idx_set: - self.delete_input_port(idx, skip_if_absent) - - def delete_input_port(self, idx, skip_if_absent=False): - if not self.has_valid('_in_ports'): - raise Error( - 'Cannot removed ports with indices {} from node {} because node doesn\'t ' - 'have _in_ports attribute'.format(idx, self.soft_get('name'))) - # no handling of control flow edges -- TODO - control_flow = False - if not skip_if_absent and idx not in self.in_ports(control_flow=control_flow): - raise Error("Input port with index {} doesn't exist in node {}.".format(idx, self.soft_get('name'))) - if not self.in_port(idx).disconnected(): - self.in_port(idx).disconnect() - del self._in_ports[idx] - # update in_ports_count for consistency but it is unlikely have any effect somewhere in the code - self['in_ports_count'] = len(self._in_ports) - - def delete_output_port(self, idx, skip_if_absent=False): - if not self.has_valid('_out_ports'): - raise Error( - 'Cannot removed ports with indices {} from node {} because node doesn\'t ' - 'have _out_ports attribute'.format(idx, self.soft_get('name'))) - # no handling of control flow edges -- TODO - control_flow = False - if not skip_if_absent and idx not in self.out_ports(control_flow=control_flow): - raise Error("Output port with index {} doesn't exist in node {}.".format(idx, self.soft_get('name'))) - if not self.out_port(idx).disconnected(): - self.out_port(idx).disconnect() - del self._out_ports[idx] - # update in_ports_count for consistency but it is unlikely have any effect somewhere in the code - self['out_ports_count'] = len(self._out_ports) - - def add_output_port(self, idx, skip_if_exist=False, **kwargs): - if not self.has_valid('_out_ports'): - Node(self.graph, self.id)['_out_ports'] = {} - control_flow = kwargs['control_flow'] if kwargs.get('control_flow') is not None else False - if skip_if_exist is False and idx in self.out_ports(control_flow=control_flow): - raise Error("Output port with {} index already exists for {} node.".format(idx, self.name)) - self._out_ports.update({idx: kwargs}) - - def add_sequence_of_ports(self, type: str, rng): - assert type in ['in', 'out'] - for idx in rng: - if type == 'in': - self.add_input_port(idx, skip_if_exist=True) - if type == 'out': - self.add_output_port(idx, skip_if_exist=True) - - def in_port(self, idx=None, control_flow=False) -> Port: - if not 
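The Node wrapper above stores nothing itself: every attribute read or write is proxied to the per-node dictionary held by the graph, and assigning a previously unknown attribute is rejected. A minimal sketch of that dict-backed proxy pattern (NodeView and the storage dict are made-up names):

```python
# Attributes live in the graph's per-node dict; the wrapper only proxies them,
# mirroring the __getattr__/__setattr__ behaviour of the removed Node class.
class NodeView:
    def __init__(self, storage: dict, node_id: str):
        object.__setattr__(self, "_attrs", storage.setdefault(node_id, {}))

    def __getattr__(self, name):
        try:
            return self._attrs[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        if name not in self._attrs:
            raise AttributeError(f"Attribute {name} missing")
        self._attrs[name] = value


storage = {"conv1": {"op": "Conv2D", "kind": "op"}}
node = NodeView(storage, "conv1")
node.op = "Convolution"
print(storage["conv1"]["op"])  # the write landed in the shared storage dict
```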
self.has_valid('_in_ports'): - raise Error("Operation {} {} has no _in_ports attribute", self.op, self.name) - if idx not in self._in_ports: - raise Error("Input port with index {} is not in node {}".format(idx, self.name)) - if not control_flow and 'control_flow' in self._in_ports[idx] and self._in_ports[idx]['control_flow']: - raise Error("Attempt to access control flow port when it's prohibited for node {}".format(self.name)) - return Port(node=self, idx=idx, type='in', **self._in_ports[idx]) - - def in_ports(self, control_flow=False): - if not self.has_valid('_in_ports'): - raise Error("Operation {} {} has no _in_ports attribute", self.op, self.name) - ports = {} - for idx in self._in_ports: - if control_flow or 'control_flow' not in self._in_ports[idx] or not self._in_ports[idx]['control_flow']: - ports.update({idx: self.in_port(idx, control_flow=control_flow)}) - return dict_to_ordered_dict(ports, func=lambda t: int(str(t).replace('control_flow_', ''))) - - def out_port(self, idx=None, control_flow=False) -> Port: - if not self.has_valid('_out_ports'): - raise Error("Operation {} {} has no _out_ports attribute", self.op, self.name) - if idx not in self._out_ports: - raise Error("Output port with index {} is not in node {}".format(idx, self.name)) - if not control_flow and 'control_flow' in self._out_ports[idx] and self._out_ports[idx]['control_flow']: - raise Error("Attempt to access control flow port when it's prohibited for node {}".format(self.name)) - return Port(node=self, idx=idx, type='out', **self._out_ports[idx]) - - def out_ports(self, control_flow=False): - if not self.has_valid('_out_ports'): - raise Error("Operation {} {} has no _out_ports attribute", self.op, self.name) - ports = {} - for idx in self._out_ports: - if control_flow or 'control_flow' not in self._out_ports[idx] or not self._out_ports[idx]['control_flow']: - ports.update({idx: self.out_port(idx, control_flow=control_flow)}) - return dict_to_ordered_dict(ports, func=lambda t: int(str(t).replace('control_flow_', ''))) - - def has_port(self, port_type, idx, control_flow=False): - assert port_type in ['in', 'out'], "Invalid usage of has_port method" - - if port_type == 'in': - return self.has_valid('_in_ports') and idx in self.in_ports(control_flow=control_flow) - else: - return self.has_valid('_out_ports') and idx in self.out_ports(control_flow=control_flow) - - def is_in_port_connected(self, idx, control_flow=False): - return self.has_port('in', idx, control_flow) and not self.in_port(idx, control_flow).disconnected() - - def is_out_port_connected(self, idx, control_flow=False): - return self.has_port('out', idx, control_flow) and not self.out_port(idx, control_flow).disconnected() - - def attrs(self): - return self.graph.node[self.node] - - def has(self, k): - return k in self.graph.node[self.node] - - def has_valid(self, k): - return self.has(k) and not self.graph.node[self.node][k] is None - - def has_and_set(self, k): - return self.has_valid(k) and self[k] - - def in_nodes_edges(self, control_flow: bool = False): - return dict_to_ordered_dict({x[1]['in']: (Node(self.graph, x[0]), x[1]) for x in - self.get_inputs(control_flow=control_flow)}, - func=lambda t: int(str(t).replace('control_flow_', ''))) - - def in_nodes(self, control_flow: bool = False): - if self.kind == 'op': - return dict_to_ordered_dict({x[1]['in']: Node(self.graph, x[0]) for x in - self.get_inputs(control_flow=control_flow)}, - func=lambda t: int(str(t).replace('control_flow_', ''))) - elif self.kind == 'data': - return [Node(self.graph, 
n) for n, d in self.get_inputs(control_flow=control_flow)] - - def in_node(self, key=0, control_flow: bool = False): - return self.in_nodes(control_flow=control_flow)[key] - - def in_edges(self, control_flow: bool = False): - assert self.has('kind') - assert self.kind in ['op', 'data'] - if self.kind == 'op': - return dict_to_ordered_dict({x[1]['in']: x[1] for x in self.get_inputs(control_flow=control_flow)}, - func=lambda t: int(str(t).replace('control_flow_', ''))) - elif self.kind == 'data': - return [d for n, d in self.get_inputs(control_flow=control_flow)] - - def out_nodes_edges(self, control_flow: bool = False): - return dict_to_ordered_dict({x[1]['out']: (Node(self.graph, x[0]), x[1]) for x in - self.get_outputs(control_flow=control_flow)}, - func=lambda t: int(str(t).replace('control_flow_', ''))) - - def out_nodes(self, control_flow: bool = False): - assert self.has('kind') - assert self.kind in ['op', 'data'] - if self.kind == 'op': - return dict_to_ordered_dict({x[1]['out']: Node(self.graph, x[0]) for x in - self.get_outputs(control_flow=control_flow)}, - func=lambda t: int(str(t).replace('control_flow_', ''))) - elif self.kind == 'data': - return [Node(self.graph, n) for n, d in self.get_outputs(control_flow=control_flow)] - - def out_edges(self, control_flow: bool = False): - assert self.has('kind') - assert self.kind in ['op', 'data'] - if self.kind == 'op': - return dict_to_ordered_dict({x[1]['out']: x[1] for x in self.get_outputs(control_flow=control_flow)}, - func=lambda t: int(str(t).replace('control_flow_', ''))) - elif self.kind == 'data': - return [d for n, d in self.get_outputs(control_flow=control_flow)] - - def out_node(self, key=0, control_flow: bool = False): - return self.out_nodes(control_flow=control_flow)[key] - - def in_edge(self, key=0, control_flow: bool = False): - return self.in_edges(control_flow=control_flow)[key] - - def out_edge(self, key=0, control_flow: bool = False): - return self.out_edges(control_flow=control_flow)[key] - - def get_attrs(self): - return self.graph.node[self.node] - - def get_inputs(self, edge_attr: dict = None, control_flow: bool = False): - if edge_attr is None: - edge_attr = {} - in_edges = self.graph.in_edges(self.id, data=True) - if not control_flow: - in_edges = [(u, v, d) for u, v, d in in_edges if 'control_flow_edge' not in d or not d['control_flow_edge']] - return [(u, d) for u, v, d in in_edges if all([attr in d and d[attr] == edge_attr[attr] for attr in edge_attr])] - - def get_outputs(self, edge_attr: dict = None, control_flow: bool = False): - if edge_attr is None: - edge_attr = {} - out_edges = self.graph.out_edges(self.id, data=True) - if not control_flow: - out_edges = [(u, v, d) for u, v, d in out_edges if - 'control_flow_edge' not in d or not d['control_flow_edge']] - return [(v, d) for u, v, d in out_edges if - all([attr in d and d[attr] == edge_attr[attr] for attr in edge_attr])] - - def get_sorted_inputs(self, control_flow: bool = False): - return sorted([x for x in self.get_inputs(control_flow=control_flow) if 'in' in x[1]], - key=lambda x: x[1]['in']) - - def get_sorted_outputs(self, control_flow: bool = False): - return sorted([x for x in self.get_outputs(control_flow=control_flow) if 'out' in x[1]], - key=lambda x: x[1]['out']) - - def soft_get(self, k, default=''): - return self[k] if self.has_valid(k) else default - - def edges(self, attrs: dict = None): - """ Get a single edge with specified set of attributes. 
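The port and edge accessors above all funnel their results through dict_to_ordered_dict, ordering keys numerically and treating 'control_flow_<n>' keys as their bare index. The helper in isolation, with an illustrative port map:

```python
import collections

# Sort a port->value mapping numerically, treating 'control_flow_<n>' keys
# like plain integer indices, as the accessors above do.
def dict_to_ordered_dict(d, func=lambda t: t):
    return collections.OrderedDict(sorted(d.items(), key=lambda t: func(t[0])))


ports = {"control_flow_2": "cf2", 0: "a", 10: "c", 2: "b"}
ordered = dict_to_ordered_dict(ports, func=lambda t: int(str(t).replace("control_flow_", "")))
print(list(ordered.items()))
```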
- - If none or multiple edges satisfies this criteria, exception is raised - Edge is represented as tuple (u, v, d), where u is source node, - v is destination node and d is edge attributes. - """ - edges = list(self.graph.in_edges([self.id], data=True)) + list(self.graph.out_edges([self.id], data=True)) - return [(u, v, d) for u, v, d in edges if dict_includes(d, attrs)] - - def edge(self, attrs: dict = None): - """ Get a single edge with specified set of attributes. - - If none or multiple edges satisfies this criteria, exception is raised - Edge is represented as tuple (u, v, d), where u is source node, - v is destination node and d is edge attributes. - """ - edges = self.edges(attrs) - assert len(edges) == 1, 'edges: {}, required attributes: {}'.format(edges, attrs) - return edges[0] - - def copy_node(self, new_attrs: dict = None, dst_graph=None): - ''' Copies node with all attributes (optionally updated) within the same graph or to different graph.''' - if new_attrs is None: - new_attrs = {} - if dst_graph is None: - dst_graph = self.graph - - attrs = deepcopy(self.attrs()) - new_id = dst_graph.unique_id(attrs['name']) if 'name' in attrs else dst_graph.unique_id() - attrs['name'] = new_id - attrs.update(new_attrs) - dst_graph.add_node(new_id, **attrs) - return Node(dst_graph, new_id) - - def insert_node_with_data_before(self, inp, new_op_class: callable, op_before_params: dict = None, - infer_current: bool = False, additional_inputs: list = None): - """ - Inserts operation node with op_before_params and data node before current operation - - :param inp: input data node of current node - :param new_op_class: class of operation that will be inserted before current operation node - :param op_before_params: parameters to be added to operation that will be inserted before current operation - - Before calling: - [...] -> inp -> Cur_Op -> Cur_Data -> [...] - - After calling: - [...] -> inp -> New_Op_bef -> New_Data_bef -> Cur_Op -> Cur_Data -> [...] - [op_before_params] - """ - graph = self.graph - node = Node(graph, self.node) - cls_name = new_op_class.op - op_before_params = {} if op_before_params is None else op_before_params - - # operating with input - new_op_before = new_op_class(graph, op_before_params) - edge_attrs = deepcopy(graph.get_edge_data(inp.id, node.id)[0]) - graph.remove_edge(inp.id, node.id) - # form a list of input nodes for a new op node combining new_out and additional_inputs - inputs = [inp] + (additional_inputs if additional_inputs else []) - new_inp = new_op_before.create_node_with_data(inputs, {'name': node.name + cls_name + '/Before'}) - graph.add_edge(new_inp.id, node.id, **edge_attrs) - if infer_current: - node.infer(node) - - def insert_node_with_data_after(self, out, new_op_class: callable, op_after_params: dict = None, - additional_inputs: list = None): - """ - Inserts operation node with op_after_params and data node after current operation - - :param out: output data node of current node - :param new_op_class: class of operation that will be inserted after current operation node - :param op_after_params: parameters to be added to operation that will be inserted after current operation - :param additional_inputs: other parameters for a new operation node in addition to one that is created - at the 'out' placed; new nodes are added after 0-th input - - TODO Allow indexing for input parameters as well as for 'out' data node to explicitly - specify ports that are connected to. - - Before calling: - [...] -> Cur_Op -> Cur_Data -> [...] - - After calling: - [...] 
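insert_node_with_data_before (and its *_after counterpart) splices a new operation into an existing edge while carrying the edge attributes over, as the Before/After diagrams above describe. A simplified networkx sketch of that splice; insert_on_edge and the node names are illustrative:

```python
import networkx as nx

# Splice a new operation node into an existing edge, keeping the edge attributes
# on both halves of the split edge.
def insert_on_edge(graph, src, dst, new_node, key=0):
    attrs = dict(graph.get_edge_data(src, dst, key=key))
    graph.remove_edge(src, dst, key=key)
    graph.add_edge(src, new_node, **attrs)   # [...] -> src -> new_node
    graph.add_edge(new_node, dst, **attrs)   # new_node -> dst -> [...]


g = nx.MultiDiGraph()
g.add_edge("inp", "cur_op", out=0, **{"in": 0})
insert_on_edge(g, "inp", "cur_op", "new_op_before")
print(list(g.edges(data=True)))
```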
-> Cur_Op -> Cur_Data -> New_Op_aft -> New_Data_aft(==out) -> [...] - [op_after_params] - """ - # we import it here because Op imports Node and unique_id from this file - from openvino.tools.mo.ops.op import Op - - graph = self.graph - node = Node(graph, self.node) - cls_name = new_op_class.op - op_after_params = {} if op_after_params is None else op_after_params - - new_op_after = new_op_class(graph, op_after_params) - graph.remove_edge(node.id, out.id) - new_out = Op.create_data_node(graph, node) - node.infer(node) - # form a list of input nodes for a new op node combining new_out and additional_inputs - inputs = [new_out] + (additional_inputs if additional_inputs else []) - new_op_after.create_node_with_data(inputs, {'name': node.name + cls_name + '/After'}, data_nodes=out) - - def bracket_with_different_nodes_with_data(self, inp, out, new_op_class_before: callable, - new_op_class_after: callable, - op_before_params: dict = None, op_after_params: dict = None): - """ - Inserts one operation node with op_before_params and data node before current operation node and - inserts one operation node with op_after_params and data node after current operation node - :param inp: input data node of self.node node - :param out: output data node of self.node node - :param new_op_class_before: class of operation that will be inserted before current operation node - :param new_op_class_after: class of operation that will be inserted after current operation node - :param op_before_params: parameters to be added to operation that will be inserted before current operation - :param op_after_params: parameters to be added to operation that will be inserted after current operation - - Before calling: - [...] -> inp -> Cur_Op -> out -> [...] - - After calling: - [...] -> inp -> New_Op_bef -> New_Data_bef -> Cur_Op -> Cur_Data -> New_Op_aft -> New_Data_aft(==out) -> [...] - [op_before_params] [op_after_params] - """ - op_before_params = {} if op_before_params is None else op_before_params - op_after_params = {} if op_after_params is None else op_after_params - self.insert_node_with_data_before(inp, new_op_class_before, op_before_params) - self.insert_node_with_data_after(out, new_op_class_after, op_after_params) - - def bracket_op_with_another_op(self, inp, out, new_op_class: callable, - op_before_params: dict = None, op_after_params: dict = None): - """ - Covers current operation with two similar another ones of class new_op_class: - :param inp: input data node of self.node node - :param out: output data node of self.node node - :param new_op_class: class of operation with which current operation will be covered - :param op_before_params: parameters to be added to operation that will be inserted before current operation - :param op_after_params: parameters to be added to operation that will be inserted after current operation - - Before calling: - [...] -> inp -> Cur_Op -> out -> [...] - - After calling: - [...] -> inp -> New_Op_bef -> New_Data_bef -> Cur_Op -> Cur_Data -> New_Op_aft -> New_Data_aft(==out) -> [...] - [op_before_params] [op_after_params] - """ - self.bracket_with_different_nodes_with_data(inp=inp, out=out, - new_op_class_before=new_op_class, new_op_class_after=new_op_class, - op_before_params=op_before_params, op_after_params=op_after_params) - - def insert_node_after(self, new_node, node_out_port: int = 0): - """ - Insert node 'new_node' after output with index 'node_out_port' of the node 'node'. 
All consumers of node 'node' - output with index 'node_out_port' will be changed to consume node 'new_node'. - The function should be used when graph doesn't contain data nodes yet. - :param node: node after which new node should be inserted. - :param new_node: node to be inserted. - :param node_out_port: the output index for the node 'node' to insert - :return: None - """ - assert self.graph is new_node.graph - assert (len([name for name in self.graph.nodes() if Node(self.graph, name).soft_get('kind') == 'data']) == 0) - - graph = self.graph - old_edges = list(graph.out_edges(self.id, data=True, keys=True)) - # create new edges first and then remove all old edges. This is needed for case when 'node' has several consumers - # getting input from 'node_out_port'. - # save tuple ("name of the destination edge", "edge key") to be removed - node_name_and_edge_key = [] - for _, dst_name, edge_key, edge_attrs in old_edges: - if edge_attrs['out'] == node_out_port: - log.debug('Create edge from "{}" to "{}"'.format(new_node.name, dst_name)) - graph.create_edge(new_node, Node(graph, dst_name), 0, edge_attrs['in']) - node_name_and_edge_key.append((dst_name, edge_key)) - for dst_name, edge_key in node_name_and_edge_key: - log.debug('Remove edge from "{}" to "{}"'.format(self.id, dst_name)) - graph.remove_edge(self.id, dst_name, edge_key) - graph.create_edge(self, new_node, node_out_port, 0, {}) - - def insert_op_on_input_port(self, in_port_idx: int, new_op_class: callable, new_op_attrs: dict, - value: np.ndarray = None): - """ - Inserts new operation of new_op_class on in_port_index input port with new_op_attrs - Connects Const operation with value to 1 input port of new node if value was passed - - Returns new operation node - """ - graph = self.graph - name = self.soft_get('name', self.id) - - op_node = new_op_class(graph, new_op_attrs).create_node() - - assert self.has_port('in', in_port_idx), \ - 'Node `{}` should have input port with idx `{}` but it does not'.format(name, in_port_idx) - - in_port_source = self.in_port(in_port_idx).get_source() - self.in_port(in_port_idx).get_connection().set_source(op_node.out_port(0)) - op_node.in_port(0).connect(in_port_source) - - if value is not None: - from openvino.tools.mo.ops.const import Const - constant = Const(graph, {'value': value, 'name': op_node.name + '/value'}).create_node() - op_node.in_port(1).connect(constant.out_port(0)) - - return op_node - - def replace_node(self, new_node, new_node_out_port: int = None): - """ - Replaces node 'old_node' with a node 'new_node' preserving edge attributes. - :param old_node: node to be replaced. - :param new_node: node to replace with. 
- :return: None - """ - assert self.graph is new_node.graph - assert self.id != new_node.id, "New node and replaceable node are the same" - graph = self.graph - # save output edges and reconnect them to new node - for _, dst_node_name, edge_attrs in graph.out_edges(self.id, data=True): - new_edge_attrs = deepcopy(edge_attrs) - if new_node_out_port is not None: - assert 'out' not in edge_attrs or edge_attrs['out'] == 0, \ - 'replace_node function can replace old node with a single output port only if new_node_out_port is ' \ - 'specified' - new_edge_attrs.update({'out': new_node_out_port}) - graph.add_edge(new_node.id, dst_node_name, **new_edge_attrs) - - # if the node for replace is output node then we propagate this attribute to a new node - if len(self.out_nodes()) == 1 and self.out_node().has('op') and self.out_node().op == 'Result': - graph.remove_node(self.out_node().id) - add_opoutput(graph, new_node.id, 0, False) - graph.remove_node(self.id) - - def input_ports_with(self, node): - """ - Returns a list of integers that specify input ports that connected to a given node. - :param node: node in the graph that is expected to appear at input port for self node - :return: a list of integers with port indices that are connected to self node - """ - return [i for i in range(len(self.in_nodes())) if self.in_node(i).id == node.id] - - def update_node(self): - """ - Update internal node attributes. Currently it just add input/output ports. - :return: None - """ - in_ports_count = self.in_ports_count if self.has_valid('in_ports_count') else None - out_ports_count = self.out_ports_count if self.has_valid('out_ports_count') else None - - if not self.has_valid('_in_ports'): - Node(self.graph, self.id)['_in_ports'] = dict() - if not self.has_valid('_out_ports'): - Node(self.graph, self.id)['_out_ports'] = dict() - - if in_ports_count is not None: - for idx in range(in_ports_count): - if idx not in self._in_ports: - self.add_input_port(idx=idx) - - if out_ports_count is not None: - for idx in range(out_ports_count): - if idx not in self._out_ports: - self.add_output_port(idx=idx) - - def get_opset(self): - """ - Gets the operation set version where the operation was introduced. 
- If the version is not defined then consider it an extension - :return: the string with the opset name - """ - return self.soft_get('version', 'extension') - - -class Graph(nx.MultiDiGraph): - def __init__(self, data=None, **attr): - self.stage = None - self.strict_mode = True - super().__init__(data, **attr) - - if not hasattr(self, 'node'): - self.node = self.nodes - - unique_id_count = 0 - op_names_statistic = collections.Counter() - inputs_order = [] - outputs_order = [] - - # SAFE API DESCRIPTION - # all provided methods below are designed to be more safe and convenient - # be careful while using other methods from nx.MultiDiGraph - - def add_node(self, node_for_adding, **attrs): - # TODO: check required attrs for node - super().add_node(node_for_adding, **attrs) - node = Node(self, node_for_adding) - node.update_node() - - def add_edge(self, u_for_edge, v_for_edge, key=None, **attr): - - # TODO: turn on strict mode - if self.strict_mode: - unode = Node(self, u_for_edge) - vnode = Node(self, v_for_edge) - - # Check that we connect Op->Op in front phase, and data->Op or Op->data in middle(back) phase - # Also check that all necessary ports are exists - message = "Attempt to connect {} to {}.".format(u_for_edge, v_for_edge) - if self.stage == 'front': - assert unode.kind == 'op' and vnode.kind == 'op', "{} Wrong add_adge usage! You can connect only two " \ - "operations in front phase".format(message) - assert 'in' in attr and 'out' in attr, "Missing necessary attribute in or out when adding edge " \ - "between {} and {}".format(u_for_edge, v_for_edge) - is_control_flow = 'control_flow_edge' in attr and attr['control_flow_edge'] is True - in_port = 'control_flow_{}'.format(attr['in']) if is_control_flow else attr['in'] - out_port = 'control_flow_{}'.format(attr['out']) if is_control_flow else attr['out'] - assert unode.has_port('out', out_port, control_flow=is_control_flow), \ - "{} Missing out port ({}) in {} node".format(message, out_port, unode.soft_get('name', unode.id)) - assert vnode.has_port('in', in_port, control_flow=is_control_flow), \ - "{} Missing in port ({}) in {} node".format(message, in_port, vnode.soft_get('name', vnode.id)) - elif self.stage in ['middle', 'back']: - assert (unode.kind == 'data' and vnode.kind == 'op') or (unode.kind == 'op' and vnode.kind == 'data') - if unode.kind == 'data' and vnode.kind == 'op': - assert 'in' in attr, "Attribute in is missing when adding edge to {}".format(v_for_edge) - assert vnode.has_port('in', attr['in']), "{} Node {} has no in port ({})" \ - "".format(message, vnode.name, attr['in']) - if unode.kind == 'op' and vnode.kind == 'data': - assert 'out' in attr, "Attribute out is missing when adding edge from {}".format(u_for_edge) - assert unode.has_port('out', attr['out']), "{} Node {} has no out port ({})" \ - "".format(message, unode.name, attr['out']) - - return super().add_edge(u_for_edge, v_for_edge, key=key, **attr) - - def add_edges_from(self, ebunch_to_add, **attr): - for e in ebunch_to_add: - ne = len(e) - if ne == 4: - u, v, key, dd = e - elif ne == 3: - u, v, dd = e - key = None - elif ne == 2: - u, v = e - dd = {} - key = None - else: - raise Error("Edge tuple %s must be a 2-tuple, 3-tuple or 4-tuple." 
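Graph.add_edge above enforces stage-dependent invariants in strict mode: in the front phase only op-to-op edges with explicit 'in'/'out' port attributes are allowed, and the referenced ports must exist. A rough standalone sketch of the front-phase check only (validate_front_edge is a made-up helper, not the MO code, and it skips the control-flow and middle/back cases):

```python
import networkx as nx

# In the 'front' stage an edge must connect two 'op' nodes and carry
# both 'in' and 'out' port attributes.
def validate_front_edge(graph, u, v, attrs):
    assert graph.nodes[u].get("kind") == "op" and graph.nodes[v].get("kind") == "op", \
        "front stage connects operations only"
    assert "in" in attrs and "out" in attrs, "edge needs 'in' and 'out' port indices"


g = nx.MultiDiGraph()
g.add_node("a", kind="op")
g.add_node("b", kind="op")
edge_attrs = {"in": 0, "out": 0}
validate_front_edge(g, "a", "b", edge_attrs)
g.add_edge("a", "b", **edge_attrs)
print(list(g.edges(data=True)))
```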
% (e,)) - ddd = attr.copy() - ddd.update(dd) - self.add_edge(u, v, key=key, **ddd) - - def remove_edge(self, u, v, key=None): - return super().remove_edge(u, v, key=key) - - def erase_node(self, node: Node): - """ - Erases node from the graph and reconnect edges from input node(s) to output node(s) - Produces assertion error if the node being removed has multiple inputs or outputs. - The function can be used in the front phase only (when there are no data nodes in the graph). - :param node: Node to erase - """ - node_id = node.id - - inputs = list(self.in_edges(node_id, data=True)) - outputs = list(self.out_edges(node_id, data=True)) - - assert node.kind == 'op' and (len(node.out_nodes()) == 0 or list(node.out_nodes().values())[0].kind != 'data'), \ - "The function must be used before the partial infer when graph doesn't contain data nodes." - assert len(node.out_nodes()) <= 1, "The node {} must produce just one output tensor".format( - node.soft_get('name')) - assert len(inputs) <= 1, "The node {} must have just one input".format(node.soft_get('name')) - - if len(outputs) == 0 and len(inputs) != 0: - from openvino.tools.mo.front.extractor import add_output_ops - input_ids = {input_node_id: {'port': {'out': [attrs['out']]}} for input_node_id, _, attrs in inputs} - if node.has('op') and node.op == 'Result': - add_output_ops(self, input_ids) - - if len(outputs) == 0 or len(inputs) == 0: - self.remove_node(node_id) - return - - input_node_id = inputs[0][0] - for src, dst, attrs in outputs: - self.remove_edge(src, dst) - # update the 'out' attribute of the edge from the node being removed - attrs['out'] = inputs[0][2]['out'] - self.add_edge(input_node_id, dst, **attrs) - self.remove_node(node_id) - - def get_edge_data(self, u, v, key=None, default=None): - return super().get_edge_data(u, v, key=key, default=default) - - def get_inputs_with_ports(self, match, pattern_edges, input_names_in_pattern): - """ - Front replacements of multi-input nodes should specify output port to add_node-like functions - This function is a helper to get such information out of matched nodes - :param graph: graph to operate on - :param match: dictionary returned by matching function - :param pattern_edges: edges that are specified in pattern - :param input_names_in_pattern: names of matched nodes as they were specified in pattern that should be in - resulting list - :return: list of tuples of node and output port - """ - inputs = [] - for name in input_names_in_pattern: - assert name in match, "node named {} not in match {}".format(name, match) - src = match[name] - dst = [] - for edge in pattern_edges: - if edge[0] == name: - assert edge[1] in match, "name from pattern_edges {} not in match {}".format(edge[1], match) - dst.append(match[edge[1]]) - if len(dst) != 1: - raise Error('Multiple output ports detected for node {} as {} in pattern'.format(match[name].id, name)) - dst = dst[0] - out_port = self.get_edge_data(src.id, dst.id)[0]['out'] - inputs.append((src, out_port)) - return inputs - - def get_node_id_by_name(self, name: str): - nodes = self.get_nodes_with_attributes(name=name) - if len(nodes) == 0: - raise Error('No node with name {}. ' + refer_to_faq_msg(51), name) - elif len(nodes) > 1: - raise Error('Multiple nodes with name {}'.format(name)) - else: - return nodes[0] - - def get_op_nodes(self, **attrs): - nodes = self.get_nodes_with_attributes(**dict(kind='op', **attrs)) - return [Node(self, node) for node in nodes] - - def get_data_nodes(self, has_value=None): - """ - Returns list of data nodes. 
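erase_node above removes a single-input, single-output operation before partial inference and re-routes its consumers to the producer, reusing the producer's output port index on the new edges. An illustrative networkx analogue under those same single-input assumptions:

```python
import networkx as nx

# Drop a single-input node and let its consumers read directly from that input,
# keeping each consumer edge's attributes.
def erase_pass_through(graph, node):
    (src, _, src_attrs), = graph.in_edges(node, data=True)
    for _, dst, attrs in list(graph.out_edges(node, data=True)):
        attrs = dict(attrs)
        attrs["out"] = src_attrs["out"]   # reuse the producer's output port
        graph.add_edge(src, dst, **attrs)
    graph.remove_node(node)               # also removes its remaining edges


g = nx.MultiDiGraph()
g.add_edge("producer", "identity", out=0, **{"in": 0})
g.add_edge("identity", "consumer", out=0, **{"in": 1})
erase_pass_through(g, "identity")
print(list(g.edges(data=True)))
```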
- If has_value = True, returns data nodes with value - If has_value = False, returns data nodes without value - """ - data_nodes = [Node(self, node) for node in self.nodes() if Node(self, node).soft_get('kind') == 'data'] - return [node for node in data_nodes if has_value is None or node.has_valid('value') == has_value] - - def get_nodes_with_attributes(self, **attrs: dict): - node_attrs = self.nodes(data=True) - return [n for n, d in node_attrs if all(a in d.items() for a in attrs.items())] - - def unique_id(self, prefix: str = ""): - """ - Generates a unique node id for a new node in a given graph. - The optional string prefix can be specified. - """ - # TODO thread safety? - self.unique_id_count = max(self.unique_id_count, self.number_of_nodes()) + 1 - if prefix and not self.has_node(prefix): - return str(prefix) - while self.has_node(prefix + str(self.unique_id_count)): - self.unique_id_count += 1 - return prefix + str(self.unique_id_count) - - def check_empty_graph(self, description: str): - if len(self.nodes()) <= 1: - raise Error( - "Graph contains {} node after executing {}. It considered as error because resulting IR will be " - "empty which is not usual".format(len(self.nodes()), description)) - - def check_shapes_consistency(self): - data_nodes = self.get_data_nodes() - data_nodes_with_wrong_shapes = [] - for data_node in data_nodes: - if not data_node.has('shape'): - data_nodes_with_wrong_shapes.append((data_node.name, "no shape attribute")) - continue - if data_node.shape is not None and not isinstance(data_node.shape, np.ndarray): - data_nodes_with_wrong_shapes.append((data_node.name, type(data_node.shape))) - if len(data_nodes_with_wrong_shapes) > 0: - raise Error("Graph contains data nodes ({}) with inconsistent shapes: {}".format( - len(data_nodes_with_wrong_shapes), - data_nodes_with_wrong_shapes - )) - - def check_nodes_ports_are_consecutive(self): - # Check that all operation nodes has consecutive ports indexes - op_nodes = self.get_op_nodes() - for node in op_nodes: - for idx in range(len(node.in_ports())): - if idx not in node.in_ports(): - raise Error("Node {} has not consecutive in ports indexes: {}".format(node.name, - list(node.in_ports().keys()))) - for idx in range(len(node.out_ports())): - if idx not in node.out_ports(): - raise Error("Node {} has not consecutive out ports indexes: {}".format(node.name, - list( - node.out_ports().keys()))) - - def dump_graph_for_graphviz(self, node_attrs: list = ['kind', 'op', 'shape', 'correct_data_layout', 'nchw_layout', - 'internal_layer_id'], - edge_attrs: list = ['in', 'out'], nodes_to_dump: list = None, - save_to_svg=False, highlight_nodes: list = None): - - from openvino.tools.mo.ops.tensor_iterator import _get_internal_output_node_id, _get_internal_input_node_id - - fill_color = {'op': 'lightblue', 'data': 'whitesmoke', 'highlight': 'firebrick'} - fill_color_by_type = {'Const': 'lightpink', 'Parameter': 'yellowgreen', 'TensorIterator': 'lemonchiffon'} - style = {'op': 'filled,bold', 'data': 'filled,rounded'} - - subgraphs = {} - if highlight_nodes is None: - highlight_nodes = [] - - def _subgraph_label(node_id, node_attrs: dict, attrs_to_print: list): - subgraphs[node_id] = "cluster_{}".format(node_id) - label = 'subgraph "cluster_{}" '.format(node_id) + '{\n' - label += 'label = "{}"; \n'.format(node_id) - label += 'color={}; \nstyle="filled,rounded";\n'.format(fill_color_by_type[node_attrs['op']]) - - subgraph_name = node_attrs['sub_graphs'] - assert len(subgraph_name) == 1 - body = 
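unique_id above hands out node names by returning the bare prefix when it is free and otherwise appending an increasing counter until an unused name is found. A simplified sketch of that naming scheme (it drops the internal counter bookkeeping of the original):

```python
# Return the bare prefix if it is free, otherwise append the smallest unused
# counter value, as the unique_id method above does.
def unique_id(existing_names, prefix="", start=0):
    if prefix and prefix not in existing_names:
        return prefix
    count = start
    while prefix + str(count) in existing_names:
        count += 1
    return prefix + str(count)


names = {"Add", "Add0", "Add1"}
print(unique_id(names, "Add"))   # -> Add2
print(unique_id(names, "Mul"))   # -> Mul
```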
node_attrs[subgraph_name[0]].dump_graph_for_graphviz() - body = body.split('\n')[2:-1] - label += '\n'.join(body) - label += '\n}\n' - return label - - def _node_label(node_id, node_attrs: dict, attrs_to_print: list): - label = str(node_id) + '\\n' + '\\n'.join([str(key) + '=' + str(node_attrs.get(key, 'None')) - for key in attrs_to_print if key in node_attrs]) - if node_attrs.get('type', '') == 'Const': - if 'value' not in attrs_to_print and 'value' in node_attrs: - if node_attrs['value'] is not None: - label += '\\nvalue=\\"' + \ - ','.join([str(val) for val in node_attrs['value'].flatten()])[:40] + '\\"' - else: - label += '\\nvalue=None' - return label - - def _dump_nodes_attrs(): - string = '' - for node_id in nodes_to_dump: - attrs = self.node[node_id] - color = fill_color_by_type.get(attrs.get('type', ''), fill_color[attrs['kind']]) - - if node_id in highlight_nodes or 'highlight' in node_attrs and node_attrs['highlight']: - color = fill_color['highlight'] - - if attrs.get('op') == 'TensorIterator': - string += _subgraph_label(node_id, attrs, node_attrs) - else: - string += '"{}" [fillcolor={} style="{}" shape=box label="{}"];\n'.format( - node_id, color, style[attrs['kind']], _node_label(node_id, attrs, node_attrs)) - return string - - def _dump_edges_attrs(): - string = '' - for src_node_id, dst_node_id, attrs in self.edges(data=True): - if src_node_id not in nodes_to_dump or dst_node_id not in nodes_to_dump: - continue - - if src_node_id in subgraphs: - edge_label = subgraphs[src_node_id] - edge_label_name = 'ltail' - src_node_id = _get_internal_output_node_id(self, src_node_id, attrs['external_port_id']) - elif dst_node_id in subgraphs: - edge_label = subgraphs[dst_node_id] - edge_label_name = 'lhead' - dst_node_id = _get_internal_input_node_id(self, dst_node_id, attrs['external_port_id']) - else: - edge_label = ' '.join( - [str(key) + '=' + str(attrs.get(key, 'None')) for key in edge_attrs if key in attrs]) - edge_label_name = 'label' - - string += '"{}" -> "{}" [{} = "{}"];\n'.format(src_node_id, dst_node_id, edge_label_name, edge_label) - return string - - log.debug("---- GRAPHVIZ OUTPUT STARTS ----") - - if nodes_to_dump is None: - nodes_to_dump = self.nodes() - - string = '\ndigraph {\n' - - string += _dump_nodes_attrs() - string += _dump_edges_attrs() - - string += '}' - log.debug("---- GRAPHVIZ OUTPUT ENDS ----") - - if save_to_svg: - try: - import graphviz - import os - file_name = "{}_{}.txt".format(self.name.replace('/', '_'), 0) - id = 1 - while os.path.exists(file_name): - file_name = "{}_{}.txt".format(self.name.replace('/', '_'), id) - id += 1 - with open(file_name, "w") as f: - f.write(string) - graphviz.render('dot', 'svg', file_name) - print('Graph was saved to {}.{}'.format(file_name, 'svg')) - except ImportError: - raise ImportError('Can\'t import graphviz') - except Exception as e: - raise Error('Can\'t save graph to svg') from e - - return string - - def print_graph_stat(self): - log.debug('Number of nodes in graph: {}'.format(self.number_of_nodes())) - log.debug('Number of edges in graph: {}'.format(len(list(self.edges())))) - ops = collections.defaultdict(int) - for _node in self.nodes(): - node = Node(self, _node) - kind = node.kind if node.has('kind') else '' - if node.has('op'): - ops['op/' + node.op] += 1 - else: - ops[kind] += 1 - if node.has('shape') and np.any(node.shape == 0): - log.error("Found bad shape: '{}' for node '{}'".format(node.shape, node.node)) - for k, v in ops.items(): - log.debug(' {} : {}'.format(k, v)) - - def 
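dump_graph_for_graphviz above renders the graph as a DOT digraph: one filled box per node, colored by kind, with selected attributes in the label, and one labeled edge per port pair. A much-reduced sketch of the same idea; to_dot and the attribute selection are illustrative:

```python
import networkx as nx

# Emit a DOT digraph with one box per node (colored by 'kind') and one
# labeled edge per port pair.
def to_dot(graph):
    fill = {"op": "lightblue", "data": "whitesmoke"}
    lines = ["digraph {"]
    for node_id, attrs in graph.nodes(data=True):
        color = fill.get(attrs.get("kind"), "white")
        lines.append('"{}" [shape=box style=filled fillcolor={} label="{}\\nop={}"];'
                     .format(node_id, color, node_id, attrs.get("op", "None")))
    for u, v, attrs in graph.edges(data=True):
        lines.append('"{}" -> "{}" [label="in={} out={}"];'
                     .format(u, v, attrs.get("in"), attrs.get("out")))
    lines.append("}")
    return "\n".join(lines)


g = nx.MultiDiGraph()
g.add_node("param", kind="op", op="Parameter")
g.add_node("relu", kind="op", op="ReLU")
g.add_edge("param", "relu", out=0, **{"in": 0})
print(to_dot(g))
```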
create_sub_graph_copy(self, nodes_to_extract: list): - """ - Create new graph which is a sub-graph of the 'graph' that contains just nodes from 'nodes_to_extract' list. The - returned sub-graph is a deep copy of the provided graph nodes. - :param graph: graph to create a sub-graph from. - :param nodes_to_extract: list of node names to extract. - :return: new graph. - """ - return self.subgraph(nodes_to_extract).copy() - - def create_edge(self, src_node: Node, dst_node: Node, out_port: int = 0, in_port: int = 0, edge_attrs: dict = None): - """ - Creates edge from node 'src_node' from output with index 'out_port' to node 'dst_node' with input index 'in_port'. - :param src_node: node to create edge from. - :param dst_node: node to create edge to. - :param out_port: the index of output tensor of the 'src_node'. - :param in_port: the input index of the node 'dst_node'. - :param edge_attrs: dictionary with edge attrs. - :return: None - """ - # edges must belong to the same graph - assert src_node.graph is dst_node.graph - graph = src_node.graph - - if edge_attrs is None: - edge_attrs = dict() - else: - edge_attrs = edge_attrs.copy() - edge_attrs.update( - {'in': in_port, 'out': out_port, 'in_attrs': ['in', 'permutation'], 'out_attrs': ['out', 'permutation'], - 'data_attrs': ['fw_tensor_debug_info']}) - - # TODO: in case if in_port do not exists, we should raise an Exception here - graph.add_edges_from([(src_node.id, dst_node.id, edge_attrs)]) - - def dfs(self, node_name: str, visited: set): - """ - Implementation of the depth-first search algorithm starting from the specific node. - :param graph: networkx graph to operate on. - :param node_name: node name to start search from. - :param visited: set of already visited nodes. - :return: list of nodes in the DFS-visit order. - """ - order = [] - stack = [node_name] - while len(stack) != 0: - node_name = stack[0] - stack.pop(0) - visited.add(node_name) - has_child = False - for _, out_node_name in self.out_edges(node_name): - if out_node_name not in visited: - stack.insert(0, node_name) - stack.insert(0, out_node_name) - has_child = True - break - if not has_child: - order.append(node_name) - return order - - def pseudo_topological_sort(self, reverse: bool = False): - """ - The function performs topological sort but doesn't check for cycle existence. So it may produce wrong nodes order - for some applications. - :param graph: graph to pseudo-topologically sort. - :param reverse: flag indicating whether need to reverse nodes order. - :return: nodes in the topological sort if cycle doesn't exist and in pseudo-topological sort if not. 
- """ - nodes_without_inputs = list() - for node_name in self.nodes(): - if len(self.in_edges(node_name)) == 0: - nodes_without_inputs.append(node_name) - order = list() - visited = set() - for node_name in nodes_without_inputs: - if node_name not in visited: - order.extend(self.dfs(node_name, visited)) - - order = [Node(self, node) for node in order] - - if reverse: - return order - else: - return list(reversed(order)) - - def pseudo_topological_sort_with_start_node(self, start_node: Node, reverse: bool = False): - nodes_without_inputs = [start_node.soft_get('name')] - visited = set() - order = self.dfs(nodes_without_inputs[0], visited) - - order = [Node(self, node) for node in order] - - if reverse: - return order - else: - return list(reversed(order)) - - def clean_up(self, undead_node_types: list = None): - if undead_node_types is None: - undead_node_types = [] - - if not getattr(self.graph['cmd_params'], 'static_shape', False): - undead_node_types.extend(['ShapeOf', 'Shape', 'slice_like']) - - mark_output_reachable_nodes(self) - shape_inference(self) - mark_undead_nodes(self, undead_node_types) - mark_const_producer_nodes(self) - eliminate_dead_nodes(self) - # Add Const op for constant data nodes - add_constant_operations(self) - - def get_tensor_names_set(self): - """ - Get set of tensor names of the graph. - """ - tensor_names_set = set() - for node in self.get_op_nodes(): - if self.stage is None: - for out_edge_idx in node.out_edges(): - out_edge = node.out_edge(out_edge_idx) - if "fw_tensor_debug_info" in out_edge: - for _, tensor_name in out_edge["fw_tensor_debug_info"]: - tensor_names_set.add(tensor_name) - else: - for _, port in node.out_ports().items(): - tensor_names = port.get_tensor_names() - tensor_names_set = tensor_names_set.union(set(tensor_names)) - return tensor_names_set - - def has_tensor_name(self, tensor_name: str): - """ - Checks if graph has tensor name. - """ - # TODO: This can be optimized. Tensor names can be stored as set, which is initialized after model loading. - names = self.get_tensor_names_set() - return tensor_name in names - - - def topological_sort(self, reverse: bool = False): - sorted_node_ids = nx.topological_sort(self) - - sorted_nodes = [Node(self, node_id) for node_id in sorted_node_ids] - - if not reverse: - return sorted_nodes - else: - return list(reversed(sorted_nodes)) - - def set_node_attributes(self, name: str, values): - return nx.set_node_attributes(self, values=values, name=name) - - -def fill_graph_with_nodes(graph, src_nodes, get_id: callable, get_attrs: callable): - """ - Go over all nodes in src_nodes that should be enumerable and create new NX nodes - using get_id and get_attrs functions to create node id and node attributes correspondingly. - """ - for node in src_nodes: - graph.add_node(get_id(node), **get_attrs(node)) - - -def dict_includes_compare_attrs(attr, attr_probe): - if callable(attr_probe) and not isinstance(attr_probe, type): - return attr_probe(attr) - else: - res = (attr == attr_probe) - # check if the result of comparison is a numpy scalar value which occur when attr is python scalar and - # attr_probe is a numpy scalar - if hasattr(res, 'ndim') and res.ndim == 0: - return res.item() - return res if isinstance(res, bool) else all(res) - - -def dict_includes(big: dict, sub_dict: dict, skip_attr_names=[]): - """ Searches attributes from sub_dict in big and ensures that all values match. - - Entries in sub_dict can be of two types: callable or not callable. 
If callable is specified - it is treated as probing function for attribute value from big dictionary by callable(attr) expression. - If it is not callable, the values are compared with == operator. - """ - return all( - dict_includes_compare_attrs(big.get(attr, None), sub_dict[attr]) - for attr in sub_dict.keys() if attr not in skip_attr_names - ) - - -def add_opoutput(graph: Graph, node_name: str, port: int, cut: bool = True, keep_output_port: bool = False, - user_defined_name=None): - """ - Creates and connects Result node to node_name port. Cuts existing port if requested. - :param graph: graph to operate with - :param node_name: name of existing node in the graph that we want to add Result to - :param port: output port of node to connect Result to - :param cut: determines way of operating with edge specified by node_name and port - :param keep_output_port: special attribute determines if this operation is saved in IR or not - :param user_defined_name: User defined operation name, which should be added to tensor names list - """ - # we import it here because Op imports add_attrs_props and update_ie_fields from this file - from openvino.tools.mo.ops.result import Result - node = Node(graph, node_name) - if cut and len(node.out_edges()) != 0: - opoutput_node = Result(graph).create_node_on_port(node, port, {'name': node_name + '/sink_port_' + str(port), - 'keep_output_port': keep_output_port}) - else: - opoutput_node = Result(graph).create_node([(node, port)], {'name': node_name + '/sink_port_' + str(port), - 'keep_output_port': keep_output_port}) - opoutput_node.in_edge()['data_attrs'] = ['fw_tensor_debug_info'] - - if user_defined_name is not None and (graph.stage == 'front' or graph.stage is None): - # Following code adds user_defined_name to tensor names list - # Not applicable for middle stage - prev_op_tensor_names = set() - in_edge_attrs = opoutput_node.in_edge() - if 'fw_tensor_debug_info' in in_edge_attrs: - for _, tensor_name in opoutput_node.in_edge()['fw_tensor_debug_info']: - prev_op_tensor_names.add(tensor_name) - if user_defined_name not in prev_op_tensor_names: - if graph.has_tensor_name(user_defined_name): - log.warning('Could not add user defined output name {} to tensor names list of {} node as ' - 'graph contains tensor name with same name.'.format(user_defined_name, - opoutput_node.soft_get('name'))) - else: - if 'fw_tensor_debug_info' not in in_edge_attrs: - in_edge_attrs['fw_tensor_debug_info'] = [] - in_edge_attrs['fw_tensor_debug_info'].append([user_defined_name, user_defined_name]) - - log.debug('Sink: {} for node {}'.format(opoutput_node.id, node_name)) - log.debug(str(graph.node[opoutput_node.id])) - log.debug("Add edge from {} to {}".format(node_name, opoutput_node.id)) - return opoutput_node.id - - -# TODO implement merging for keys with dictionary values? -def merge_edge_props(attrs: dict, additional_attrs: dict): - """ - Update edge attributes without changing 'in' and 'out' keys. 
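A quick illustration of the dict_includes matching semantics described above, mixing a plain value with a callable probe (toy attribute dicts, assuming dict_includes and dict_includes_compare_attrs defined above are in scope):

    node_attrs = {'op': 'Convolution', 'stride': 2}

    # plain values are compared with ==, callables are used as predicates
    assert dict_includes(node_attrs, {'op': 'Convolution'})
    assert dict_includes(node_attrs, {'stride': lambda s: s > 1})
    assert not dict_includes(node_attrs, {'op': 'Pooling'})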
- It is necessary to copy edge attributes during merging of nodes when - result of one subgraph call is passed as input to another subgraph call - """ - result = attrs - for (key, value) in additional_attrs.items(): - if key not in ['in', 'out']: - if type(additional_attrs[key]) is list: - if key not in result: - result[key] = [] - result[key].extend(additional_attrs[key]) - result[key] = list(set(result[key])) # silly solution to find unique elements - else: - result[key] = value - return result - - -def rename_node(node: Node, name): - if not node.graph.get_nodes_with_attributes(name=name): - node.name = name - else: - assert 'Node with name {} already exists'.format(name) - - -def rename_nodes(nodes: List[tuple]): - for node, name in nodes: - rename_node(node, name) - - -def get_edge_attribute_between_nodes(node1: Node, node2: Node, attr_name: str): - """ - Gets edge attribute value between two nodes. - This method is introduced for implementation of manual replacing of nodes attributes - with tensor debug information. It is needed after removing of fake outputs. - Also there are cases when graph transformations lead to mismatch of tensor name - and input node, so manual attribute change is needed. - This method should only be used during the front phase. - And it is applicable only for cases when there is just one edge between two given nodes. - """ - for edge_idx in node1.out_edges(): - edge = node1.out_edge(edge_idx) - out_port = edge['out'] - out_node = node1.out_node(out_port) - if out_node.id == node2.id: - if attr_name in edge: - return edge[attr_name] - return None - - -def set_edge_attribute_between_nodes(node1: Node, node2: Node, attr_name: str, new_value): - """ - Sets edge attribute value between two nodes. - This method is introduced for implementation of manual replacing of nodes attributes - with tensor debug information. It is needed after removing of fake outputs. - Also there are cases when graph transformations lead to mismatch of tensor name - and input node, so manual attribute change is needed. - This method should only be used during the front phase. - And it is applicable only for cases when there is just one edge between two given nodes. 
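To make the merge_edge_props behaviour above concrete, here is a small illustration on hand-written edge dicts (the 'control_flow_edge' key is purely illustrative, and merge_edge_props from above is assumed to be in scope):

    edge = {'in': 0, 'out': 1, 'data_attrs': ['fw_tensor_debug_info']}
    extra = {'in': 5, 'data_attrs': ['fw_tensor_debug_info'], 'control_flow_edge': True}

    # note: the first dict is modified in place and returned
    merged = merge_edge_props(edge, extra)
    # 'in' and 'out' keep their original values, list attrs are unioned,
    # scalar attrs are added or overwritten
    assert merged['in'] == 0 and merged['out'] == 1
    assert merged['control_flow_edge'] is True
    assert merged['data_attrs'] == ['fw_tensor_debug_info']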
- """ - for edge_idx in node1.out_edges(): - edge = node1.out_edge(edge_idx) - out_port = edge['out'] - out_node = node1.out_node(out_port) - if out_node.id == node2.id: - edge[attr_name] = new_value - -# All functions below are deprecated and will be removed in next release -# Please, use methods from Graph/Node classes instead - - -@deprecated_api(Graph) -def get_node_id_by_name(graph: Graph, name: str): - return graph.get_node_id_by_name(name=name) - - -@deprecated_api(Graph) -def print_graph_stat(graph: Graph): - return graph.print_graph_stat() - - -@deprecated_api(Graph) -def get_inputs_with_ports(graph: Graph, match, pattern_edges, input_names_in_pattern): - """ - Front replacements of multi-input nodes should specify output port to add_node-like functions - This function is a helper to get such information out of matched nodes - :param graph: graph to operate on - :param match: dictionary returned by matching function - :param pattern_edges: edges that are specified in pattern - :param input_names_in_pattern: names of matched nodes as they were specified in pattern that should be in - resulting list - :return: list of tuples of node and output port - """ - return graph.get_inputs_with_ports(match=match, - pattern_edges=pattern_edges, - input_names_in_pattern=input_names_in_pattern) - - -@deprecated_api(Graph) -def dump_graph_for_graphviz(graph: Graph, node_attrs: list = ['kind', 'op', 'shape'], - edge_attrs: list = ['in', 'out'], - nodes_to_dump: list = None, save_to_svg=False): - return graph.dump_graph_for_graphviz(node_attrs=node_attrs, - edge_attrs=edge_attrs, - nodes_to_dump=nodes_to_dump, - save_to_svg=save_to_svg) - - -@deprecated_api(Graph) -def create_sub_graph_copy(graph: Graph, nodes_to_extract: list): - """ - Create new graph which is a sub-graph of the 'graph' that contains just nodes from 'nodes_to_extract' list. The - returned sub-graph is a deep copy of the provided graph nodes. - :param graph: graph to create a sub-graph from. - :param nodes_to_extract: list of node names to extract. - :return: new graph. - """ - return graph.create_sub_graph_copy(nodes_to_extract=nodes_to_extract) - - -@deprecated_api(Graph) -def get_graph_ops(graph: Graph): - return graph.get_op_nodes() - - -@deprecated_api(Graph) -def check_empty_graph(graph: Graph, description: str): - return graph.check_empty_graph(description=description) - - -@deprecated_api(Graph) -def create_edge(src_node: Node, dst_node: Node, out_port: int = 0, in_port: int = 0, edge_attrs: dict = None): - """ - Creates edge from node 'src_node' from output with index 'out_port' to node 'dst_node' with input index 'in_port'. - :param src_node: node to create edge from. - :param dst_node: node to create edge to. - :param out_port: the index of output tensor of the 'src_node'. - :param in_port: the input index of the node 'dst_node'. - :param edge_attrs: dictionary with edge attrs. - :return: None - """ - assert src_node.graph is dst_node.graph - graph = src_node.graph - return graph.create_edge(src_node=src_node, dst_node=dst_node, out_port=out_port, in_port=in_port, - edge_attrs=edge_attrs) - - -@deprecated_api(Graph) -def erase_node(node: Node): - """ - Erases node from the graph and reconnect edges from input node(s) to output node(s) - Produces assertion error if the node being removed has multiple inputs or outputs. - The function can be used in the front phase only (when there are no data nodes in the graph). 
- :param node: Node to erase - """ - graph = node.graph - return graph.erase_node(node) - - -@deprecated_api(Node) -def get_sorted_inputs(node: Node, control_flow: bool = False): - return node.get_sorted_inputs(control_flow=control_flow) - - -@deprecated_api(Node) -def get_sorted_outputs(node: Node, control_flow: bool = False): - return node.get_sorted_outputs(control_flow=control_flow) - - -@deprecated_api(Node) -def insert_node_after(node: Node, new_node: Node, node_out_port: int = 0): - """ - Insert node 'new_node' after output with index 'node_out_port' of the node 'node'. All consumers of node 'node' - output with index 'node_out_port' will be changed to consume node 'new_node'. - The function should be used when graph doesn't contain data nodes yet. - :param node: node after which new node should be inserted. - :param new_node: node to be inserted. - :param node_out_port: the output index for the node 'node' to insert - :return: None - """ - return node.insert_node_after(new_node=new_node, node_out_port=node_out_port) - - -@deprecated_api(Node) -def replace_node(old_node: Node, new_node: Node, new_node_out_port: int = None): - """ - Replaces node 'old_node' with a node 'new_node' preserving edge attributes. - :param old_node: node to be replaced. - :param new_node: node to replace with. - :return: None - """ - return old_node.replace_node(new_node=new_node, new_node_out_port=new_node_out_port) - - -@deprecated_api(Node) -def copy_node(src_node: Node, new_attrs: dict = None, dst_graph: nx.MultiDiGraph = None): - """ Copies node with all attributes (optionally updated) within the same graph or to different graph.""" - return src_node.copy_node(new_attrs=new_attrs, dst_graph=dst_graph) - - -@deprecated_api(Node) -def get_inputs(graph: Graph, node: str, edge_attr: dict = None, control_flow: bool = False): - return Node(graph, node).get_inputs(edge_attr=edge_attr, control_flow=control_flow) - - -@deprecated_api(Node) -def get_outputs(graph: Graph, node: str, edge_attr: dict = None, control_flow: bool = False): - return Node(graph, node).get_outputs(edge_attr=edge_attr, control_flow=control_flow) diff --git a/tools/mo/openvino/tools/mo/graph/perm_inputs.py b/tools/mo/openvino/tools/mo/graph/perm_inputs.py deleted file mode 100644 index ddae286ccf150f..00000000000000 --- a/tools/mo/openvino/tools/mo/graph/perm_inputs.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import networkx as nx - -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.const import Const - - -def get_node_with_permutation(node: Node, port_info: str): - node_type, port = port_info.split(':') - port = int(port) - return node.in_node(port) if node_type == 'input' else node.out_node(port) - - -def axis(op_node: Node, port_info: str, input_port: int): - """ - Performs layout change related transformation of the data on the in_port_idx port of op_node. - Translates shape indexes from one layout to another according to inverse permutation - - Transformation inserts Gather operation with - permutation as 0-port input data and - actual data to translate as 1-port input indexes of Gather - - For example: - NHWC Reduce operation has 0-port input with data of shape [1, 2, 3, 4] and - 1-port input with axis indices [0, 1]. 
- - After translating such operation to NCHW layout: - 0-port input shape = [1, 4, 2, 3] - 1-port input axis indices = [0, 2] - """ - graph = op_node.graph - - permutation_data_node = get_node_with_permutation(op_node, port_info) - assert permutation_data_node.has_and_set('permutation'), 'Data node "{}" does not have permutation for node {}, ' \ - 'port_info "{}".'.format(permutation_data_node.id, - op_node.id, port_info) - permutation = permutation_data_node.permutation - if len(permutation.perm) == 0: - return - - data_node = op_node.in_node(input_port) - - gather_name = op_node.soft_get('name', op_node.id) + '/AxisGather' - const = Const(graph, {'value': permutation.inv, 'name': gather_name + '/const', - 'need_shape_inference': True}).create_node_with_data() - axis_const = Const(graph, {'value': int64_array(0), 'name': gather_name + '/axis'}).create_node_with_data() - gather = Gather(graph, {'name': gather_name, 'need_shape_inference': True}).create_node_with_data( - [const, data_node, axis_const]) - attrs = graph.get_edge_data(data_node.id, op_node.id, key=0).copy() - graph.add_edge(gather.id, op_node.id, **attrs) - graph.remove_edge(data_node.id, op_node.id) - op_node['need_shape_inference'] = True - - -def order(op_node: Node, port_info: str, input_port: int): - """ - Performs layout change related transformation of the data on the in_port_idx port of op_node. - Translates ordered shape indexes from one layout to another according to permutation - - Transformation inserts two Gather operations - - 1 Gather reorders data to new layout according to direct permutation: - actual data to translate as 1-port input indexes of Gather and - permutation as 0-port input data - 2 Gather translates shape indexes from one layout to another according to inverse permutation - permutation as 0-port input data and - actual data to translate as 1-port input indexes of Gather - - For example: - NHWC Transpose operation has 0-port input with data of shape [1, 2, 3, 4] and - 1-port input with new order indices [0, 1, 3, 2]. 
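The index translation that the Gather nodes above implement can be reproduced with plain numpy, using the NHWC-to-NCHW permutation perm = [0, 3, 1, 2] and its inverse inv = [0, 2, 3, 1]. This is a standalone sketch of the index arithmetic only, not of the graph surgery:

    import numpy as np

    perm = np.array([0, 3, 1, 2])   # NHWC -> NCHW
    inv = np.array([0, 2, 3, 1])    # inverse permutation

    # 'axis' case: Gather(inv, axes) translates reduction axes to the new layout
    axes = np.array([0, 1])
    print(np.take(inv, axes))       # [0 2]

    # 'order' case: two gathers translate a Transpose order
    order = np.array([0, 1, 3, 2])
    phase_1 = np.take(order, perm)  # move each entry to its NCHW position -> [0 2 1 3]
    phase_2 = np.take(inv, phase_1) # remap entry values from NHWC to NCHW axes -> [0 3 2 1]
    print(phase_2)                  # [0 3 2 1]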
- - After translating such operation to NCHW layout: - 0-port input shape = [1, 4, 2, 3] - - 1 phase (after first Gather insertion): - 1-port input order indices = [0, 2, 1, 3] - 2 phase (after second Gather insertion): - 1-port input order indices = [0, 3, 2, 1] - """ - graph = op_node.graph - permutation_data_node = get_node_with_permutation(op_node, port_info) - assert permutation_data_node.has_and_set('permutation'), 'Data node "{}" does not have permutation for node {}, ' \ - 'port_info "{}".'.format(permutation_data_node.id, - op_node.id, port_info) - permutation = permutation_data_node.permutation - if len(permutation.perm) == 0: - return - - data_node = op_node.in_node(input_port) - - gather_name = op_node.soft_get('name', op_node.id) + '/OrderGather_1' - const = Const(graph, {'value': permutation.perm, 'name': gather_name + '/const', - 'need_shape_inference': True}).create_node_with_data() - axis_const = Const(graph, {'value': int64_array(0), 'name': gather_name + '/axis'}).create_node_with_data() - gather = Gather(graph, {'name': gather_name, - 'need_shape_inference': True}).create_node_with_data([data_node, const, axis_const]) - - gather_1_name = op_node.soft_get('name', op_node.id) + '/OrderGather_2' - const_1 = Const(graph, {'value': permutation.inv, 'name': gather_1_name + '/const', - 'need_shape_inference': True}).create_node_with_data() - axis_const_1 = Const(graph, {'value': int64_array(0), 'name': gather_1_name + '/axis'}).create_node_with_data() - gather_1 = Gather(graph, {'name': gather_1_name, - 'need_shape_inference': True}).create_node_with_data([const_1, gather, axis_const_1]) - - attrs = graph.get_edge_data(data_node.id, op_node.id, key=0).copy() - graph.add_edge(gather_1.id, op_node.id, **attrs) - graph.remove_edge(data_node.id, op_node.id) - op_node['need_shape_inference'] = True - - -def strided_slice(op_node: Node, port_info: str, input_port: int): - """ - StridedSLice must be permuted even if input or output tensors have rank lesser than 4 - e.g. 
input_shape = (1, 10, 10), out = input[:, 0:10, :, new_axis], input_rank < 4 - input_shape = (1, 10, 10, 3), out = input[:, 0:5, 0:4, 0], output_rank < 4 - in both examples slice_rank is >= 4 - slice_rank is defined by length of begin, end, strides (they all are of the same length) - """ - permutation_data_node = get_node_with_permutation(op_node, port_info) - assert permutation_data_node.has_and_set('permutation'), 'Data node "{}" does not have permutation for node {}, ' \ - 'port_info "{}".'.format(permutation_data_node.id, - op_node.id, port_info) - permute_indices_for_gather = permutation_data_node.permutation.perm - if len(permute_indices_for_gather) == 0: - return - from openvino.tools.mo.ops.op import PermuteAttrs - - slice_rank = op_node.in_port(input_port).data.get_shape()[0] # length of begin, end or strides - permute_indices_for_gather = PermuteAttrs.get_nhwc_to_nchw_permutation(slice_rank).perm - reorder_inputs_for_shape_or_slice(op_node, input_port, permute_indices_for_gather) - - -def shape(op_node: Node, port_info: str, input_port: int): - permutation_data_node = get_node_with_permutation(op_node, port_info) - assert permutation_data_node.has_and_set('permutation'), 'Data node "{}" does not have permutation for node {}, ' \ - 'port_info "{}".'.format(permutation_data_node.id, - op_node.id, port_info) - permute_indices_for_gather = permutation_data_node.permutation.perm - if len(permute_indices_for_gather) == 0: - return - reorder_inputs_for_shape_or_slice(op_node, input_port, permute_indices_for_gather) - - -def reorder_inputs_for_shape_or_slice(op_node: Node, input_port: int, permute_indices_for_gather: list): - """ - axis and slice permutations are almost the same the only difference is that for slice in general - case permutation depends from slice_rank not from input_rank or output_rank - """ - graph = op_node.graph - data_node = op_node.in_node(input_port) - - gather_name = op_node.soft_get('name', op_node.id) + '/ShapeGather' - const = Const(graph, {'value': permute_indices_for_gather, 'name': gather_name + '/const', - 'need_shape_inference': True}).create_node_with_data() - axis_const = Const(graph, {'value': int64_array(0), 'name': gather_name + '/axis'}).create_node_with_data() - gather = Gather(graph, {'name': gather_name, - 'need_shape_inference': True}).create_node_with_data([data_node, const, axis_const]) - attrs = graph.get_edge_data(data_node.id, op_node.id, key=0).copy() - - graph.add_edge(gather.id, op_node.id, **attrs) - graph.remove_edge(data_node.id, op_node.id) - - # need to run manually to override output shape value to resolve shape collision for nodes with - # 'correct_data_layout' output port attrs - op_node['need_shape_inference'] = True - - -def transpose(op_node: Node, port_info: str, input_port: int): - graph = op_node.graph - permutation_data_node = get_node_with_permutation(op_node, port_info) - assert permutation_data_node.has_and_set('permutation'), \ - 'Data node "{}" does not have permutation for node {}, port_info "{}".'.format( - permutation_data_node.id, op_node.id, port_info) - permutation = permutation_data_node.permutation - if len(permutation.perm) == 0: - return - - transpose_name = op_node.soft_get('name', op_node.id) + '/Transpose' - from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs # avoiding recursive imports - transpose = create_op_with_const_inputs( - graph, Transpose, {1: permutation.perm}, {'name': transpose_name, 'override_output_shape': True}) - 
op_node.in_port(input_port).get_connection().insert_node(transpose) - transpose.infer(transpose) - - -def transpose_nchw_to_nhwc(op_node: Node, port_info: str, input_port: int): - graph = op_node.graph - permutation_data_node = get_node_with_permutation(op_node, port_info) - rank = len(permutation_data_node.shape) - assert rank >= 4, 'Rank must be at least 4 for NCHW to NHWC permutation on node {}.'.format(op_node.id) - - perm = list(range(rank)) - perm.insert(1, perm.pop()) - perm = int64_array(perm) - - transpose_name = op_node.soft_get('name', op_node.id) + '/Transpose' - from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs # avoiding recursive imports - transpose = create_op_with_const_inputs( - graph, Transpose, {1: perm}, {'name': transpose_name, 'override_output_shape': True}) - op_node.in_port(input_port).get_connection().insert_node(transpose) - transpose.infer(transpose) - - -class PermuteInputs: - input_permutes = { - 'axis': lambda node, port_info, input_port: axis(node, port_info, input_port), - 'slice': lambda node, port_info, input_port: strided_slice(node, port_info, input_port), - 'order': lambda node, port_info, input_port: order(node, port_info, input_port), - 'shape': lambda node, port_info, input_port: shape(node, port_info, input_port), - 'transpose': lambda node, port_info, input_port: transpose(node, port_info, input_port), - 'transpose_nchw_to_nhwc': lambda node, port_info, input_port: transpose_nchw_to_nhwc(node, port_info, - input_port), - } - - shape_check_rules = { - 'rank': lambda port: bool(len(port.data.get_shape()) >= 4), - 'dim_size': lambda port: bool(port.data.get_shape()[0] >= 4), # permute if the input 'dim_size' is >= 4 - } - - def set_input_permutation(self, node1: Node, node2: Node, port_info: str, permutation_rule: str, - shape_check_rule: str = 'rank'): - """ - Sets the input permutation attribute on the edge between node1 and node2. - The input permutation consists of a function that performs the permutation and - port info ('input' or 'output' plus a port index) that points to the input carrying the - PermuteAttr.Permutation which the current input depends on. - - shape_check_rule defines the check used to decide whether the op node inputs need to be permuted. - By default the 'rank' rule is applied; 'dim_size' is used only for StridedSlice so far.
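For context, the tuple stored under 'input_permutation' (see the nx.set_edge_attributes call below) is unpacked and applied by a separate transformation pass. A hypothetical consumer could look roughly like this; the iteration and helper names are assumptions for illustration, not code from this file:

    # Hypothetical sketch only: how the 'input_permutation' tuple might be consumed later.
    def apply_input_permutations(graph, node):
        for _, _, edge_attrs in graph.in_edges(node.id, data=True):
            permutation = edge_attrs.get('input_permutation', None)
            if permutation is None:
                continue
            permute_fn, port_info, check_fn = permutation
            direction, idx = port_info.split(':')
            port = node.in_port(int(idx)) if direction == 'input' else node.out_port(int(idx))
            if check_fn(port):  # the 'rank' / 'dim_size' rules defined above
                permute_fn(node, port_info, edge_attrs['in'])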
- """ - assert permutation_rule in self.input_permutes, 'No `{}` permutation rule in {}'.format(permutation_rule, - __class__.__name__) - assert shape_check_rule in self.shape_check_rules, 'No `{}` permutation shape check rule ' \ - 'in {}'.format(shape_check_rule, __class__.__name__) - nx.set_edge_attributes(G=node1.graph, - values={(node1.id, node2.id, 0): (self.input_permutes[permutation_rule], port_info, - self.shape_check_rules[shape_check_rule])}, - name='input_permutation') diff --git a/tools/mo/openvino/tools/mo/graph/port.py b/tools/mo/openvino/tools/mo/graph/port.py deleted file mode 100644 index 86ddd81cfe4ead..00000000000000 --- a/tools/mo/openvino/tools/mo/graph/port.py +++ /dev/null @@ -1,506 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from copy import deepcopy - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, strict_compare_tensors -from openvino.tools.mo.graph.connection import Connection -from openvino.tools.mo.utils.error import Error - - -class Port: - class DataAccessor: - def __init__(self): - pass - - def __init__(self, node, idx: int, type: str, **kwargs): - if type not in ['in', 'out']: - raise Error("Inappropriate port type: {}".format(type)) - - # We use self.__dict__ only to not to call __setattr__ method from __init__ function - self.__dict__['node'] = node - self.__dict__['idx'] = idx - self.__dict__['type'] = type - self.__dict__['data'] = self.DataAccessor() - self.__dict__['control_flow'] = False - self.__dict__.update(kwargs) - - self.data.get_shape = self._get_shape - self.data.set_shape = self._set_shape - - self.data.get_value = self._get_value - self.data.set_value = self._set_value - - self.data.get_attr = self._get_attr - self.data.set_attr = self._set_attr - - self.data.has_valid = self._has_valid - - def __eq__(self, other): - return ( - self.__class__ == other.__class__ and - self.node.graph == other.node.graph and - self.node.id == other.node.id and - self.type == other.type and - self.idx == other.idx - ) - - def __hash__(self): - return hash((self.node.id, self.type, self.idx)) - - def __deepcopy__(self, memo): - cls = self.__class__ - result = cls.__new__(cls) - memo[id(self)] = result - for k, v in self.__dict__.items(): - result.__dict__[k] = v if k in ['graph', 'node'] else deepcopy(v) - return result - - def __setattr__(self, key, value): - edge = self.node.in_edge(self.idx, control_flow=self.control_flow) if self.type == 'in' else \ - self.node.out_edge(self.idx, control_flow=self.control_flow) - edge[key] = value - - def __getattr__(self, item): - edge = self.node.in_edge(self.idx, control_flow=self.control_flow) if self.type == 'in' else \ - self.node.out_edge(self.idx, control_flow=self.control_flow) - if edge.get(item) is None: - raise Error( - "Edge from {}_port {} at node {} has no attribute {}".format(self.type, self.idx, self.node.name, item)) - return edge[item] - - def _create_data_if_necessary(self): - if self.node.graph.stage == 'front': - raise Error("_create_data_if_necessary method is not applicable for front Graph phase!") - if self.type == 'in': - raise Error("_create_data_if_necessary method is not applicable for 'in' Port type!") - - if self.idx not in self.node.out_nodes(control_flow=self.control_flow): - from openvino.tools.mo.ops.op import Op - Op.create_data_node(self.node.graph, self.node, out_port=self.idx) - self.node['need_shape_inference'] = True - return self.node.out_node(self.idx, 
control_flow=self.control_flow) - - def _get_shape(self): - if self.node.graph.stage == 'front': - return None - else: - node_caller = self.node.in_node if self.type == 'in' else self.node.out_node - return node_caller(self.idx, control_flow=self.control_flow).shape - - def _set_shape(self, shape): - if self.node.graph.stage == 'front': - raise NotImplementedError("set_shape not implemented for front phase") - else: - if self.type == 'in': - assert self.node.in_node(self.idx, control_flow=self.control_flow).value is None - self.node.in_node(self.idx, control_flow=self.control_flow).shape = shape_array(shape) - else: - data_node = self.node.out_node(self.idx, control_flow=self.control_flow) - assert data_node.value is None or self.node.has_and_set('override_output_shape') or \ - strict_compare_tensors(data_node.soft_get('force_shape', data_node.shape), shape_array(shape)) - self.node.out_node(self.idx, control_flow=self.control_flow).shape = shape_array(shape) - - def _get_value(self): - if self.node.graph.stage == 'front': - return None - else: - if self.type == 'in': - if self.idx in self.node.in_nodes(control_flow=self.control_flow) and \ - self.node.in_node(self.idx, control_flow=self.control_flow).has_valid('value'): - return self.node.in_node(self.idx, control_flow=self.control_flow).value - else: - if self.idx in self.node.out_nodes(control_flow=self.control_flow) and \ - self.node.out_node(self.idx, control_flow=self.control_flow).has_valid('value'): - return self.node.out_node(self.idx, control_flow=self.control_flow).value - return None - - def _set_value(self, value): - if self.node.graph.stage == 'front': - raise Error("set_value is not applicable for graph front phase") - else: - data_node_caller = self.node.in_node if self.type == 'in' else self.node.out_node - data_node = data_node_caller(self.idx, control_flow=self.control_flow) - const_node = data_node.in_node(control_flow=self.control_flow) if self.type == 'in' else self.node - - force_shape = data_node.soft_get('force_shape', const_node.soft_get('force_shape', None)) - shape = int64_array(value.shape if force_shape is None else force_shape) - - # Set value to data node - data_node.value = value - data_node.shape = shape - - # Set value to constant producer - if const_node.soft_get('type') == 'Const': - const_node.value = value - const_node.shape = shape - - def _get_attr(self, item: str): - if self.node.graph.stage == 'front': - return None - else: - if self.type == 'in': - if self.idx in self.node.in_nodes(control_flow=self.control_flow) and \ - self.node.in_node(self.idx, control_flow=self.control_flow).has_valid(item): - return self.node.in_node(self.idx, control_flow=self.control_flow)[item] - else: - if self.idx in self.node.out_nodes(control_flow=self.control_flow) and \ - self.node.out_node(self.idx, control_flow=self.control_flow).has_valid(item): - return self.node.out_node(self.idx, control_flow=self.control_flow)[item] - return None - - def _set_attr(self, item, value): - raise NotImplementedError() - - def get_in_edge_attrs(self, data=False): - assert self.type == 'in' - for u, v, d in list(self.node.graph.in_edges(self.node.id, data=True)): - if d['in'] == self.idx: - edge_attrs = self.node.graph.get_edge_data(u, v) - for key in edge_attrs: - if edge_attrs[key]['in'] == self.idx: - if data: - return edge_attrs[key], u, v, key - else: - return edge_attrs[key] - if data: - return None, None, None, None - else: - return None - - def _has_valid(self, item): - if self.node.graph.stage == 'front': - raise 
NotImplementedError - else: - if self.type == 'in': - if self.idx in self.node.in_nodes(control_flow=self.control_flow) and \ - self.node.in_node(self.idx, control_flow=self.control_flow).has_valid(item): - return True - else: - if self.idx in self.node.out_nodes(control_flow=self.control_flow) and \ - self.node.out_node(self.idx, control_flow=self.control_flow).has_valid(item): - return True - return False - - def disconnected(self): - # This method returns False if port connected with some other port - # otherwise it returns True - - if self.type == 'in': - return self.get_source() is None - else: - return len(self.get_destinations()) == 0 - - def get_source(self): - # This method returns Port object that is producer (source) port for out port. - # In case if out port has no source port return None - - assert self.type != 'out', "Can't get source for output port at {} node".format(self.node.name) - - from openvino.tools.mo.graph.graph import Node - producer_ports = [] - - has_producer = False - if self.node.graph.stage == 'front': - for n, d in self.node.get_inputs(control_flow=self.control_flow): - if d['in'] == self.idx: - node = Node(self.node.graph, n) - producer_ports.append(node.out_port(d['out'], control_flow=self.control_flow)) - has_producer = True - if not has_producer: - return None - else: - if self.idx not in self.node.in_nodes(control_flow=self.control_flow): - return None - - in_data = self.node.in_node(self.idx, control_flow=self.control_flow) - for n, d in in_data.get_inputs(control_flow=self.control_flow): - node = Node(self.node.graph, n) - producer_ports.append(node.out_port(d['out'], control_flow=self.control_flow)) - - if len(producer_ports) != 1: - if self.node.graph.strict_mode: - raise Error('Something bad has happened with graph! Data node "{}" has {} producers'.format( - self.node.id, len(producer_ports))) - else: - return None - return producer_ports[0] - - def get_destination(self): - # This method returns Port that is consumer (destination) port for in port. - # In case if in port has no consumer return None - - consumer_ports = self.get_destinations() - if not consumer_ports: - return None - - if len(consumer_ports) > 1: - raise Error("The number of destinations for {} node at {} port is {}".format(self.node.name, - self.idx, - len(consumer_ports))) - return consumer_ports[0] - - def get_destinations(self): - assert self.type != 'in', "Can't get destinations for input port at {} node".format(self.node.name) - - from openvino.tools.mo.graph.graph import Node - consumer_ports = [] - if self.node.graph.stage == 'front': - producer_node = self.node - else: - # In case if node has no output data node in given port, we return None - if self.idx not in self.node.out_nodes(control_flow=self.control_flow): - return [] - producer_node = self.node.out_node(self.idx, control_flow=self.control_flow) - - for n, d in producer_node.get_outputs(edge_attr={'out': self.idx} if self.node.graph.stage == 'front' else None, - control_flow=self.control_flow): - node = Node(self.node.graph, n) - consumer_ports.append(node.in_port(d['in'], control_flow=self.control_flow)) - return consumer_ports - - def get_tensor_names(self, port_renumber: bool = False): - """ - Gets sorted tensor names list. - :param port_renumber: defines whether data node index should be calculated considering port renumbering. 
- """ - tensor_debug_info = self.get_tensor_debug_info(port_renumber) - tensor_names_list = [] - for attr in tensor_debug_info: - if attr is not None and len(attr) >= 2: - tensor_name = attr[1] - if tensor_name is not None and len(tensor_name) > 0: - tensor_names_list.append(tensor_name.replace(',', '\\,')) - return sorted(tensor_names_list) - - def add_tensor_names(self, tensor_names: list, port_renumber: bool = False): - """ - Sets tensor names list. - :param tensor_names: list of tensor names. - :param port_renumber: defines whether data node index should be calculated considering port renumbering. - """ - - if len(tensor_names) == 0: - return - - new_debug_items = [] - op_name = self.node.soft_get('name', self.node.id) - for tensor_name in tensor_names: - assert isinstance(tensor_name, str), "Tensor names elements should be strings." - new_debug_items.append((op_name, tensor_name)) - - tensor_debug_info = self.get_tensor_debug_info(port_renumber) - tensor_debug_info += new_debug_items - self.set_tensor_debug_info(tensor_debug_info, port_renumber) - - def remove_tensor_names(self, port_renumber: bool = False): - """ - Removes tensor names. - :param port_renumber: defines whether data node index should be calculated considering port renumbering. - """ - self.remove_debug_info(port_renumber=port_renumber) - - def get_tensor_debug_info(self, port_renumber: bool = False): - """ - Gets tensor debug info attribute. - :param port_renumber: defines whether data node index should be calculated considering port renumbering. - """ - def get_tensor_debug_info_from_attrs(attrs): - if 'fw_tensor_debug_info' in attrs: - if attrs['fw_tensor_debug_info'] is not None: - return attrs['fw_tensor_debug_info'] - return [] - - assert self.type != 'in', "Can't get tensor debug info for input port at {} node".format(self.node.name) - - fw_debug_info = [] - if self.node.graph.stage == 'front': - if self.idx in self.node.out_edges(): - out_edge = self.node.out_edge(self.idx) - fw_debug_info += get_tensor_debug_info_from_attrs(out_edge) - else: - # before port renumbering we use sequential numbering - node_idx = self.idx - if port_renumber: - if self.node.type != 'Const': - # after port renumbering port indices start from zero, - # but data node indices remain the same - node_idx = self.idx + len(self.node.in_nodes()) - - if node_idx in self.node.out_nodes(): - out_node = self.node.out_node(node_idx) - fw_debug_info += get_tensor_debug_info_from_attrs(out_node.attrs()) - return fw_debug_info - - def set_tensor_debug_info(self, tensor_info: list, port_renumber: bool = False): - """ - Gets tensor debug info attribute. - :param tensor_info: new tensor debug info value. - :param port_renumber: defines whether data node index should be calculated considering port renumbering. 
- """ - - assert self.type != 'in', "Can't get tensor debug info for input port at {} node".format(self.node.name) - - if self.node.graph.stage == 'front': - if self.idx in self.node.out_edges(): - out_edge = self.node.out_edge(self.idx) - out_edge['fw_tensor_debug_info'] = tensor_info - else: - # before port renumbering we use sequential numbering - node_idx = self.idx - if port_renumber: - if self.node.type != 'Const': - # after port renumbering port indices start from zero, - # but data node indices remain the same - node_idx = self.idx + len(self.node.in_nodes()) - - if node_idx in self.node.out_nodes(): - out_node = self.node.out_node(node_idx) - out_node['fw_tensor_debug_info'] = tensor_info - - def remove_debug_info(self, port_renumber: bool = False): - """ - Removes tensor debug info attribute. - :param port_renumber: defines whether data node index should be calculated considering port renumbering. - """ - - assert self.type != 'in', "Tensor debug info is not defined for input port at {} node".format(self.node.name) - - if self.node.graph.stage == 'front': - if self.idx in self.node.out_edges(): - out_edge = self.node.out_edge(self.idx) - if 'fw_tensor_debug_info' in out_edge: - del out_edge['fw_tensor_debug_info'] - else: - # before port renumbering we use sequential numbering - node_idx = self.idx - if port_renumber: - if self.node.type != 'Const': - # after port renumbering port indices start from zero, - # but data node indices remain the same - node_idx = self.idx + len(self.node.in_nodes()) - - if node_idx in self.node.out_nodes(): - out_node = self.node.out_node(node_idx) - if 'fw_tensor_debug_info' in out_node: - del out_node['fw_tensor_debug_info'] - - def disconnect(self): - if self.type == 'out': - consumer_ports = self.get_destinations() - if self.node.graph.stage == 'front': - for port in consumer_ports: - self.node.graph.remove_edge(self.node.id, port.node.id) - else: - for port in consumer_ports: - src_node = port.node.in_node(port.idx).id - dst_node = port.node.id - for key, val in self.node.graph.get_edge_data(src_node, dst_node).items(): - if val['in'] == port.idx: - self.node.graph.remove_edge(src_node, dst_node, key=key) - break - else: - source_port = self.get_source() - if source_port is None: - return - for u, v, d in list(self.node.graph.in_edges(self.node.id, data=True)): - if d['in'] == self.idx: - for key in self.node.graph.get_edge_data(u, v): - if self.node.graph.get_edge_data(u, v)[key]['in'] == self.idx: - self.node.graph.remove_edge(u, v, key=key) - return - - def get_connection(self): - if self.type == 'in': - return Connection(self.node.graph, self.get_source(), [self], control_flow=self.control_flow) - else: - return Connection(self.node.graph, self, self.get_destinations(), control_flow=self.control_flow) - - def connect(self, port): - if self.type == 'in': - self.get_connection().set_source(port) - else: - self.get_connection().add_destination(port) - - def _get_data_type(self): - """ - Internal method which does not raise with error if the data type is not known. - Check value of the data node to determine input port data type as well as the respective value in the - '_out_port_data_type' dictionary. 
- :return: The data type or None if it is not defined - """ - node = self.node - if self.type == 'out': - if node.has_valid('_out_port_data_type') and self.idx in node._out_port_data_type: - return node._out_port_data_type[self.idx] - - # check the data type of the output data node - value = self.data.get_value() - value_data_type = value.dtype if value is not None else None - if value_data_type is not None: - value_data_type = value.dtype if value is not None else None - log.debug('The precision of the output port {} of node {} is determined from the data node as {}' - ''.format(self.idx, self.node.name, value_data_type)) - return value_data_type - return None - else: - # check the data type of the input data node - value = self.data.get_value() - value_data_type = value.dtype if value is not None else None - if value_data_type is not None: - log.debug('The precision of the input port {} of node {} is determined from the data node as {}' - ''.format(self.idx, self.node.name, value_data_type)) - - # The 'get_source' method raises an error if there is no producer op node for the input port. But here we - # don't want to do this, so we temporary disable graph strict mode - old_strict_mode_value = node.graph.strict_mode - node.graph.strict_mode = False - source_port = self.get_source() - source_port_data_type = None - if source_port is not None: - source_port_data_type = source_port._get_data_type() - node.graph.strict_mode = old_strict_mode_value - - # check for the data node and port data type inconsistency. TODO should we raise an error here? - if value_data_type is not None and source_port_data_type is not None and \ - value_data_type != source_port_data_type: - log.warning('Inconsistent data type of the data node and port attribute for port {} of node {}: {} vs ' - '{}. Return data type of the data node.'.format(self.idx, self.node.name, - value_data_type, source_port_data_type)) - # the source port data type has higher priority over the value data type because the MO calculates values in - # I64 precision for shapes but not all OV plugins support I64, so we should trust data type infer functions - return source_port_data_type if source_port_data_type is not None else value_data_type - - def get_data_type(self): - data_type = self._get_data_type() - if data_type is None: - raise Error('The data type for {} port {} of node {} is not defined'.format(self.type, self.idx, - self.node.name)) - return data_type - - def is_data_type_defined(self): - """ - Check if the data-type is already defined for the port. 
- :return: the result of the check - """ - return self._get_data_type() is not None - - def set_data_type(self, data_type, override=False): - assert self.type == 'out', 'The method can be called for output ports only' - node = self.node - if not node.has_valid('_out_port_data_type'): - node['_out_port_data_type'] = {} - if self.idx in node._out_port_data_type and data_type != node._out_port_data_type[self.idx] and not override: - raise Error('Trying to override data type for output port {} of operation {}: from {} to {}'.format( - self.idx, node.name, node._out_port_data_type[self.idx], data_type)) - node._out_port_data_type[self.idx] = data_type - - def get_default_tensor_name(self): - """ - Gets default_tensor_name - :return: tensor name - """ - if self.type == 'in': - return None - return self.node.soft_get('name', self.node.id) + ":" + str(self.idx) diff --git a/tools/mo/openvino/tools/mo/load/__init__.py b/tools/mo/openvino/tools/mo/load/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/load/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/load/caffe/__init__.py b/tools/mo/openvino/tools/mo/load/caffe/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/load/caffe/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/load/caffe/loader.py b/tools/mo/openvino/tools/mo/load/caffe/loader.py deleted file mode 100644 index aaf5538998de66..00000000000000 --- a/tools/mo/openvino/tools/mo/load/caffe/loader.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -from openvino.tools.mo.load.loader import Loader -from openvino.tools.mo.front.caffe import custom_layers_mapping, loader -from openvino.tools.mo.front.caffe.extractor import caffe_type_extractors, caffe_extractor -from openvino.tools.mo.front.common.register_custom_ops import update_extractors_with_extensions, check_for_duplicates -from openvino.tools.mo.front.extractor import extract_node_attrs -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.telemetry_utils import send_op_names_info, send_shapes_info -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class CaffeLoader(Loader): - enabled = True - - def load(self, graph: Graph): - argv = graph.graph['cmd_params'] - if argv.caffe_parser_path is None: - argv.caffe_parser_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'front', 'caffe', 'proto') - caffe_pb2 = loader.import_caffe_pb2(argv.caffe_parser_path) - - proto, model = loader.load_caffe_proto_model(caffe_pb2, argv.input_proto, argv.input_model) - - update_extractors_with_extensions( - caffe_type_extractors, - argv.disable_omitting_optional if hasattr(argv, 'disable_omitting_optional') else False, - argv.disable_flattening_optional_params if hasattr(argv, 'disable_flattening_optional_params') else False - ) - - try: - original_shapes = loader.caffe_pb_to_nx(graph, proto, model) - except ValueError as e: - raise Error('Invalid prototxt file: value error {}. 
' + - refer_to_faq_msg(11), str(e)) from e - graph.check_empty_graph('load_caffe_proto_model') - - graph.__setattr__('proto_path', argv.input_proto) - graph.__setattr__('caffemodel_path', argv.input_model) - graph.__setattr__('name', getattr(proto, 'name', None) or argv.model_name) - graph.graph['layout'] = 'NCHW' - graph.graph['fw'] = 'caffe' - graph.graph['original_shapes'] = original_shapes - graph.graph['caffe_pb2'] = caffe_pb2 - - if argv.k is None: - argv.k = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'front', 'caffe', 'CustomLayersMapping.xml') - custom_layers_map = custom_layers_mapping.load_layers_xml(argv.k) - custom_layers_mapping.update_extractors( - caffe_type_extractors, - custom_layers_map, - argv.disable_omitting_optional if hasattr(argv, 'disable_omitting_optional') else False, - argv.enable_flattening_nested_params if hasattr(argv, 'enable_flattening_nested_params') else False - ) - extract_node_attrs(graph, lambda node: caffe_extractor(node, check_for_duplicates(caffe_type_extractors))) - send_op_names_info('caffe', graph) - send_shapes_info('caffe', graph) diff --git a/tools/mo/openvino/tools/mo/load/kaldi/__init__.py b/tools/mo/openvino/tools/mo/load/kaldi/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/load/kaldi/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/load/kaldi/loader.py b/tools/mo/openvino/tools/mo/load/kaldi/loader.py deleted file mode 100644 index 376d848138a2a3..00000000000000 --- a/tools/mo/openvino/tools/mo/load/kaldi/loader.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.load.loader import Loader -from openvino.tools.mo.front.common.register_custom_ops import update_extractors_with_extensions -from openvino.tools.mo.front.extractor import extract_node_attrs -from openvino.tools.mo.front.kaldi.extractor import kaldi_extractor, kaldi_type_extractors -from openvino.tools.mo.front.kaldi.loader.loader import load_kaldi_model -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.telemetry_utils import send_shapes_info, send_op_names_info -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class KaldiLoader(Loader): - enabled = True - - def load(self, graph: Graph): - argv = graph.graph['cmd_params'] - try: - load_kaldi_model(graph, argv.input_model) - except Exception as e: - raise Error('Model Optimizer is not able to parse Kaldi model {}. 
'.format(argv.input_model) + - refer_to_faq_msg(91)) from e - graph.check_empty_graph('load_kaldi_nnet_model') - graph.graph['layout'] = 'NCHW' - graph.graph['fw'] = 'kaldi' - - update_extractors_with_extensions(kaldi_type_extractors) - extract_node_attrs(graph, lambda node: kaldi_extractor(node)) - - send_op_names_info('kaldi', graph) - send_shapes_info('kaldi', graph) diff --git a/tools/mo/openvino/tools/mo/load/loader.py b/tools/mo/openvino/tools/mo/load/loader.py deleted file mode 100644 index 355bbe69d8ede3..00000000000000 --- a/tools/mo/openvino/tools/mo/load/loader.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils import class_registration - - -class Loader(object): - registered_cls = [] - registered_ops = {} - excluded_replacers = [] - - def find_and_replace_pattern(self, graph: Graph): - self.load(graph) - - def load(self, graph: Graph): - raise Exception("Define load logic of {} class in its load method".format( - self.__class__.__name__ - )) - - def run_before(self): - """ - Returns list of loader classes which this loader must be run before. - :return: list of classes - """ - return [LoadFinish] - - def run_after(self): - """ - Returns list of loader classes which this loader must be run after. - :return: list of classes - """ - return [] - - @classmethod - def class_type(cls): - return class_registration.ClassType.LOADER - - -class LoadFinish(Loader): - enabled = True - - def run_before(self): - return [] - - def run_after(self): - return [] - - def load(self, graph: Graph): - graph.check_empty_graph('loading from framework') diff --git a/tools/mo/openvino/tools/mo/load/onnx/__init__.py b/tools/mo/openvino/tools/mo/load/onnx/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/load/onnx/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/load/onnx/loader.py b/tools/mo/openvino/tools/mo/load/onnx/loader.py deleted file mode 100644 index 72b819b35b66f1..00000000000000 --- a/tools/mo/openvino/tools/mo/load/onnx/loader.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import logging as log - -from openvino.tools.mo.load.loader import Loader -from openvino.tools.mo.front.common.register_custom_ops import update_extractors_with_extensions, check_for_duplicates -from openvino.tools.mo.front.extractor import extract_node_attrs -from openvino.tools.mo.front.onnx.extractor import onnx_op_extractor, onnx_op_extractors -from openvino.tools.mo.front.onnx.loader import load_onnx_model, protobuf2nx -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.telemetry_utils import send_shapes_info, send_op_names_info -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class ONNXLoader(Loader): - enabled = True - run_not_recursively = True - - def load(self, graph: Graph): - import onnx - import io - argv = graph.graph['cmd_params'] - if isinstance(argv.input_model, str): - model_proto = load_onnx_model(argv.input_model) - elif isinstance(argv.input_model, io.BytesIO): - 
model_proto = onnx.load_model_from_string(argv.input_model.getvalue()) - else: - raise Error('Unknown ONNX model type: {}'.format(type(argv.input_model))) - - model_graph = model_proto.graph # pylint: disable=no-member - # print(model_graph) - # assert len(model_graph) == 1, "An ONNX model contains more than 1 graph: unsupported" - log.debug("Number of nodes in graph_def: {}".format(len(model_graph.node))) - log.debug("Number of all input ports (not true inputs) in graph_def: {}".format(len(model_graph.input))) - log.debug("Number of initializers in graph_def: {}".format(len(model_graph.initializer))) - log.debug( - "Number of real inputs in graph_def: {}".format(len(model_graph.input) - len(model_graph.initializer))) - update_extractors_with_extensions(onnx_op_extractors) - - try: - protobuf2nx(graph, model_proto) - except Exception as e: - raise Error( - 'Cannot pre-process ONNX graph after reading from model file "{}". ' \ - 'File is corrupt or has unsupported format. Details: {}. ' + - refer_to_faq_msg(44), - argv.input_model, - str(e) - ) from e - log.debug("Number of nodes in NX graph: {}".format(graph.number_of_nodes())) - - graph.__setattr__('name', - argv.model_name if argv.model_name else model_proto.graph.name) # pylint: disable=no-member - graph.graph['layout'] = 'NCHW' - graph.graph['fw'] = 'onnx' - graph.graph['feature_dim'] = 1 - if hasattr(model_proto, 'opset_import'): - graph.graph['fw_opset_version'] = model_proto.opset_import[0].version # pylint: disable=no-member - else: - graph.graph['fw_opset_version'] = None - - graph.check_empty_graph('protobuf2nx. It may happen due to problems with loaded model') - extract_node_attrs(graph, lambda node: onnx_op_extractor(node, check_for_duplicates(onnx_op_extractors))) - send_op_names_info('onnx', graph) - send_shapes_info('onnx', graph) diff --git a/tools/mo/openvino/tools/mo/load/tf/__init__.py b/tools/mo/openvino/tools/mo/load/tf/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/load/tf/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/load/tf/loader.py b/tools/mo/openvino/tools/mo/load/tf/loader.py deleted file mode 100644 index efc517b156b51e..00000000000000 --- a/tools/mo/openvino/tools/mo/load/tf/loader.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os - -# do not print INFO and WARNING messages from TensorFlow -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -try: - import tensorflow.compat.v1 as tf_v1 -except ImportError: - import tensorflow as tf_v1 - -#in some environment suppressing through TF_CPP_MIN_LOG_LEVEL does not work -tf_v1.get_logger().setLevel("ERROR") - -try: - import tensorflow.contrib # pylint: disable=no-name-in-module,import-error -except: - pass # we try to import contrib for loading models that use contrib operations - -import logging as log - -from openvino.tools.mo.load.loader import Loader -from openvino.tools.mo.front.common.register_custom_ops import check_for_duplicates -from openvino.tools.mo.front.common.register_custom_ops import update_extractors_with_extensions -from openvino.tools.mo.front.extractor import restore_edges, extract_node_attrs, remove_control_dependency_inputs, add_outputs_identity -from openvino.tools.mo.front.tf.extractor import get_tf_edges, create_tf_edge, tf_op_extractor, tf_op_extractors -from openvino.tools.mo.front.tf.loader 
import load_tf_graph_def, protobuf2nx -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively -from openvino.tools.mo.utils import tensorboard_util -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.telemetry_utils import send_op_names_info, send_shapes_info, send_framework_info -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class TFLoader(Loader): - enabled = True - run_not_recursively = True - - def load(self, graph: Graph): - argv = graph.graph['cmd_params'] - if argv.tensorflow_custom_layer_libraries: - libraries = argv.tensorflow_custom_layer_libraries.split(',') - for library in libraries: - log.info('Loading library "{}" with custom operations'.format(library)) - tf_v1.load_op_library(library) - - graph_def, variables_values, framework, inputs_outputs_order = load_tf_graph_def( - graph_file_name=argv.input_model, - is_binary=not argv.input_model_is_text, - checkpoint=argv.input_checkpoint, - user_output_node_names_list=argv.output, - model_dir=argv.saved_model_dir, - meta_graph_file=argv.input_meta_graph, - saved_model_tags=argv.saved_model_tags) - - if inputs_outputs_order is not None and isinstance(inputs_outputs_order, tuple): - graph.inputs_order = inputs_outputs_order[0] - graph.outputs_order = inputs_outputs_order[1] - - send_framework_info(framework) - - try: - tf_v1.import_graph_def(graph_def, name='') - except: - log.warning("TensorFlow post-processing of loaded model was unsuccessful. " - "This is an optional step that Model Optimizer performs for any input model but it is not usually " - "required for all models. " - "It likely means that the original model is ill-formed. " - "Model Optimizer will continue converting this model.") - - log.debug("Number of nodes in graph_def: {}".format(len(graph_def.node))) # pylint: disable=no-member - - if argv.tensorboard_logdir: - tensorboard_util.dump_for_tensorboard(graph_def, argv.tensorboard_logdir) - - update_extractors_with_extensions(tf_op_extractors) - - try: - protobuf2nx(graph, graph_def) - except Exception as e: - raise Error( - 'Cannot pre-process TensorFlow graph after reading from model file "{}". ' \ - 'File is corrupt or has unsupported format. Details: {}. ' + - refer_to_faq_msg(44), - argv.model_name, - str(e) - ) from e - - graph.__setattr__('name', argv.model_name) - # 'layout' parameter change may cause an issue in EltwiseInputReshape replacer - # and convert_nhwc_to_nchw(graph) - graph.graph['layout'] = 'NHWC' - graph.graph['fw'] = 'tf' - - graph.graph['variables_values'] = variables_values - del variables_values - - used_tensors = restore_edges(graph, get_tf_edges) - - # Tensor names information corresponding to a node is stored on outgoing edges. - # As output nodes do not have outgoing edges, fake outputs are required. In the following code - # for each output Identity node is added, and tensor name for the output is kept - # on (output, fake output) edge. After Result nodes adding transformation fake outputs - # are deleted from graph. - add_outputs_identity(graph, graph.nodes - used_tensors, lambda g, output, fake_node_name: g.add_edges_from([ - create_tf_edge(output, fake_node_name, 0)])) - - remove_control_dependency_inputs(graph) - - graph.check_empty_graph('protobuf2nx. 
It may happen due to problems with loaded model') - extract_node_attrs(graph, lambda node: tf_op_extractor(node, check_for_duplicates(tf_op_extractors))) - - # try to detect layout from the nodes of the graph. If there are no convolution nodes in N(D)HWC layout then we - # consider that the graph is in NCHW layout and no layout conversion should be performed - if not graph_or_sub_graph_has_nhwc_ops(graph): - if not argv.silent: - log.debug('disable_nhwc_to_nchw" was automatically enabled.') - for_graph_and_each_sub_graph_recursively(graph, update_cmd_params_and_layout) - - send_op_names_info(framework, graph) - send_shapes_info(framework, graph) - - -def is_node_layout_nhwc(node: Node): - """ - Check the layout attribute of specific operations and return True if any of them has layout NHWC. - :param node: Node to check - :return: Boolean result of the check - """ - if node.soft_get('op') in ["Conv2D", "DepthwiseConv2dNative", "Conv3D", "Conv2DBackpropInput", - "Conv3DBackpropInputV2"]: - if node.soft_get('layout') in ["NHWC", "NDHWC"]: - log.debug('Detected convolution node with NHWC layout: "{}"'.format(node.soft_get('name', node.id))) - return True - return False - - -def graph_or_sub_graph_has_nhwc_ops(graph: Graph): - """ - Checks that a graph or any sub-graph (inside Loop) operation contains nodes with NHWC layout. - :param graph: main graph to check - :return: Boolean result of the check - """ - NHWC_conv_detected = False - for node in graph.get_op_nodes(): - if is_node_layout_nhwc(node): - NHWC_conv_detected = True - break - - if node.has('sub_graphs'): - for sub_graph_name in node['sub_graphs']: - NHWC_conv_detected |= graph_or_sub_graph_has_nhwc_ops(node.soft_get(sub_graph_name)) - - return NHWC_conv_detected - - -def update_cmd_params_and_layout(graph: Graph): - """ - Updates "cmd_params" and "layout" attribute as the model has only NCHW layout operations. - :param graph: graph to update attributes - :return: Nones - """ - if 'cmd_params' in graph.graph: - graph.graph['cmd_params'].auto_disable_nhwc_to_nchw = True - if 'layout' in graph.graph: - graph.graph['layout'] = 'NCHW' diff --git a/tools/mo/openvino/tools/mo/main.py b/tools/mo/openvino/tools/mo/main.py deleted file mode 100644 index a1e9ef58efe250..00000000000000 --- a/tools/mo/openvino/tools/mo/main.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import os -import sys - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm -from openvino.tools.mo.convert_impl import _convert -from openvino.tools.mo.pipeline.common import get_ir_version - -# pylint: disable=no-name-in-module,import-error -from openvino.runtime import serialize - - -def main(cli_parser: argparse.ArgumentParser, framework=None): - ngraph_function, argv = _convert(cli_parser, framework, {}, False) - if ngraph_function is None: - return 1 - - output_dir = argv.output_dir if argv.output_dir != '.' 
else os.getcwd() - model_path_no_ext = os.path.normpath(os.path.join(output_dir, argv.model_name)) - model_path = model_path_no_ext + '.xml' - - serialize(ngraph_function, model_path.encode('utf-8'), model_path.replace('.xml', '.bin').encode('utf-8')) - - print('[ SUCCESS ] Generated IR version {} model.'.format(get_ir_version(argv))) - print('[ SUCCESS ] XML file: {}'.format(model_path)) - print('[ SUCCESS ] BIN file: {}'.format(model_path.replace('.xml', '.bin'))) - return 0 - - -if __name__ == "__main__": - from openvino.tools.mo.utils.cli_parser import get_all_cli_parser # pylint: disable=no-name-in-module,import-error - sys.exit(main(get_all_cli_parser(), None)) diff --git a/tools/mo/openvino/tools/mo/main_caffe.py b/tools/mo/openvino/tools/mo/main_caffe.py deleted file mode 100644 index 6554b8e70ae6c5..00000000000000 --- a/tools/mo/openvino/tools/mo/main_caffe.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import sys - -from openvino.tools.mo.utils.cli_parser import get_caffe_cli_parser # pylint: disable=no-name-in-module,import-error - -if __name__ == "__main__": - from openvino.tools.mo.main import main - sys.exit(main(get_caffe_cli_parser(), 'caffe')) diff --git a/tools/mo/openvino/tools/mo/main_kaldi.py b/tools/mo/openvino/tools/mo/main_kaldi.py deleted file mode 100644 index b1ea7fbd9064d9..00000000000000 --- a/tools/mo/openvino/tools/mo/main_kaldi.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import sys - -from openvino.tools.mo.utils.cli_parser import get_kaldi_cli_parser # pylint: disable=no-name-in-module,import-error - -if __name__ == "__main__": - from openvino.tools.mo.main import main - sys.exit(main(get_kaldi_cli_parser(), 'kaldi')) diff --git a/tools/mo/openvino/tools/mo/main_onnx.py b/tools/mo/openvino/tools/mo/main_onnx.py deleted file mode 100644 index 802ce4700e2b4f..00000000000000 --- a/tools/mo/openvino/tools/mo/main_onnx.py +++ /dev/null @@ -1,11 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import sys - -from openvino.tools.mo.utils.cli_parser import get_onnx_cli_parser # pylint: disable=no-name-in-module,import-error - -if __name__ == "__main__": - from openvino.tools.mo.main import main - - sys.exit(main(get_onnx_cli_parser(), 'onnx')) diff --git a/tools/mo/openvino/tools/mo/main_paddle.py b/tools/mo/openvino/tools/mo/main_paddle.py deleted file mode 100644 index 4a556d54c57eec..00000000000000 --- a/tools/mo/openvino/tools/mo/main_paddle.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import sys - -from openvino.tools.mo.utils.cli_parser import get_all_cli_parser # pylint: disable=no-name-in-module,import-error - -from openvino.frontend import FrontEndManager # pylint: disable=no-name-in-module,import-error - - -if __name__ == "__main__": - from openvino.tools.mo.main import main - sys.exit(main(get_all_cli_parser(), 'paddle')) diff --git a/tools/mo/openvino/tools/mo/main_tf.py b/tools/mo/openvino/tools/mo/main_tf.py deleted file mode 100644 index e48309b2630d06..00000000000000 --- a/tools/mo/openvino/tools/mo/main_tf.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import sys - -from openvino.tools.mo.utils.cli_parser import get_tf_cli_parser # pylint: disable=no-name-in-module,import-error - -if __name__ == "__main__": - 
from openvino.tools.mo.main import main - sys.exit(main(get_tf_cli_parser(), 'tf')) diff --git a/tools/mo/openvino/tools/mo/middle/AddFakeQuantizeFuse.py b/tools/mo/openvino/tools/mo/middle/AddFakeQuantizeFuse.py deleted file mode 100644 index a48aa1d465f111..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/AddFakeQuantizeFuse.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from typing import Dict - -from openvino.tools.mo.middle.MulFakeQuantizeFuse import resolve_shared_inputs -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.fusing.helpers import get_tensor_in_port, get_value_in_port -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class AddFakeQuantizeFuse(MiddleReplacementPattern): - """ Fuses Add --> FakeQuantize sequence if possible - """ - enabled = False - - def run_after(self): - return [] - - def run_before(self): - return [] - - def pattern(self): - return dict( - nodes=[ - ('preop', dict(op='Add', can_be_fused=True)), - ('preoped', dict()), - ('quantize', dict(op='FakeQuantize')), - ], - edges=[ - ('preop', 'preoped'), - ('preoped', 'quantize', {'in': 0}), - ] - ) - - def replace_pattern(self, graph: Graph, match: Dict[str, Node]): - quantize = match['quantize'] - preop = match['preop'] - - for i in [0, 1]: - if preop.in_port(i).get_source().node.soft_get('type') in ['Convolution', 'Deconvolution', 'MatMul']: - return - - tensor_port, value_port = get_tensor_in_port(preop), get_value_in_port(preop) - - if value_port is None or value_port.data.get_value() is None: - log.debug('AddQuantizeFuse: cannot fuse because Add op has dynamic inputs') - return - - # Direct modifications to quantize 1-st and 2-nd port inputs are performed. - # So the data nodes at those inputs shouldn't have more than 1 consumer maximum 2 consumers to the same - # quantize op (consumed by 1st and 2nd ports). 
So we duplicate FakeQuantize in_port 1, 2, 3, 4 data - resolve_shared_inputs(node=quantize, port_ids_to_duplicate=[1, 2]) - - quantize.in_port(1).data.set_value(quantize.in_port(1).data.get_value() - value_port.data.get_value()) - if quantize.in_node(1).id != quantize.in_node(2).id: - quantize.in_port(2).data.set_value(quantize.in_port(2).data.get_value() - value_port.data.get_value()) - - in_add_connection = quantize.in_port(0).get_source().node.in_port(0).get_connection() - quantize.in_port(0).disconnect() - in_add_connection.add_destination(quantize.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/middle/AddIsCyclicAttribute.py b/tools/mo/openvino/tools/mo/middle/AddIsCyclicAttribute.py deleted file mode 100644 index 77d591f21422ec..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/AddIsCyclicAttribute.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import networkx as nx - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class AddIsCyclicAttribute(MiddleReplacementPattern): - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.DeleteControlFlowEdges import DeleteControlFlowEdges - return [DeleteControlFlowEdges] - - def run_before(self): - return [] - - @staticmethod - def find_and_replace_pattern(graph: Graph): - is_acyclic = nx.is_directed_acyclic_graph(graph) - graph.graph['is_cyclic'] = not is_acyclic diff --git a/tools/mo/openvino/tools/mo/middle/ApplyNHWCtoNCHWpermutation.py b/tools/mo/openvino/tools/mo/middle/ApplyNHWCtoNCHWpermutation.py deleted file mode 100644 index 7e9568d9eb96e7..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ApplyNHWCtoNCHWpermutation.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.LayoutChangeForConstantShapePaths import LayoutChangeForConstantShapePaths -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.op import PermuteAttrs - - -class ApplyNHWCtoNCHWpermutation(MiddleReplacementPattern): - enabled = True - graph_condition = [lambda graph: graph.graph['layout'] == 'NHWC'] - - def run_after(self): - return [LayoutChangeForConstantShapePaths] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_data_nodes(): - if node.has_and_set('nchw_layout'): - continue - - # Get NHWC to NCHW permutation for N dims, where N = len(node.shape) - permutation = PermuteAttrs().get_nhwc_to_nchw_permutation(len(node.shape)) - - # Check that data node already has permutation - skip_permutation = False - for in_node in node.in_nodes(): - edge_attrs = node.graph.get_edge_data(in_node.id, node.id)[0] - if 'permutation' in edge_attrs: - skip_permutation = True - for out_node in node.out_nodes(): - edge_attrs = node.graph.get_edge_data(node.id, out_node.id)[0] - if 'permutation' in edge_attrs: - skip_permutation = True - - if skip_permutation: - continue - - # Set permutation to all in/out edges - for in_node in node.in_nodes(): - PermuteAttrs.set_permutation(in_node, node, permutation) - - for out_node in node.out_nodes(): - PermuteAttrs.set_permutation(node, out_node, permutation) diff --git a/tools/mo/openvino/tools/mo/middle/ApplyPermutations.py b/tools/mo/openvino/tools/mo/middle/ApplyPermutations.py deleted file mode 100644 index 
46c3a70852abb9..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ApplyPermutations.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.middle.InsertLayoutPropagationTransposes import is_input_data_in_correct_layout, \ - is_output_data_in_correct_layout -from openvino.tools.mo.middle.LayoutChangeForConstantShapePaths import LayoutChangeForConstantShapePaths -from openvino.tools.mo.middle.PreserveRuntimeInfo import PreserveRuntimeInfo -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.graph.perm_inputs import get_node_with_permutation -from openvino.tools.mo.graph.port import Port -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.utils.error import Error - - -class ApplyPermutation(MiddleReplacementPattern): - enabled = True - force_clean_up = True - # can't be turned on for Kaldi until permutation logic will be aligned - graph_condition = [lambda graph: graph.graph['fw'] != 'kaldi'] - - def run_after(self): - return [PreserveRuntimeInfo] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - self.permute_data_nodes_attrs(graph) - self.permute_op_nodes_attrs(graph) - self.shape_of_sub_graph_reinference(graph) - self.permute_input_data(graph) - graph.graph['layout'] = 'NCHW' - - @staticmethod - def permute_data_nodes_attrs(graph: Graph): - # Iterate over all data nodes and apply permutation if exists - for node in graph.get_data_nodes(): - if not node.has_valid('permutation') or \ - all([attrs.get('input_permutation', False) for u, v, attrs in graph.out_edges(node.id, data=True)]): - continue - - if len(node.in_nodes()) != 0: # there are data nodes without input operation node inside the TensorIterator - edge_attrs = graph.get_edge_data(node.in_node(0).id, node.id)[0] - if is_output_data_in_correct_layout(node.in_node(0), edge_attrs['out']): - log.debug('Do not permute data node attrs for node "{}" output port "{}"'.format(node.in_node(0).id, - edge_attrs['out'])) - continue - - # Apply permutation for shape and value if exists - if len(node.permutation.perm) == 0: - continue - node.shape = shape_array(node.shape)[node.permutation.perm] - if node.has_valid('value'): - assert len(node.value.shape) == len(node.permutation.perm), \ - 'Node {} has shape {} and permutation {} that does not match. Their lengths should be equal' \ - ''.format(node.name, node.value.shape, node.permutation.perm) - node.value = mo_array(node.value.transpose(node.permutation.perm)) - - @staticmethod - def permute_op_nodes_attrs(graph: Graph): - for node in graph.get_op_nodes(): - if node.has_valid('permute_attrs') and not node.has_and_set('nchw_layout'): - try: - node.permute_attrs.permute_attrs(node) - except Exception as e: - raise Error('Can\'t permute attrs for node {}. 
Error message: {}'.format(node.id, e)) - - @staticmethod - def permute_input_data(graph: Graph): - for node in graph.get_op_nodes(): - input_permutations = [(in_port, edge_attrs['input_permutation']) for in_port, edge_attrs in - node.in_edges().items() if edge_attrs.get('input_permutation') is not None] - for in_port, input_perm in input_permutations: - permutation, port_info, check_shape = input_perm - direction, port = port_info.split(':') - port = int(port) - port_to_check = node.in_port(port) if direction == 'input' else node.out_port(port) - permutation_data_node = get_node_with_permutation(node, port_info) - - if permutation_data_node.has_and_set('permutation') and \ - not is_input_data_in_correct_layout(node, in_port) and check_shape(port_to_check): - permutation(node, port_info, in_port) - if node.has_and_set('need_shape_inference'): - node.infer(node) - node.need_shape_inference = False - - @staticmethod - def shape_of_sub_graph_reinference(graph: Graph): - """ - After layout permutation (shape change in data nodes) shape sub-graphs contain values in the old layout - To change that we execute full partial inference on the shape-of sub-graphs - """ - shape_ops = graph.get_op_nodes(op='ShapeOf') - for shape in shape_ops: - shape.infer(shape) - - def reinfer_once(in_port: Port): - node = in_port.node - if not node.soft_get('reinferred', False): - node.infer(node) - node['reinferred'] = True - - LayoutChangeForConstantShapePaths().find_shape_subgraph_endpoints( - out_ports=[shape.out_port(0) for shape in shape_ops], action=reinfer_once) diff --git a/tools/mo/openvino/tools/mo/middle/ArgOpsToTopK.py b/tools/mo/openvino/tools/mo/middle/ArgOpsToTopK.py deleted file mode 100644 index 15b19eb469460c..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ArgOpsToTopK.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.topk import TopK -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const - - -class ArgOpsToTopK(MiddleReplacementPattern): - """ - The transformation replaces ArgMax/ArgMin with the TopK layer. 
- """ - - enabled = True - force_clean_up = True - - def pattern(self): - return dict( - nodes=[ - ('node', dict(op=lambda x: x in ['ArgMax', 'ArgMin'])), - ], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['node'] - node_name = node.soft_get('name', node.id) - - connected_ports = [port for port in node.in_ports().values() if not port.disconnected()] - if len(connected_ports) == 2: - axis = node.in_port(1).data.get_value() - else: - axis = node.axis - - assert axis is not None, 'The "axis" should be defined for node "{}"'.format(node_name) - assert node.has_and_set('output_type'), 'The data type is not set for node "{}"'.format(node_name) - - topk_mode = 'max' if node.op == 'ArgMax' else 'min' - topk_node = TopK(graph, {'axis': axis, 'mode': topk_mode, 'sort': 'index', - 'remove_values_output': node.has_and_set('remove_values_output'), - 'index_element_type': node.output_type}).create_node() - node.in_port(0).get_connection().set_destination(topk_node.in_port(0)) - if node.has_and_set('out_max_val'): # in this mode the ArgMax produces tuples (max_ind, max_value) - concat_node = Concat(graph, {'axis': 1, 'name': node.name + '/Concat'}).create_node() - concat_node.add_input_port(0, skip_if_exist=True) - concat_node.add_input_port(1, skip_if_exist=True) - topk_node.out_port(0).connect(concat_node.in_port(1)) # indices - topk_node.out_port(1).connect(concat_node.in_port(0)) # values - if not node.out_port(0).disconnected(): - node.out_port(0).get_connection().set_source(concat_node.out_port(0)) - else: - if not node.out_port(0).disconnected(): - node.out_port(0).get_connection().set_source(topk_node.out_port(1)) - - topk_node.in_port(1).connect(Const(graph, {'name': node.soft_get('name') + '/TopK', - 'value': node.top_k}).create_node().out_port(0)) - - graph.remove_nodes_from([node.id, node.out_node(0).id]) diff --git a/tools/mo/openvino/tools/mo/middle/AttributedTileNormalizer.py b/tools/mo/openvino/tools/mo/middle/AttributedTileNormalizer.py deleted file mode 100644 index b73a6409999612..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/AttributedTileNormalizer.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.tile import Tile - - -class AttributedTileNormalizer(MiddleReplacementPattern): - enabled = True - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('tile', dict(op='AttributedTile', axis=lambda x: x is not None, tiles=lambda x: x is not None))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['tile'] - name = node.soft_get('name', node.id) - - axis = node.axis - tiles = node.tiles - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None - tiles_input_value = int64_array(np.ones(input_shape.size)) - tiles_input_value[axis] = tiles - - const = Const(graph, {'value': tiles_input_value, 'name': name + '/tiles'}).create_node() - tile = Tile(graph, {'name': name}).create_node() - - node.out_port(0).get_connection().set_source(tile.out_port(0)) - node.in_port(0).get_connection().set_destination(tile.in_port(0)) - const.out_port(0).connect(tile.in_port(1)) diff --git 
a/tools/mo/openvino/tools/mo/middle/BiasAddBroadcasting.py b/tools/mo/openvino/tools/mo/middle/BiasAddBroadcasting.py deleted file mode 100644 index 63bfcfc2420fbc..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/BiasAddBroadcasting.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.middle.EltwiseChecker import EltwiseChecker -from openvino.tools.mo.ops.elementwise import Add -from openvino.tools.mo.front.common.layout import get_features_dim -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class BiasAddInputBroadcasting(MiddleReplacementPattern): - """ - In TF, the BiasAdd op has 2 inputs: a data tensor and a bias tensor. Bias always has a 1D shape and should be broadcasted - to the data tensor along the features dimension. - - BiasAdd is also replaced by a usual Add op after broadcasting. - """ - enabled = True - force_shape_inference = True - - def run_before(self): - return [EltwiseChecker] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('BiasAdd', dict(kind='op', op='Add', type='BiasAdd')) - ], - edges=[]) - - def replace_pattern(self, graph: Graph, match: dict): - bias_add = match['BiasAdd'] - - # Replace BiasAdd by Add operation - new_add = Add(graph, {'name': bias_add.id + '/Add'}).create_node() - - bias_add.in_port(0).get_connection().set_destination(new_add.in_port(0)) - bias_add.in_port(1).get_connection().set_destination(new_add.in_port(1)) - bias_add.out_port(0).get_connection().set_source(new_add.out_port(0)) - - if bias_add.data_format != 'NCHW': - return - - input_shape = new_add.in_port(0).data.get_shape() - bias_shape = new_add.in_port(1).data.get_shape() - assert len(bias_shape) == 1 - - unsqueeze_dims = np.arange(len(input_shape)) - channel_dim = get_features_dim('NCHW', len(input_shape)) - unsqueeze_dims = np.delete(unsqueeze_dims, channel_dim, 0) - - unsqueeze_node = Unsqueeze(graph, {'name': new_add.id + '/BiasUnsqueeze'}).create_node() - unsqueeze_dims_node = Const(graph, {'name': new_add.id + '/Dims', - 'value': unsqueeze_dims}).create_node() - # Reconnecting nodes - unsqueeze_node.in_port(1).connect(unsqueeze_dims_node.out_port(0)) - unsqueeze_node['override_output_shape'] = True - - new_add.in_port(1).get_connection().insert_node(unsqueeze_node) diff --git a/tools/mo/openvino/tools/mo/middle/BinarizeWeightsM1P1.py b/tools/mo/openvino/tools/mo/middle/BinarizeWeightsM1P1.py deleted file mode 100644 index d128745b452e2f..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/BinarizeWeightsM1P1.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.middle.CheckForCycle import CheckForCycle -from openvino.tools.mo.middle.DeleteNotExecutable import DeleteNotExecutable -from openvino.tools.mo.ops.elementwise import Mul, Pow -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape - - -class BinarizeWeightsM1P1(MiddleReplacementPattern): - 
""" Convert weights to -1/+1 form - - Applicable for convolutions and other operations that have 'weights' that are combined with the input data - by means of a multiplication operation. So any linear operator suits. Detect such operations by - the multiplication_transparent attribute -- if it is present and set to True, then the multiplication term - can be passed through the operation. If the multiplication_transparent attribute is set to True for an operation, - such an operation should also have multiplication_transparent_ports that contains a list of pairs with - port indices (in_port, out_port) that define which port pairs can pass multiplication through. - - For example, for a convolutional operation which has 2 ports (input tensor and weights) and 1 output port, - this list includes [(0,0), (1,0)]. If the convolutional operation also has biases at port 2, that port is not included in - this list because it is not transparent for the multiplication operation. - - multiplication_transparent_ports can be None if all possible input/output pairs are multiplication - transparent. - - #TODO Describe how to apply multiplication at output ports -- this is not specified. In the current definition - we can pass through only scalar multiplication, but we already require passing it channel-wise. - """ - enabled = True - - def run_after(self): - return [] - - def run_before(self): - # CheckForCycle and DeleteNotExecutable run graph clean up which should not be run before weights binarization - return [CheckForCycle, DeleteNotExecutable] - - def pattern(self): - return dict( - nodes=[ - ('quantize', dict(kind='op', op='FakeQuantize')), - ('quantized', dict()), - ('operator', dict(kind='op', multiplication_transparent=True)), - ], - edges=[ - ('quantize', 'quantized'), - ('quantized', 'operator'), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - assert match['operator'].has('multiplication_transparent_ports') - - port = match['operator'].input_ports_with(match['quantized']) - assert len(port) >= 1 - if len(port) > 1: - log.debug('BinarizeWeightsM1P1 cannot apply transformation for data {} because it is consumed more' - ' than once'.format(match['quantized'].name)) - return - - assert len(port) == 1 - port = port[0] - applicable = [pair for pair in match['operator'].multiplication_transparent_ports if pair[0] == port] - if len(applicable) == 0: - return - - # Look at 3-rd and 4-th inputs of FakeQuantize -- they have constants that should be passed through. - # Assume that the constant that should be passed through is a scalar. - quantize = match['quantize'] - output_low = quantize.in_node(3) - output_high = quantize.in_node(4) - - quantize_name = quantize.soft_get('name', quantize.id) - - if not output_low.has_valid('value') and not output_high.has_valid('value'): - return - - output_low = output_low.value - output_high = output_high.value - - # This pass is applicable for binarization only. Other intX variants are not relevant. - if quantize.levels != 2: - return - - # Recognize two cases: 0/+1 and -1/+1. - zp1 = np.all(output_low == 0) or np.all(output_high == 0) - m1p1 = np.all(-output_low == output_high) - if (not zp1 and not m1p1) or (zp1 and m1p1): - log.debug('BinarizeWeightsM1P1 cannot apply transformation for data {} because it doesn\'t have one of' - ' 0/+1 or -1/+1 forms.'.format(match['quantized'].name)) - return - - # TODO: Extract real scalar from 3rd and 4th inputs; reusing original tensors is dangerous because - # it may have incompatible shape. 
- - mult_term = quantize.in_node(3) if np.all(output_high == 0) else quantize.in_node(4) - - new_shape = Const(graph, {'name': quantize_name + '/Reshape/Shape', - 'value': int64_array([-1, 1, 1])}).create_node_with_data() - reshape = Reshape(graph, {'name': quantize_name + '/Reshape'}).create_node_with_data([mult_term, new_shape]) - - # Patch inflow path (by diving by mult_term) - # Put a new Pow/Mul combination here: - # ---->---- (here)---> data ---> [3rd/4th ports]quantize ---> quantized ---> operator - - if len(match['quantized'].out_nodes()) > 1: - log.debug('BinarizeWeightsM1P1: len(match[\'quantized\'].out_nodes()) > 1') - return - power_of_exponent = Const(graph, {'name': quantize_name + '/DivNormalize/Power', - 'value': mo_array(-1.0)}).create_node_with_data() - div_op = Pow(graph, {'name': quantize_name + '/DivNormalize'}) - div_output = div_op.create_node_with_data([mult_term, power_of_exponent]) - - for i in [3, 4]: - match['quantize'].insert_node_with_data_before( - match['quantize'].in_node(i), - Mul, - dict(name=quantize_name + '/MulNormalize'), - additional_inputs=[div_output], - ) - - match['quantized'].value = None # reset value because it will be recomputed - match['quantize'].infer(match['quantize']) - - # Put a complimentary new Mul node here: operator -->---(here)-----> operator.out_node() - - match['operator'].insert_node_with_data_after( - match['operator'].out_node(), - Mul, - dict(name=match['operator'].name + '/MulNormalize'), - [reshape], - ) - - # Disable 'operator' fusion with linear ops, otherwise it will annihilate changes that we just made - match['operator']['can_be_fused'] = False diff --git a/tools/mo/openvino/tools/mo/middle/BlockLSTMtoLSTMSequence.py b/tools/mo/openvino/tools/mo/middle/BlockLSTMtoLSTMSequence.py deleted file mode 100644 index 5dc21ed7674ede..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/BlockLSTMtoLSTMSequence.py +++ /dev/null @@ -1,378 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.LSTM import LSTM -from openvino.tools.mo.utils.error import Error - - -class BlockLSTMtoLSTMSequenceSingleFirstOutput(MiddleReplacementPattern): - """ - This transformation handles BlockLSTM with just one output, concatenation of all the intermediate - output values of the hidden. - TODO: implement for non-constant weights and bias. 
- For this, it requires to unbound from LSTMRNNSequenceToTensorIterator transformation - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.middle.LSTMRNNSequenceToTensorIterator import LSTMToTensorIterator - return [LSTMToTensorIterator] - - def run_after(self): - from openvino.tools.mo.middle.RNNSequenceNormalizeToIE import RNNSequenceNormalize - return [RNNSequenceNormalize, BlockLSTMtoLSTMSequence] - - def pattern(self): - return dict( - nodes=[ - ('BlockLSTM', dict(op='BlockLSTM')), - ('weights', dict(op='Const')), - ('weights_data', dict(kind='data')), - ('bias', dict(op='Const')), - ('bias_data', dict(kind='data')), - ], - edges=[ - ('weights', 'weights_data'), - ('weights_data', 'BlockLSTM', {'in': 1}), - ('bias', 'bias_data'), - ('bias_data', 'BlockLSTM', {'in': 2}), - ] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - block_lstm = match['BlockLSTM'] - connected_out_ports = [port_idx for port_idx, port in block_lstm.out_ports().items() if - not port.disconnected()] - # support only BlockLSTM with one output of concatenated hidden states from all steps - if len(connected_out_ports) != 1 or connected_out_ports[0] != 0: - return - shift_const = block_lstm.forget_bias - - # determine hidden_size based on weights shapes - w_shape = block_lstm.in_port(1).data.get_shape() - b_shape = block_lstm.in_port(2).data.get_shape() - hidden_size = dynamic_dimension - if len(b_shape) > 0 and b_shape[0] is not dynamic_dimension: - hidden_size = b_shape[0] // 4 - elif len(w_shape) > 1 and w_shape[1] is not dynamic_dimension: - hidden_size = w_shape[1] // 4 - - assert hidden_size is not dynamic_dimension, "OpenVINO does not support BlockLSTM with dynamic hidden_size." - - # normalize weights to the format required by OpenVINO LSTMSequence - weights = block_lstm.in_port(1).data.get_value() - biases = block_lstm.in_port(2).data.get_value() - assert weights is not None and biases is not None, \ - "Internal Model Optimizer error: weights and bias values should be defined." - # 1. reshape weights and bias to highlight channel dimension - weights = weights.reshape([weights.shape[0], 4, hidden_size]) - biases = biases.reshape([4, hidden_size]) - # 2. reorder gates icfo --> fico for both weights and biases - gate_reorder = [2, 0, 1, 3] - weights = np.take(weights, gate_reorder, axis=1) - biases = np.take(biases, gate_reorder, axis=0) - # 3. shift_const.value should be added to the first 1/4th part of the biases (f-gate: 0) - # Note: in case of moving this code up before gate reordering, the addition - # should be applied at different place - biases[0] += shift_const - # 4. return to the original shapes - weights = weights.reshape([weights.shape[0], -1]) - biases = biases.flatten() - # 5. TF stores weights in IO, but OV requires it in OI: transpose - weights = weights.transpose() - # 6. 
set up normalized values for Constant weights and bias - block_lstm.in_port(1).data.set_value(weights) - block_lstm.in_port(2).data.set_value(biases) - - # re-number h_init_state, c_init_state input ports to match RNNSequence ports order - # at this point there is no clear match to RNNSequence operations - # next re-ordering is expected in LSTMRNNSequenceToTensorIterator transformation - # to match LSTMCell inputs order - init_hidden_state_source = block_lstm.in_port(3).get_source() - init_cell_state_source = block_lstm.in_port(4).get_source() - block_lstm.in_port(4).get_connection().set_source(init_hidden_state_source) - block_lstm.add_input_port(5, skip_if_exist=True) - block_lstm.in_port(5).get_connection().set_source(init_cell_state_source) - block_lstm.delete_input_port(3, skip_if_absent=True) - - new_attrs = {'sequence_dim': 0, - 'batch_dim': 1, - 'direction': 'forward', - 'hidden_size': hidden_size, - 'format': 'tf', - } - - # update attributes of existing operation - # input edges have "bin" attribute for LSTMRNNSequenceToTensorIterator - LSTM.update_node_stat(block_lstm, new_attrs) - - -class BlockLSTMtoLSTMSequence(MiddleReplacementPattern): - """ - MO virtual operation RNNSequence that converts to OV TensorIterator with LSTMCell inside supports 3 outputs: - 0: concatenated hidden states over the whole time sequence, - 1: last hidden state, - 2: last cell state. - - Replacer do several tasks: - 1. Checks if current BlockLSTM can be translated to IR (OV does not support concatenated cell state output - which can be produced by BlockLSTM) - 2. Searches for sub-graph, that takes last cell state out of unsupported concatenated cell state output. - We cut this sub-graph off in case if there are no other consumers of concatenated cell state output and we connect - BlockLSTM to consumers of this sub-graph by port producing last cell state output - 3. Renumber input ports of BlockLSTM to match RNNSequence specification. - 4. (Optional. 
Resolves by multiple checks) We cut the same sub-graph (as in 2) for the concatenated cell states check - for better performance - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.middle.LSTMRNNSequenceToTensorIterator import LSTMToTensorIterator - return [LSTMToTensorIterator] - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - from openvino.tools.mo.middle.RNNSequenceNormalizeToIE import RNNSequenceNormalize - return [MiddleStart, RNNSequenceNormalize] - - def pattern(self): - return dict( - nodes=[ - ('BlockLSTM', dict(op='BlockLSTM')), - - # 0 port: output h vector over the whole time sequence - ('concatenated_hidden_states', (dict(kind='data'))), - - ('mul', dict(op='Mul')), - ('mul_data', dict(kind='data')), - ('after_mul_op_to_the_rest_of_model', dict(kind='op')), - ('concat_0', dict(op='ConcatV2')), - ('concat_0_data', dict(kind='data')), - ('reshape_0', dict(op='Reshape')), - ('reshape_0_data', dict(kind='data')), - ('gather_0', dict(op='Gather')), - ('gather_0_data', dict(kind='data')), - - # 1 port: cell state before the tanh over the whole time sequence - ('concatenated_cell_states_data', (dict(kind='data'))), - - ('concat_1', dict(op='ConcatV2')), - ('concat_1_data', dict(kind='data')), - ('reshape_1', dict(op='Reshape')), - ('reshape_1_data', dict(kind='data')), - ('gather_1', dict(op='Gather')), - ('gather_1_data', dict(kind='data')), - ], - edges=[ - ('BlockLSTM', 'concatenated_hidden_states', {'out': 0}), - ('concatenated_hidden_states', 'mul'), - ('mul', 'mul_data'), - ('mul_data', 'after_mul_op_to_the_rest_of_model'), - ('mul_data', 'concat_0'), - ('concat_0', 'concat_0_data'), - ('concat_0_data', 'reshape_0'), - ('reshape_0', 'reshape_0_data'), - ('reshape_0_data', 'gather_0'), - ('gather_0', 'gather_0_data'), - - ('BlockLSTM', 'concatenated_cell_states_data', {'out': 1}), - ('concatenated_cell_states_data', 'concat_1', {'in': 1}), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'gather_1'), - ('gather_1', 'gather_1_data') - ] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - time_len = match['concatenated_hidden_states'].shape[0] - r""" - Working with the concatenated_cell_states_data part first, because the OV TensorIterator primitive doesn't have - a concatenated cell states output and if we cannot collapse it, then we do not support this type of BlockLSTM - - We simplify the sub-graph below by taking another output of BlockLSTM: - concatenated cell states over the whole time sequence -> last cell state - - BlockLSTM - || out 1 (concatenated cell states coming out of BlockLSTM) - \/ in 1 - ConcatV2 - || (concatenation with initial state or another unused data) - \/ - Reshape - || - \/ - Gather (taking the last cell state from previous BlockLSTM, if Gather indexes == time_len) - """ - # check that there are no other consumers of concatenated_cell_states_data data flow - valid_output_names = ['concat_1', 'concat_1_data', 'reshape_1', 'reshape_1_data', 'gather_1', 'gather_1_data'] - valid_output_node_ids = [match[name].id for name in valid_output_names] - node_names_to_check_outputs = ['concatenated_cell_states_data', 'concat_1_data', 'reshape_1_data'] - for name in node_names_to_check_outputs: - for node in match[name].out_nodes(): - if node.id not in valid_output_node_ids: - raise Error("BlockLSTM node {} has output which contains concatenated cell states over the whole " - "time sequence. 
It is not replaceable by another output and is not supported " - "originally".format(match['BlockLSTM'].id)) - - # check that we really take the last cell state data by Gather - gather_indexes = match['gather_1'].in_node(1).value - if len(gather_indexes) == 1: - gather_index = gather_indexes[0] - else: - raise Error("BlockLSTM node {} has output which contains concatenated cell states over the whole " - "time sequence. It is not replaceable by another output and is not supported " - "originally".format(match['BlockLSTM'].id)) - if gather_index != time_len: - raise Error("BlockLSTM node {} has output which contains concatenated cell states over the whole " - "time sequence. It is not replaceable by another output and is not supported " - "originally".format(match['BlockLSTM'].id)) - - """ - We passed #1 and #2 stages from class description. It means that we can translate the rest of the pattern - to LSTMSequence even without following optimizations - """ - - node = match['BlockLSTM'] - weights_node = node.in_node(1) - biases_node = node.in_node(2) - shift_const = node.forget_bias - - # Assign temporary shape for them for easier manipulation - # TF stores weights in IO order - input_size = node.in_node(0).shape[-1] - hidden_size = node.in_node(3).shape[-1] - weights = weights_node.value - biases = biases_node.value - assert weights.shape[0] == input_size + hidden_size, \ - "weights.shape={} input_size={} hidden_size={}".format(weights.shape, input_size, hidden_size) - assert weights.shape[1] == biases.shape[0] == 4 * hidden_size, \ - "weights.shape={} biases.shape={} hidden_size={}".format(weights.shape, biases.shape, hidden_size) - - weights = weights.reshape([ - weights.shape[0], - 4, # gates - hidden_size - ]) - - biases = biases.reshape([ - 4, # gates - hidden_size - ]) - - # Reorder gates icfo --> fico for both weights and biases - gate_reorder = [2, 0, 1, 3] - weights = np.take(weights, gate_reorder, axis=1) - biases = np.take(biases, gate_reorder, axis=0) - - # shift_const.value should be added to the first 1/4th part of the biases (f-gate: 0) - # Note: in case of moving this code up before gate reordering, the addition - # should be applied at different place - biases[0] += shift_const - - # Return to the original shapes - weights = weights.reshape([weights.shape[0], -1]) - biases = biases.flatten() - - # TF stores weights in IO, but OV requires it in OI: transpose - weights = weights.transpose() - - weights_node.value = weights - weights_node.shape = int64_array(weights.shape) - biases_node.value = biases - biases_node.shape = int64_array(biases.shape) - - attrs = dict(graph.get_edge_data(match['gather_1'].id, match['gather_1_data'].id)[0]) - attrs.update({'out': 2}) - graph.remove_edge(match['BlockLSTM'].id, match['concatenated_cell_states_data'].id) - graph.remove_edge(match['gather_1'].id, match['gather_1_data'].id) - - match['BlockLSTM'].add_output_port(attrs['out']) - graph.add_edge(match['BlockLSTM'].id, match['gather_1_data'].id, **attrs) - - """ - #3 Renumbering h_init_state, c_init_state input ports to match RNNSequence ports order. 
- """ - h_init_port = 4 - c_init_port = 5 - # c_init_state - if 4 in node.in_nodes(): - assert c_init_port not in node.in_nodes() - cell_state_edge = graph.get_edge_data(node.in_node(4).id, node.id) - cell_state_edge[0]['in'] = c_init_port - - # h_init_state - if 3 in node.in_nodes(): - assert h_init_port not in node.in_nodes() - hidden_state_edge = graph.get_edge_data(node.in_node(3).id, node.id) - hidden_state_edge[0]['in'] = h_init_port - - new_attrs = {'sequence_dim': 0, - 'batch_dim': 1, - 'direction': 'forward', - 'hidden_size': match['concatenated_hidden_states'].shape[-1], - 'format': 'tf', - } - - LSTM.update_node_stat(match['BlockLSTM'], new_attrs) - - """ - Optional #4 optimization from class description following - """ - data_to_mul = [n for n in match['mul'].in_nodes().values() if n.id != match['concatenated_hidden_states'].id] - if len(data_to_mul) != 1: - return # unexpected type of mul - data_to_mul = data_to_mul[0] - if not data_to_mul.has_valid('value'): - return # unexpected type of mul - data_to_mul_value = data_to_mul.value - if not np.all(data_to_mul_value == 1): - return # unexpected type of mul - - # remove useless mul - attrs = dict(graph.get_edge_data(match['BlockLSTM'].id, match['concatenated_hidden_states'].id)[0]) - graph.remove_edge(match['BlockLSTM'].id, match['concatenated_hidden_states'].id) - graph.remove_edge(match['mul'].id, match['mul_data'].id) - graph.add_edge(match['BlockLSTM'].id, match['mul_data'].id, **attrs) - - # find true usages of concatenated hidden states data (not last hidden state) - valid_output_names = ['mul_data', 'concat_0', 'concat_0_data', 'reshape_0', 'reshape_0_data', 'gather_0', - 'gather_0_data'] - valid_output_node_ids = [match[name].id for name in valid_output_names] - node_names_to_check_outputs = ['mul_data', 'concat_0_data', 'reshape_0_data'] - - list_of_concatenated_hidden_states_children_node_ids = [] - for name in node_names_to_check_outputs: - for node in match[name].out_nodes(): - if node.id not in valid_output_node_ids: - list_of_concatenated_hidden_states_children_node_ids.append(node.id) - - if len(list_of_concatenated_hidden_states_children_node_ids) != 1: - return # not supported placement of pattern - conacenated_child_node_id = list_of_concatenated_hidden_states_children_node_ids[0] - if conacenated_child_node_id != match['after_mul_op_to_the_rest_of_model'].id: - return # not supported placement of pattern - - gather_indexes = match['gather_0'].in_node(1).value - if len(gather_indexes) == 1: - gather_index = gather_indexes[0] - else: - return # we have to translate this type of BlockLSTM to LSTMSequence to TensorIterator as is - if gather_index != time_len: - return # we have to translate this type of BlockLSTM to LSTMSequence to TensorIterator as is - - attrs = dict(graph.get_edge_data(match['gather_0'].id, match['gather_0_data'].id)[0]) - attrs.update({'out': 1}) - graph.remove_edge(match['mul_data'].id, match['concat_0'].id) - graph.remove_edge(match['gather_0'].id, match['gather_0_data'].id) - - graph.add_edge(match['BlockLSTM'].id, match['gather_0_data'].id, **attrs) diff --git a/tools/mo/openvino/tools/mo/middle/CheckForCycle.py b/tools/mo/openvino/tools/mo/middle/CheckForCycle.py deleted file mode 100644 index df2f7ffe9ebac5..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/CheckForCycle.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import networkx as nx - -from openvino.tools.mo.graph.graph import Graph -from 
openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class CheckForCycle(MiddleReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - return [TensorIteratorMerge] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - is_acyclic = nx.is_directed_acyclic_graph(graph) - if not is_acyclic: - raise Error('Graph contains a cycle. Can not proceed. ' + refer_to_faq_msg(97)) diff --git a/tools/mo/openvino/tools/mo/middle/ConcatOptimization.py b/tools/mo/openvino/tools/mo/middle/ConcatOptimization.py deleted file mode 100644 index 123f5e5562f9a0..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ConcatOptimization.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class ConcatOdInputEraserAndPortsReconnect(MiddleReplacementPattern): - """ - The transformation performs two actions with Concat operations: - 1. Disconnects empty inputs (input tensor has at least one input dimension equal to 0) - 2. Renumber Concat inputs to be 0, 1, 2,... - """ - enabled = True - force_clean_up = True - - def find_and_replace_pattern(self, graph: Graph): - for concat in graph.get_op_nodes(type='Concat'): - for in_port in concat.in_ports().values(): - if not in_port.disconnected(): - shape = in_port.data.get_shape() - assert shape is not None - if 0 in shape: - concat.delete_input_port(in_port.idx) - - connected_ports = [port for port_idx, port in sorted(concat.in_ports().items()) if not port.disconnected()] - assert len(connected_ports), 'Concat "{}" have no inputs after removing inputs with 0 dimensions' \ - ''.format(concat.soft_get('name', concat.id)) - - max_port_index = max([port_idx for port_idx in concat.in_ports().keys()]) - # re-connect input ports sequentially and remove all not used - port_idx_to_connect = 0 - for port_idx in range(max_port_index + 1): - if concat.is_in_port_connected(port_idx): - if port_idx != port_idx_to_connect: - concat.add_input_port(port_idx_to_connect, skip_if_exist=True) - concat.in_port(port_idx).get_connection().set_destination(concat.in_port(port_idx_to_connect)) - port_idx_to_connect += 1 - elif port_idx in concat.in_ports(): - concat.delete_input_port(port_idx) diff --git a/tools/mo/openvino/tools/mo/middle/ConstSwitchResolver.py b/tools/mo/openvino/tools/mo/middle/ConstSwitchResolver.py deleted file mode 100644 index f6bda280efdc93..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ConstSwitchResolver.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.eliminate import remove_op_node_with_data_node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class ConstSwitchEraser(MiddleReplacementPattern): - """ - Erases switch operation and its constant input after data and control flow infer - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def find_and_replace_pattern(self, graph: Graph): - for node in 
graph.pseudo_topological_sort(): - if node.kind == 'data' or node.op != 'Switch': - continue - switch_op_node = node - pred_id_data_node = switch_op_node.in_node(1) - graph.remove_edge(pred_id_data_node.id, switch_op_node.id) - remove_op_node_with_data_node(graph, switch_op_node) diff --git a/tools/mo/openvino/tools/mo/middle/ConvToBinaryConv.py b/tools/mo/openvino/tools/mo/middle/ConvToBinaryConv.py deleted file mode 100644 index d9875258983d4d..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ConvToBinaryConv.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Mul, Add -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class ConvToBinaryConv(MiddleReplacementPattern): - """ Transform usual convolution with [0,+1] input and [-1,+1] to BinaryConvolution - - Modifies output terms after the Convolution to be able to apply BinaryConvolution - operation instead that accepts [-1,1] input and [-1,1] weights. It requires modification - channel-wise addition with weights reduced along all axis except output channel dimension. - """ - enabled = True - force_clean_up = True - - def pattern(self): - return dict( - nodes=[ - # This pass is applicable for binarization only. Other intX variants are not relevant. - ('quantize', dict(kind='op', op='FakeQuantize', levels=2)), - ('quantized', dict()), # input tensor, not weights - ('operator', dict(kind='op', type='Convolution')), - ], - edges=[ - ('quantize', 'quantized'), - ('quantized', 'operator', {'in':0}), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - assert match['operator'].has('multiplication_transparent_ports') - - quantize = match['quantize'] - - port = match['operator'].input_ports_with(match['quantized']) - assert len(port) >= 1 - if len(port) > 1: - log.debug('BinarizeWeightsM1P1 cannot apply transformation for data {} because it consumed more' - ' than once'.format(match['quantized'].name)) - return - - assert len(port) == 1 - port = port[0] - applicable = [pair for pair in match['operator'].multiplication_transparent_ports if pair[0] == port] - if len(applicable) == 0: - return - - # Look at 3-rd and 4-th inputs of FakeQuantize -- they have constants that should be passed through. - # Assume that the constant that should be passed through is a scalar. 
- output_low = quantize.in_node(3) - output_high = quantize.in_node(4) - assert len(output_low.out_nodes()) == 1 - assert len(output_high.out_nodes()) == 1 - - if not output_low.has_valid('value') and not output_high.has_valid('value'): - return - - output_low = output_low.value - output_high = output_high.value - - operator = match['operator'] - - weights = operator.in_node(1).value - weights_rounded = np.round(weights) - weights_consistent = np.all(np.isclose(weights, weights_rounded)) and \ - set(np.unique(weights_rounded)).issubset({-1, 1}) - - if weights_consistent and np.all(np.isclose(output_low, 0)) and np.all(np.isclose(output_high, 1)): - reduction_indices = set(range(len(weights.shape))) - set([operator.output_feature_channel]) - weights_reduced = np.add.reduce(weights, axis=tuple(reduction_indices)) - weights_reduced = weights_reduced.reshape([len(weights_reduced), 1, 1]) # FIXME: works for NCHW only - - operator_name = operator.soft_get('name', operator.id) - add = create_op_node_with_second_input(graph, Add, weights_reduced, {'name': operator_name + '/Add_'}) - mul = create_op_node_with_second_input(graph, Mul, mo_array(0.5), {'name': operator_name + '/Mul_'}) - - add.out_port(0).connect(mul.in_port(0)) - - operator.out_port(0).get_connection().set_source(mul.out_port(0)) - add.in_port(0).connect(operator.out_port(0)) - - operator['pad_value'] = float(-1.0) - elif weights_consistent and np.all(np.isclose(output_low, -1)) and np.all(np.isclose(output_high, +1)): - pass - else: - log.debug('ConvToBinaryConv: cannot apply transformation because input range is neither in [0, +1] nor ' - 'in [-1, +1].') - return - - operator['type'] = 'BinaryConvolution' - operator['mode'] = 'xnor-popcount' - operator['pad_value'] = operator.soft_get('pad_value', float(0)) - operator['input'] = operator.in_node(0).shape[1] - # Weights are not bit-packed yet; there should be a separate transformation to do that - - assert output_low.size == 1 - assert output_high.size == 1 - - output_low = quantize.in_node(3) - output_high = quantize.in_node(4) - - # Make sure that low/high values are exactly 0/1 - output_low.value = np.zeros(output_low.shape, dtype=np.float32) - output_high.value = np.ones(output_high.shape, dtype=np.float32) diff --git a/tools/mo/openvino/tools/mo/middle/ConvertGroupedStridedSlice.py b/tools/mo/openvino/tools/mo/middle/ConvertGroupedStridedSlice.py deleted file mode 100644 index 01a823f98a8439..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ConvertGroupedStridedSlice.py +++ /dev/null @@ -1,292 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from copy import deepcopy - -import numpy as np - -from openvino.tools.mo.middle.SliceConverter import ConvertSlice -from openvino.tools.mo.ops.split import VariadicSplit -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph, Node, add_opoutput -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.ops.unsqueeze import Unsqueeze -from openvino.tools.mo.utils.utils import unique_by - - -def strided_slices_equality(lhs: Node, rhs: Node) -> bool: - """ - Equality criterion for StridedSlice layers. 
:param lhs: the first StridedSlice layer - :param rhs: the second StridedSlice layer - :return: True, if lhs and rhs have identical attributes 'slices', 'begin_mask', 'end_mask', 'ellipsis_mask', - 'new_axis_mask', 'shrink_axis_mask', and False otherwise. - """ - for attr in ['slices', 'new_axis_mask', 'shrink_axis_mask', 'begin_mask', 'end_mask', 'ellipsis_mask']: - if not np.array_equal(lhs[attr], rhs[attr]): - return False - return True - - -class ConvertGroupedStridedSlice(MiddleReplacementPattern): - """ - This pass converts subgraphs where StridedSlices are used to split a single channel into a single Split layer - If the StridedSlices do not consume the entire tensor, fake outputs will be created for the Split layer - For example: - Let's suppose we have the following graph: - Data(1,H,W,54) - |`---->Sslice1_out (1,H,W,(10,18)) - `---->Sslice2_out (1,H,W,(18,36)) - - In this case the StridedSlices take only [10, 36] from the input tensor in the 3rd dim - So this pass will convert this graph to the following one: - Split(1,H,W,54) - |`---->Fake_data (1,H,W,10) - |`---->Sslice1_out (1,H,W,8) - |`---->Sslice2_out (1,H,W,18) - `----->Fake_data (1,H,W,18) - Where Fake_data are data nodes that do not have any consumers. - """ - - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.StridedSliceNormalizer import StridedSliceNormalizer - return [ConvertSlice, StridedSliceNormalizer] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleFinish - return [MiddleFinish] - - def find_and_replace_pattern(self, graph: Graph): - # Iterate over all data nodes and find all with >= 1 consumers - for input_data in list(graph.get_data_nodes()): - # We don't use constant data nodes - if input_data.value is not None: - continue - - if input_data.shape is None: - continue - input_shape = shape_array(input_data.shape) - - # Get all unique StridedSlice consumers - out_nodes = [node for node in input_data.out_nodes() if node.op == 'StridedSlice' and - node.in_node(0).id == input_data.id] - - if len(out_nodes) <= 1: - continue - - valid_for_replacement = True - for n in out_nodes: - if any(not isinstance(s, slice) for s in n.slices): - # this is a slice with dynamic dimension. 
Such operation is not valid for replacement - valid_for_replacement = False - if not valid_for_replacement: - continue - - sorted_out_nodes = sorted(out_nodes, key=lambda n: list(n.slices)) - out_nodes = unique_by(sorted_out_nodes, strided_slices_equality) - # if there is only one StridedSlice out_node with unique 'slices', - # there is nothing to optimize, continue to the next data node - if len(out_nodes) <= 1: - continue - - for node in out_nodes: - if len(node.slices) != len(out_nodes[0].slices): - valid_for_replacement = False - - # Detect dimension for splitting - split_channel_dim = None - for dim_id, s in enumerate(out_nodes[0].slices): - l, r, stride = s.start, s.stop, s.step - # if both l and r are None then the dimension is not sliced - if (l != 0 or r != input_shape[dim_id]) and (l is not None or r is not None): - if split_channel_dim is None: - split_channel_dim = dim_id - else: - valid_for_replacement = False - - if split_channel_dim is None: - valid_for_replacement = False - - # split_dims contains tuples with split range and output data node - split_dims = [] - for out_id, node in enumerate(out_nodes): - # Check that StridedSlice op has stride eq 1 and splits only feature channel - for id, s in enumerate(node.slices): - l, r, stride = s.start, s.stop, s.step - # We don't support StridedSlice with stride != 1 - if stride != 1: - valid_for_replacement = False - if id == split_channel_dim: - split_dims.append((s.start, s.stop, node.out_node())) - - if not valid_for_replacement: - continue - - # Check feature split intersection - final_data_nodes_list = [] - sorted_split_dims = sorted(split_dims, key=lambda item: (item[0], item[1])) - - # check if we have similar StridedSlice operations with different outputs - prev_sd = sorted_split_dims[0] - to_remove = [] - for i in range(1, len(sorted_split_dims)): - if sorted_split_dims[i][0] == prev_sd[0] and sorted_split_dims[i][1] == prev_sd[1] and sorted_split_dims[i][2].name != prev_sd[2].name: - cur_node = sorted_split_dims[i][2] - for out in cur_node.out_nodes(): - attrs = deepcopy(graph.get_edge_data(cur_node.id, out.id)[0]) - graph.remove_edge(cur_node.id, out.id) - graph.add_edge(prev_sd[2].id, out.id, **attrs) - to_remove.append(i) - - for ind in reversed(to_remove): - sorted_split_dims.pop(ind) - - size_splits = [] - prev_r = 0 - for l, r, out in sorted_split_dims: - # Split dims shouldn't intersect - if l < prev_r: - valid_for_replacement = False - prev_r = r - - if prev_r > input_shape[split_channel_dim]: - valid_for_replacement = False - - if not valid_for_replacement: - continue - - prev_r = 0 - for l, r, out in sorted_split_dims: - # Save missing tensor part - if l > prev_r: - shape = mo_array(input_shape) - size_splits.append(l - prev_r) - shape[split_channel_dim] = l - prev_r - data_node = Op._create_data_node(graph, 'fake_data_'+out_nodes[0].name, {'shape': shape}) - add_opoutput(graph, data_node.id, 0, False, keep_output_port=True) - final_data_nodes_list.append(data_node) - - prev_r = r - size_splits.append(r - l) - final_data_nodes_list.append(out) - - if prev_r < input_shape[split_channel_dim]: - # Add last part of tensor - shape = input_shape.copy() - shape[split_channel_dim] = input_shape[split_channel_dim] - prev_r - size_splits.append(input_shape[split_channel_dim] - prev_r) - data_node = Op._create_data_node(graph, 'fake_data_'+out_nodes[0].name, {'shape': shape}) - add_opoutput(graph, data_node.id, 0, False, keep_output_port=True) - final_data_nodes_list.append(data_node) - - for node in out_nodes: - if not 
np.all([x == 0 for x in node.shrink_axis_mask]): - out_node = node.out_node() - if np.any(node['shrink_axis_mask']): - self.add_squeeze_for_shrink(graph, node) - if np.any(node['new_axis_mask']): - self.add_unsqueeze_for_new(graph, node) - - for i in range(len(final_data_nodes_list)): - if final_data_nodes_list[i].name == out_node.name: - final_data_nodes_list[i] = node.out_node() - break - - # Insert Split layer and remove old StridedSlice layers - # 1. Remove connections from input_data to StridedSlice ops - out_data_nodes = [] - name_for_future_split = out_nodes[0].name - for node in out_nodes: - out_data_nodes.append(node.out_node()) - graph.remove_edge(input_data.id, node.id) - graph.remove_edge(node.id, node.out_node().id) - graph.remove_node(node.id) - log.debug("Removed: {}".format(node.id)) - - # 2. Create Split layer and reorder outputs - name = name_for_future_split + "/Split" - axis_const = Const(graph, {'value': int64_array(split_channel_dim), - 'name': name + '/Axis'}).create_node_with_data() - size_splits_const = Const(graph, {'value': int64_array(size_splits), - 'name': name + '/Sizes'}).create_node_with_data() - split = VariadicSplit(graph, dict(name=name, out_ports_count=len(size_splits))) - - split.create_node_with_data(inputs=[input_data, axis_const, size_splits_const], - data_nodes=final_data_nodes_list) - - @staticmethod - def add_squeeze_for_shrink(graph: Graph, ss_node: Node): - # add Squeeze for shrink_axis_mask - log.info("StridedSlice op with shrink mask '{}' has been detected".format(ss_node.id)) - - if len(ss_node.in_nodes()) != 4 or len(ss_node.out_nodes()) != 1: - return - - shape_out = ss_node.out_node().shape - dim = mo_array(range(len(ss_node['shrink_axis_mask'])))[mo_array(ss_node['shrink_axis_mask'], dtype=bool)] - ss_shape = [] - i = 0 - k = 0 - - # Don't permute reshape if channels were squeezed - dont_permute = graph.graph['layout'] == 'NCHW' - if graph.graph['layout'] == 'NHWC' and ss_node['shrink_axis_mask'][-1] == 1: - dont_permute = True - - while k < len(shape_out): - if i >= len(ss_node['shrink_axis_mask']) or not ss_node['shrink_axis_mask'][i]: - ss_shape.append(shape_out[k]) - k = k + 1 - else: - ss_node['shrink_axis_mask'][i] = 0 - ss_shape.append(1) - i = i + 1 - - while i < len(ss_node['shrink_axis_mask']): - ss_node['shrink_axis_mask'][i] = 0 - ss_shape.append(1) - i = i + 1 - - ss_node.out_port(0).data.set_shape(ss_shape) - - # insert Squeeze - squeeze_node = Squeeze(graph, dict(name=ss_node.name + '/Squeeze_shrink', - nchw_layout=dont_permute, - correct_data_layout=dont_permute)).create_node() - ss_node.out_port(0).get_connection().insert_node(squeeze_node) - squeeze_node.out_port(0).data.set_shape(shape_out) - - dims_node = Const(graph, {'name': squeeze_node.id + '/Indices', 'value': int64_array(dim)}).create_node() - dims_node.out_port(0).connect(squeeze_node.in_port(1)) - - @staticmethod - def add_unsqueeze_for_new(graph: Graph, ss_node: Node): - log.info("StridedSlice op with new axis mask '{}' has been detected".format(ss_node.id)) - if len(ss_node.in_nodes()) != 4 or len(ss_node.out_nodes()) != 1: - return - - shape_out = ss_node.out_node().shape - dim = mo_array(range(len(ss_node['new_axis_mask'])))[mo_array(ss_node['new_axis_mask'], dtype=bool)] - ss_shape = [] - for i in range(0, len(ss_node['new_axis_mask'])): - if not ss_node['new_axis_mask'][i]: - ss_shape.append(shape_out[i]) - else: - ss_node['new_axis_mask'][i] = 0 - - ss_node.out_port(0).data.set_shape(ss_shape) - - # insert Unsqueeze - unsqueeze_node = Unsqueeze(graph, 
dict(name=ss_node.name + '/Unsqueeze_new')).create_node() - ss_node.out_port(0).get_connection().insert_node(unsqueeze_node) - unsqueeze_node.out_port(0).data.set_shape(shape_out) - - dims_node = Const(graph, {'name': unsqueeze_node.id + '/Indices', 'value': int64_array(dim)}).create_node() - dims_node.out_port(0).connect(unsqueeze_node.in_port(1)) diff --git a/tools/mo/openvino/tools/mo/middle/ConvertLayoutDependentOperations.py b/tools/mo/openvino/tools/mo/middle/ConvertLayoutDependentOperations.py deleted file mode 100644 index cf4d70dde64922..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ConvertLayoutDependentOperations.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.layout import indices_mapping -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import Op, PermuteAttrs - - -class ConvertLayoutDependentOperations(MiddleReplacementPattern): - """ - This pass finds all convolutions and in case if layout of convolution differs from graph layout - we insert permutes before and after convolution and convert convolution attributes - """ - - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def find_and_replace_pattern(self, graph: Graph): - for node in list(graph.nodes()): - node = Node(graph, node) - node_name = node.soft_get('name', node.id) - # Check that node layout mismatch with graph layout - # For example: NHWC and NCHW or NCDHW and NDHWC - if node.kind == 'op' and node.has_valid('layout') and node.layout != indices_mapping[len(node.layout)][ - graph.graph['layout']]: - input = node.in_node() - output = node.out_node() - - # Calculate permutation for further Transpose operations - if graph.graph['layout'] == 'NCHW': - # if Node has NCHW and graph has NHWC layout - permutation = PermuteAttrs.get_nhwc_to_nchw_permutation(len(node.layout)) - else: - # if Node has NHWC and graph has NCHW layout - permutation = PermuteAttrs.get_nchw_to_nhwc_permutation(len(node.layout)) - - # Schematic representation of transformation below - # - # \ NCHW NCHW - # NHWC -- \ | permutation permutation | - # data-->Convolution(example)-->data -- / | | NCHW | | - # / data->Transpose->data->Convolution->data->Transpose->data - - # 1. Insert input Transpose - # This Transpose will permute input from original input layout to operation layout - edge_attrs = graph.get_edge_data(input.id, node.id)[0] - graph.remove_edge(input.id, node.id) - - input_permute_name = node_name + '/input_transpose' - input_order_const = Const(graph, {'name': input_permute_name + '/order', - 'value': permutation.perm}).create_node_with_data() - input_permute_op = Transpose(graph, {'name': input_permute_name}) - input_permute_data_node = input_permute_op.create_node_with_data([input, input_order_const]) - - graph.add_edge(input_permute_data_node.id, node.id, **edge_attrs) - - # 2. 
Insert output Transpose - # This Transpose will permute output from operation layout to original input layout - edge_attrs = graph.get_edge_data(node.id, output.id)[0] - graph.remove_edge(node.id, output.id) - - input_data_node = Op.create_data_node(graph, node, {'shape': output.shape[permutation.perm]}, - edge_attrs) - - output_permute_name = node_name + '/output_transpose' - output_order_const = Const(graph, {'name': output_permute_name + '/order', - 'value': permutation.inv}).create_node_with_data() - output_permute_op = Transpose(graph, {'name': output_permute_name} - ).create_node_with_data([input_data_node, output_order_const], - data_nodes=output) - - # 3. Add permutations for Node - # Here we use permutation mechanism where data nodes takes permutation attribute. - # And then we call permute_attrs method that permutes node attributes according to permutations on - # data nodes. - node.in_node()['permutation'] = permutation - node.out_node()['permutation'] = permutation - node.permute_attrs.permute_attrs(node) - - node.in_node()['permutation'] = None - node.out_node()['permutation'] = None diff --git a/tools/mo/openvino/tools/mo/middle/ConvertMultiInputConv.py b/tools/mo/openvino/tools/mo/middle/ConvertMultiInputConv.py deleted file mode 100644 index a0a3856f20eb2a..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ConvertMultiInputConv.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import copy - -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class ConvertMultiInputConv(MiddleReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import PreMiddleStart - return [PreMiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def pattern(self): - return dict( - nodes=[('op', dict(kind='op', op='ConvND'))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['op'] - node.op = 'Conv2D' - - if node.bias_term: - num_inputs = len(node.in_nodes()) - 2 - w_node = node.in_node(len(node.in_nodes()) - 2) - b_node = node.in_node(len(node.in_nodes()) - 1) - else: - num_inputs = len(node.in_nodes()) - 1 - w_node = node.in_node(len(node.in_nodes()) - 1) - - for i in range(1, num_inputs): - in_i = node.in_node(i) - out_i = node.out_node(i) - conv_id = graph.unique_id(node.id + '__') - graph.add_node(conv_id, **copy.deepcopy(node.get_attrs())) - new_conv = Node(graph, conv_id) - new_conv.name = conv_id - - graph.remove_edge(in_i.id, node.id) - graph.remove_edge(node.id, out_i.id) - graph.add_edges_from([ - (w_node.id, conv_id, {'in': 1, 'bin': 'weights'}), - ]) - - if node.bias_term: - graph.add_edges_from([ - (b_node.id, conv_id, {'in': 2, 'bin': 'biases'}), - ]) - - graph.add_edges_from([ - (in_i.id, conv_id, {'in': 0}), - ]) - graph.add_edge(conv_id, out_i.id, **{'out': 0}) diff --git a/tools/mo/openvino/tools/mo/middle/CustomSubgraphCall.py b/tools/mo/openvino/tools/mo/middle/CustomSubgraphCall.py deleted file mode 100644 index 092acf10d18e64..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/CustomSubgraphCall.py +++ /dev/null @@ -1,318 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import copy -import logging as log - -import numpy as np -import os - -# do not print INFO and WARNING messages from 
TensorFlow -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' - -from openvino.tools.mo.front.common.layout import nhwc_to_nchw_permute -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, shape_insert -from openvino.tools.mo.front.extractor import update_ie_fields -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.graph.graph import Node, add_opoutput -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - -nchw_to_nhwc_constant_name = 'IE_NCHW_TO_NHWC' -nhwc_to_nchw_constant_name = 'IE_NHWC_TO_NCHW' - - -class CustomSubgraphCall(MiddleReplacementPattern): - enabled = True - force_clean_up = True - graph_condition = [lambda graph: graph.graph['fw'] == 'tf'] - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import PreMiddleStart - return [PreMiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - @staticmethod - def update_placeholders(graph: Graph): - """ - Iterates over all nodes of the graph, find all TF sub-graph call operations and updates placeholders shapes and adds - transpose operation if necessary. - :param graph: graph to operate on - :return: None - """ - for node in graph.get_op_nodes(op='TFCustomSubgraphCall'): - CustomSubgraphCall.update_placeholder_shape_and_add_transpose(node) - - @staticmethod - def update_placeholder_shape_and_add_transpose(node: Node): - """ - The function changes placeholders shapes from NHWC to NCHW format and add transpose operations if needed. - :param node: node to operate on. - :return: None - """ - try: - import tensorflow.compat.v1 as tf_v1 - except ImportError: - import tensorflow as tf_v1 - # in some environment suppressing through TF_CPP_MIN_LOG_LEVEL does not work - tf_v1.get_logger().setLevel("ERROR") - - from openvino.tools.mo.front.common.layout import convert_shape, nhwc_to_nchw_permute, nchw_to_nhwc_permute - from openvino.tools.mo.front.tf.extractors.utils import tf_tensor_shape - from openvino.tools.mo.front.tf.partial_infer.tf import add_node_def_to_subgraph, update_input_in_pbs - - tf_v1.reset_default_graph() - - inputs_replacements = list() - - # transpose permutation constant - nchw_to_nhwc_constant = tf_v1.constant(nchw_to_nhwc_permute, dtype=tf_v1.int32, name=nchw_to_nhwc_constant_name) - nhwc_to_nchw_constant = tf_v1.constant(nhwc_to_nchw_permute, dtype=tf_v1.int32, name=nhwc_to_nchw_constant_name) - - for placeholder_name in node['input_nodes_names']: - # dummy node which we can refer to as input in the transpose for the output node - # dummy node should be unique for each placeholder - dummy_node = tf_v1.constant(value=[[[[1]]]], dtype=tf_v1.float32, - name='random_dummy_name_' + placeholder_name) - - placeholder = node['pbs'][placeholder_name] - cur_shape = tf_tensor_shape(placeholder.attr['shape'].shape) - if len(cur_shape) == 4: # TODO think about better check that transpose is required - nchw_shape = convert_shape(cur_shape, nhwc_to_nchw_permute) - for ind in range(len(cur_shape)): - placeholder.attr['shape'].shape.dim[ind].size = nchw_shape[ind] - transpose_name = placeholder.name + '_transpose' - transpose = tf_v1.transpose(dummy_node, nchw_to_nhwc_constant, transpose_name) # NCHW -> NHWC - - # add transpose operations to GraphDef after placeholders - add_node_def_to_subgraph(node, transpose.op.node_def, transpose_name, len(node['input_nodes_names'])) - 
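The placeholder update above rewrites 4D NHWC shapes to NCHW and wires in transposes; a small sketch of the shape permutation it relies on (the permutation tuples here are assumptions mirroring the constants used by the pass):

nhwc_to_nchw = (0, 3, 1, 2)   # assumed 4D permutation
nchw_to_nhwc = (0, 2, 3, 1)

def convert_shape(shape, perm):
    # reorder the dimensions of a static shape according to a permutation
    return tuple(shape[i] for i in perm)

assert convert_shape((1, 224, 224, 3), nhwc_to_nchw) == (1, 3, 224, 224)
assert convert_shape((1, 3, 224, 224), nchw_to_nhwc) == (1, 224, 224, 3)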
inputs_replacements.append((placeholder.name, transpose_name)) - inputs_replacements.append((dummy_node.name, placeholder.name)) - node['real_input_dims'].append(nchw_shape) - else: - node['real_input_dims'].append(cur_shape) - add_node_def_to_subgraph(node, nchw_to_nhwc_constant.op.node_def) - add_node_def_to_subgraph(node, nhwc_to_nchw_constant.op.node_def) - - # update initial input names to a transposed ones - for old_input_tensor_name, new_name in inputs_replacements: - update_input_in_pbs(node, old_input_tensor_name, new_name) - - @staticmethod - def add_output_nodes_transposes(graph: Graph): - """ - Iterates over all nodes of the graph, find all TF sub-graph call operations and adds Transpose operations to the - output nodes if they are 4D to covert output from NHWC to NCHW. - :param graph: graph to operate on - :return: None - """ - for node in graph.get_op_nodes(op='TFCustomSubgraphCall'): - CustomSubgraphCall.add_sub_graph_call_output_tensors_transposes(node) - - @staticmethod - def make_shape_4d(shape: np.array): - """ - Create 4D tensor from 1D, 2D or 3D by adding new dimensions of size 1. - :param shape: shape to extend. - :return: 4D tensor. - """ - new_shape = shape_array(shape) - old_shape_len = len(shape) - - # TODO think about proper way to add additional dimensions considering layout - for x in range(4 - old_shape_len): - # if the shape is 0D or 1D then we should add additional dimensions to batch dimension - if len(new_shape) <= 1: - new_shape = shape_insert(new_shape, 0, 1) - else: - new_shape = shape_insert(new_shape, 1, 1) - return new_shape - - @staticmethod - def add_reshape_before_op_node(graph: Graph, data_node_name: str, op_node_name: str, edge_attrs: dict): - """ - Adds reshape operation which expands dimension of the specified data tensor to 4D. - :param graph: graph to operate on. - :param data_node_name: the name of the data node to be reshaped to 4D tensor. - :param op_node_name: name of the TFCustomSubgraphCall node which produces the tensor. - :param edge_attrs: edge attributes which should be preserved. 
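make_shape_4d above pads a 0D-3D shape to 4D, growing 0D/1D shapes at the batch position and longer shapes at axis 1; a numpy-only sketch of that rule (helper name is illustrative):

import numpy as np

def to_4d(shape):
    shape = list(shape)
    while len(shape) < 4:
        # 0D/1D shapes grow at the batch position, longer shapes at axis 1
        shape.insert(0 if len(shape) <= 1 else 1, 1)
    return np.array(shape, dtype=np.int64)

assert list(to_4d([10])) == [1, 1, 1, 10]
assert list(to_4d([2, 3, 4])) == [2, 1, 3, 4]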
- :return: None - """ - data_node = Node(graph, data_node_name) - - graph.remove_edge(data_node_name, op_node_name) - - assert data_node['shape'] is not None - - new_shape = CustomSubgraphCall.make_shape_4d(data_node['shape']) - - # reshape shape data node - reshape_shape_data_node_name = graph.unique_id("Reshape_shape_") - graph.add_node(reshape_shape_data_node_name, kind='data', name=reshape_shape_data_node_name, value=new_shape, - shape=[1]) - - # reshape operation node - reshape_node_name = graph.unique_id("Reshape_") - graph.add_node(reshape_node_name, kind='op', type='Reshape', name=reshape_node_name, op='Reshape', - data_type=data_node['data_type']) - update_ie_fields(graph.node[reshape_node_name]) - - # reshaped data node - reshaped_value = None - if data_node['value'] is not None: - reshaped_value = np.reshape(data_node['value'], new_shape) - reshaped_data_node_name = graph.unique_id("reshaped_data_") - graph.add_node(reshaped_data_node_name, kind='data', name=reshaped_data_node_name, shape=new_shape, - value=reshaped_value, nchw_layout=True) - - graph.add_edges_from([ - (data_node_name, reshape_node_name, {'in': 0}), - (reshape_shape_data_node_name, reshape_node_name, {'in': 1}), - (reshape_node_name, reshaped_data_node_name, {'out': 0}), - (reshaped_data_node_name, op_node_name, edge_attrs) - ]) - - @staticmethod - def add_reshape_after_data_node(graph: Graph, data_node_name: str): - """ - Adds reshape operation which changes shape of the tensor produced by TFSubgraphCall from 4D to real dimension - of the tensor. The data_node_name node contains real dimensions of the tensor but they will be changed in the - add_reshapes_for_tf_subgraph_calls function to a 4D because OV TF call layer supports output in 4D only. - :param graph: graph to operate on. - :param data_node_name: name of the data node to be reshaped to correct dimensions. 
- :return: None - """ - data_node = Node(graph, data_node_name) - - # if the data node was previously marked as output then we need to mark as output new reshaped data node - is_out_node = False - if len(data_node.out_nodes()) == 1 and data_node.out_node().has('op') and data_node.out_node().op == 'Result': - is_out_node = True - graph.remove_node(data_node.out_node().id) - - # save old consumers nodes with edge attributes - old_consumer_nodes_with_attrs = list() - for index, out_op in enumerate(data_node.out_nodes()): - edge_attrs = graph.get_edge_data(data_node_name, out_op.name)[0] - old_consumer_nodes_with_attrs.append((out_op.name, edge_attrs)) - - # remove old consumers from the data node - for out_op in list(data_node.out_nodes()): - graph.remove_edge(data_node_name, out_op.name) - - # reshape operation node - reshape_node_name = graph.unique_id("Reshape_") - graph.add_node(reshape_node_name, kind='op', type='Reshape', name=reshape_node_name, op='Reshape', - data_type=data_node['data_type']) - update_ie_fields(graph.node[reshape_node_name]) - - # reshape shape data node - reshape_shape_data_node_name = graph.unique_id("Reshape_shape_") - graph.add_node(reshape_shape_data_node_name, kind='data', name=reshape_shape_data_node_name, - value=mo_array(data_node['shape']), shape=[1]) - - # reshaped data node - reshaped_value = None - if data_node['value'] is not None: - reshaped_value = mo_array(data_node['value']) - reshaped_data_node_name = graph.unique_id("reshaped_data_") - graph.add_node(reshaped_data_node_name, kind='data', name=reshaped_data_node_name, - shape=mo_array(data_node['shape']), value=reshaped_value, nchw_layout=True) - - if is_out_node: - add_opoutput(graph, reshaped_data_node_name, 0, False) - - graph.add_edges_from([ - (data_node_name, reshape_node_name, {'in': 0}), - (reshape_shape_data_node_name, reshape_node_name, {'in': 1}), - (reshape_node_name, reshaped_data_node_name, {'out': 0}), - ]) - - for out_node_name, edge_attrs in old_consumer_nodes_with_attrs: - graph.add_edges_from([ - (reshaped_data_node_name, out_node_name, edge_attrs) - ]) - - @staticmethod - def add_reshapes_for_tf_subgraph_calls(graph: Graph): - """ - Input and output tensors of the TFCustomSubgraphCall must be 4D because OV layer accepts and produces only 4D - tensors. This function adds reshape operations where it is necessary. - :param graph: graph to operate on. - :return: None. 
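The pass forces the custom call's tensors through a 4D layout and then restores the real shape with an inserted Reshape; a tiny numpy illustration of why that detour is lossless (shapes are made up):

import numpy as np

real_shape = (2, 3)                                # real dims of the produced tensor
value = np.arange(6, dtype=np.float32).reshape(real_shape)
as_4d = value.reshape(2, 1, 1, 3)                  # 4D layout used by the TF call layer
assert np.array_equal(as_4d.reshape(real_shape), value)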
- """ - for src_node_name, dst_node_name, edge_attrs in list(graph.edges(data=True)): - src_node = Node(graph, src_node_name) - dst_node = Node(graph, dst_node_name) - if dst_node.kind == 'op' and dst_node.has_valid('type') and dst_node.type == 'TFCustomSubgraphCall' and \ - src_node.has_valid('shape') and len(src_node.shape) != 4: - log.info("There is an data tensor of shape '{}' which goes into '{}' node".format( - src_node.shape, dst_node.type)) - CustomSubgraphCall.add_reshape_before_op_node(graph, src_node_name, dst_node_name, edge_attrs) - - for node in graph.get_op_nodes(op='TFCustomSubgraphCall'): - for index, data_node in node.out_nodes().items(): - real_dims_count = len(data_node.shape) - if real_dims_count != 4: - log.info( - "There is an data tensor of shape '{}' with real dims count '{}' which goes out of '{}' " - "node".format(data_node.shape, real_dims_count, node.name)) - CustomSubgraphCall.add_reshape_after_data_node(graph, data_node.id) - - # need to update shape of the op so OV generates XML with 4D tensors - out_shape = CustomSubgraphCall.make_shape_4d(data_node['shape']) - - data_node['shape'] = out_shape - - @staticmethod - def add_sub_graph_call_output_tensors_transposes(node: Node): - """ - Adds transpose operations to the output nodes if they are 4D to change layout from NCHW to NHWC. - :param node: the node to add transposes to the output nodes to. - :return: None - """ - try: - import tensorflow.compat.v1 as tf_v1 - except ImportError: - import tensorflow as tf_v1 - # in some environment suppressing through TF_CPP_MIN_LOG_LEVEL does not work - tf_v1.get_logger().setLevel("ERROR") - - from openvino.tools.mo.front.tf.partial_infer.tf import get_subgraph_output_tensors, add_node_def_to_subgraph - _, output_tensors = get_subgraph_output_tensors(node) - - # transpose permutation constant - nhwc_to_nchw_constant = tf_v1.constant(nhwc_to_nchw_permute, dtype=tf_v1.int32, name=nhwc_to_nchw_constant_name) - - # dummy node which we can refer to as input in the transpose for the output node - dummy_node = tf_v1.constant(value=[[[[1]]]], dtype=tf_v1.float32, name='random_dummy_name') - - new_out_tensor_names = list() - for out_tensor_name in node['output_tensors_names']: - out_name, out_port = out_tensor_name.split(':') - if len(output_tensors[ - int(out_port)].shape) == 4: # TODO think about better check whether transpose is required - out_transpose_name = out_name + '_port_' + out_port + '_transpose' - transpose = tf_v1.transpose(dummy_node, nhwc_to_nchw_constant, name=out_transpose_name) - - # starting from TF 1.8 it is not possible to modify the "node_def" of the "tf.op", so we create a copy, - # update it and use further - new_input_names = transpose.op.node_def.input[:] - new_input_names[0] = out_tensor_name - new_node_def = copy.deepcopy(transpose.op.node_def) - new_node_def.input[:] = new_input_names - add_node_def_to_subgraph(node, new_node_def, position=len(node['nodes_order'])) - new_out_tensor_names.append(out_transpose_name) - else: - new_out_tensor_names.append(out_tensor_name) - - # update output tensor names with transposes operations - node['output_tensors_names'] = new_out_tensor_names - - def find_and_replace_pattern(self, graph: Graph): - CustomSubgraphCall.update_placeholders(graph) - CustomSubgraphCall.add_output_nodes_transposes(graph) - CustomSubgraphCall.add_reshapes_for_tf_subgraph_calls(graph) diff --git a/tools/mo/openvino/tools/mo/middle/CutInputHavingZeroDimFromConcat.py b/tools/mo/openvino/tools/mo/middle/CutInputHavingZeroDimFromConcat.py 
deleted file mode 100644 index 647aa5b9d7c151..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/CutInputHavingZeroDimFromConcat.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.concat import Concat - - -class CutInputHavingZeroDimFromConcat(MiddleReplacementPattern): - """ - This transformation deletes inputs of Concat having zeros in their shapes, if not all inputs have such shapes. - """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('concat', dict(type='Concat')) - ], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - concat_node = match['concat'] - sources_of_ports = [concat_node.in_port(i).get_connection().get_source() for i in concat_node.in_ports()] - # If 'concat' is ConcatV2 layer from TF, then this layer initially had input 'axis' as the last input. - # But then this input was deleted and the attribute 'axis' was added. Hence, the last port source can - # be None in such case. - sources_of_ports = [s for s in sources_of_ports if s is not None] - - input_nodes = [s.node for s in sources_of_ports] - if not all(n.has_valid('type') for n in input_nodes): - return - - saved_ports = [] - disconnected_ports = [] - - for port_num, node in enumerate(input_nodes): - if node.soft_get('type') == 'Const' and len(node.shape) > 1 and any(i == 0 for i in node.shape): - disconnected_ports.append(port_num) - else: - saved_ports.append(port_num) - - if not saved_ports or not disconnected_ports: - return - - if len(saved_ports) == 1: - before_concat = concat_node.in_port(saved_ports[0]).get_connection().get_source() - concat_node.out_port(0).get_connection().set_source(before_concat) - return - - new_concat_attrs = concat_node.attrs().copy() - new_concat_attrs['name'] = concat_node.name + '/Concat_' - new_concat_attrs['in_ports_count'] = len(saved_ports) - new_concat_node = Concat(graph, attrs=new_concat_attrs).create_node() - - for new_port_num, old_port_num in enumerate(saved_ports): - concat_node.in_port(old_port_num).get_connection().set_destination(new_concat_node.in_port(new_port_num)) - - for p in disconnected_ports: - concat_node.in_port(p).disconnect() - - concat_node.out_port(0).get_connection().set_source(new_concat_node.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/middle/DecomposeBias.py b/tools/mo/openvino/tools/mo/middle/DecomposeBias.py deleted file mode 100644 index 87c73b50f973b7..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/DecomposeBias.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Add -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.reshape import Reshape - - -class DecomposeBias(MiddleReplacementPattern): - enabled = True - force_clean_up = True - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('op', dict(kind='op', op=lambda op: op in ['Conv', 'ConvTranspose', 'Conv2D', - 'Conv3D', 'Conv2DBackpropInput', 'MatMul', - 'Conv3DBackpropInputV2', 'Convolution', - 'Deconvolution', 'ConvND', 
'Conv2D', 'Deconv2D']))], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - if node.has_port('in', 2) and not node.in_port(2).disconnected() and not node.has_and_set('shape_input'): - bias_name = node.name - new_node_name = node.name + '/WithoutBiases' - add = Add(graph, dict(name=bias_name)).create_node() - rename_nodes([(node, new_node_name), (add, bias_name)]) - node.out_port(0).get_connection().set_source(add.out_port(0)) - node.out_port(0).connect(add.in_port(0)) - node.in_port(2).get_connection().set_destination(add.in_port(1)) - - bias = add.in_port(1).get_source().node - if bias.has_valid("type") and bias.type == "Const": - input_shape = add.in_port(0).data.get_shape() - if len(input_shape) > 2: - dims_to_add = len(input_shape) - 2 if graph.graph['layout'] == 'NCHW' else 0 - if dims_to_add > 0: - reshape = create_op_node_with_second_input( - graph, Reshape, int64_array([input_shape[1]] + [1] * dims_to_add), - {'name': node.id + '/Dims'}) - add.in_port(1).get_connection().set_destination(reshape.in_port(0)) - reshape.out_port(0).connect(add.in_port(1)) diff --git a/tools/mo/openvino/tools/mo/middle/DecomposeBidirectionalRNNSequence.py b/tools/mo/openvino/tools/mo/middle/DecomposeBidirectionalRNNSequence.py deleted file mode 100644 index a199ef8f92ba95..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/DecomposeBidirectionalRNNSequence.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.split import Split -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import Op - - -class DecomposeBidirectionalRNNSequence(MiddleReplacementPattern): - """ - Decomposes bidirectional RNNSequence to forward and reverse RNNSequence ops. - - Both initial state are split to two part, two parts of the results are concatenated. - - Axis of split/concat is completely defined by ONNX recurrent layers specification. - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.MXNetRNNSequenceNormalize import MXNetRNNSequenceNormalize - from openvino.tools.mo.middle.ONNXRNNSequenceNormalize import ONNXRNNSequenceNormalize - return [ONNXRNNSequenceNormalize, MXNetRNNSequenceNormalize] - - def pattern(self): - return dict( - nodes=[ - ('lstm', dict(kind='op', type='RNNSequence', direction='bidirectional')), - ('input', dict(kind='data')), - ('W', dict(kind='data')), - ('R', dict(kind='data')), - ('B', dict(kind='data')), - ], - edges=[ - ('input', 'lstm', {'in': 0}), - ('W', 'lstm', {'in': 1}), - ('R', 'lstm', {'in': 2}), - ('B', 'lstm', {'in': 3}), - ] - ) - - @staticmethod - def split_helper(node: Node, index: int, direction: str, axis: int = 0): - return Op._create_data_node( - node.graph, - name=node.name + '/SplittedBiLSTM/{}/'.format(direction), - attrs={'value': np.take(node.value, [index], axis), - 'shape': shape_array(np.take(node.value, [index], axis).shape)} - ) - - def split_data(self, data: Node): - """ Helper. 
Split data node into two part along 0 axis """ - assert len(data.shape) == 3 - assert data.shape[0] == 2 - - output_data = [Op._create_data_node(data.graph, - name=data.name + '/SplittedBiLSTM/{}'.format(['forward', 'reverse'][i])) for - i in [0, 1]] - split_op = Split(data.graph, dict(name=data.name + '/DecomposedBiLSTM_0', num_splits=2)) - axis_const = Const(data.graph, {'name': data.name + '/DecomposedBiLSTM_0' + '/Split_axis', - 'value': np.int64(0)}).create_node_with_data() - return split_op.create_node_with_data([data, axis_const], data_nodes=output_data) - - def replace_pattern(self, graph: Graph, match: dict): - bidirectional_cell = match['lstm'] - new_init_hiddens = self.split_data(bidirectional_cell.in_node(5)) - new_init_cells = self.split_data(bidirectional_cell.in_node(6)) if 6 in bidirectional_cell.in_nodes() \ - else (None, None) - - blob_bidirectional_split = lambda node: ( - self.split_helper(node, 0, 'forward'), - self.split_helper(node, 1, 'reverse') - ) - - splitted_W = blob_bidirectional_split(bidirectional_cell.in_node(1)) - splitted_R = blob_bidirectional_split(bidirectional_cell.in_node(2)) - splitted_B = blob_bidirectional_split(bidirectional_cell.in_node(3)) - - outputs = self.split_bidirectional( - bidirectional_cell, - new_init_hiddens, - new_init_cells, - splitted_W, - splitted_R, - splitted_B, - ) - - self.concat_outputs(bidirectional_cell, outputs[0], outputs[1], bidirectional_cell.out_nodes()) - - @staticmethod - def get_new_cell(bidirectional_cell: Node, direction: str): - assert direction in ['forward', 'reverse'] - - cell_class = Op.get_op_class_by_name(bidirectional_cell.op) - new_cell = lambda graph, attrs: cell_class(graph, attrs) - attrs = bidirectional_cell.attrs().copy() - new_attrs = { - 'direction': direction, - 'name': bidirectional_cell.name + '/Split/' + direction, - } - attrs.update(new_attrs) - # split bidirectional activations - assert 'activations' in attrs - if attrs['activations'] is not None and len(attrs['activations']) > 1: - assert len(attrs['activations']) == 2, 'Bidirectional RNN should have 2 activations' - activations = attrs['activations'] - attrs['activations'] = [activations[0 if direction == 'forward' else 1]] - return new_cell(bidirectional_cell.graph, attrs) - - def split_bidirectional(self, - bidirectional_cell: Node, - new_init_hiddens: list, - new_init_cells: list, - splitted_W: tuple, - splitted_R: tuple, - splitted_B: tuple): - """ - Split one bidirectional RNNSequence node into 2 one-directional RNNSequence nodes. - - All input data nodes should be already prepared; they are - have 2 in the num_dir dimension. 
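The splitting described above cuts every bidirectional blob along the num_directions axis into a forward part and a reverse part; a minimal numpy sketch of that idea with an invented weight shape:

import numpy as np

bidirectional_w = np.random.rand(2, 4, 8).astype(np.float32)   # axis 0 = num_directions
forward_w = np.take(bidirectional_w, [0], axis=0)              # shape (1, 4, 8)
reverse_w = np.take(bidirectional_w, [1], axis=0)              # shape (1, 4, 8)
# concatenating the two halves back along axis 0 recovers the original blob
assert np.array_equal(np.concatenate([forward_w, reverse_w], axis=0), bidirectional_w)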
- """ - all_outputs = [] - for i in [0, 1]: - direction = ['forward', 'reverse'][i] - op = self.get_new_cell(bidirectional_cell, direction) - - output_data = Op._create_data_node( - bidirectional_cell.graph, - name=bidirectional_cell.out_node(0).name + '/Split/' + str(i), - attrs={'shape': bidirectional_cell.out_node(0).shape.copy()} - ) - - assert output_data.shape[1] == 2 - output_data.shape[1] = 1 - - output_hidden = Op._create_data_node( - bidirectional_cell.graph, - name=bidirectional_cell.out_node(1).name + '/Split/' + str(i), - attrs={'shape': bidirectional_cell.out_node(1).shape.copy()} - ) - - assert output_hidden.shape[0] == 2 - output_hidden.shape[0] = 1 - - data_nodes = [ - output_data, - output_hidden, - ] - - if bidirectional_cell.op == 'LSTM': - output_cell = Op._create_data_node( - bidirectional_cell.graph, - name=bidirectional_cell.out_node(2).name + '/Split/' + str(i), - attrs={'shape': bidirectional_cell.out_node(2).shape.copy()} - ) - - assert output_cell.shape[0] == 2 - output_cell.shape[0] = 1 - - data_nodes.append(output_cell) - - all_outputs.append( - op.create_node_with_data( - inputs=[ - bidirectional_cell.in_node(0), - splitted_W[i], - splitted_R[i], - splitted_B[i], - None, - new_init_hiddens[i], - new_init_cells[i] if bidirectional_cell.op == 'LSTM' else None, - ], - data_nodes=data_nodes - ) - ) - return all_outputs - - @staticmethod - def concat_outputs(bi_rnn, forward_outputs, reverse_outputs, final_outputs): - """ Concatenates two set of outputs from bidirectiondl RNNSequence nodes """ - concat_ops = [ - Concat(bi_rnn.graph, { - 'name': bi_rnn.name + '/FinalConcat/Data', - 'axis': 1, - 'in_ports_count': 2, - }), - Concat(bi_rnn.graph, { - 'name': bi_rnn.name + '/FinalConcat/HiddenState', - 'axis': 0, - 'in_ports_count': 2, - }), - Concat(bi_rnn.graph, { - 'name': bi_rnn.name + '/FinalConcat/CellState', - 'axis': 0, - 'in_ports_count': 2, - }) - ] - - bi_rnn.graph.remove_node(bi_rnn.id) - - for i in final_outputs: - concat_ops[i].create_node_with_data( - [forward_outputs[i], reverse_outputs[i]], - data_nodes=[final_outputs[i]] - ) diff --git a/tools/mo/openvino/tools/mo/middle/Deconvolution3rdInputNormalization.py b/tools/mo/openvino/tools/mo/middle/Deconvolution3rdInputNormalization.py deleted file mode 100644 index 307948afccc07e..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/Deconvolution3rdInputNormalization.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import PermuteAttrs - - -class Deconvolution3rdInputNormalization(MiddleReplacementPattern): - enabled = True - force_clean_up = True - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('op', dict(kind='op', type='Deconvolution'))], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - if not node.has_port('in', 2) or node.in_port(2).disconnected() or not node.has_and_set('shape_input'): - return - - if node.has_valid('layout') and not node.layout.startswith('NC') and graph.graph['layout'] == 'NCHW': - input_shape_rank = len(node.in_port(0).data.get_shape()) - permutation = PermuteAttrs.get_nhwc_to_nchw_permutation(input_shape_rank) - - 
data_node = node.in_node(2) - - name = node.soft_get('name', node.id) + '/ShapeGather' - const = Const(graph, {'value': permutation.perm, 'name': name + '/Const', - 'need_shape_inference': True}).create_node_with_data() - axis_const = Const(graph, {'value': int64_array(0), 'name': name + '/Axis'}).create_node_with_data() - gather = Gather(graph, {'name': name, - 'need_shape_inference': True}).create_node_with_data([data_node, const, axis_const]) - attrs = graph.get_edge_data(data_node.id, node.id, key=0).copy() - - graph.add_edge(gather.id, node.id, **attrs) - graph.remove_edge(data_node.id, node.id) diff --git a/tools/mo/openvino/tools/mo/middle/DeleteControlFlowEdges.py b/tools/mo/openvino/tools/mo/middle/DeleteControlFlowEdges.py deleted file mode 100644 index 296d3dbd9f502c..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/DeleteControlFlowEdges.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class DeleteControlFlowEdges(MiddleReplacementPattern): - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.PartialInfer import PartialInfer - return [PartialInfer] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - for u, v, k, attrs in list(graph.edges(keys=True, data=True)): - if 'control_flow_edge' in attrs and attrs['control_flow_edge']: - graph.remove_edge(u, v, k) - log.debug('Removing control flow edge from {} to {}'.format(u, v)) diff --git a/tools/mo/openvino/tools/mo/middle/DeleteNotExecutable.py b/tools/mo/openvino/tools/mo/middle/DeleteNotExecutable.py deleted file mode 100644 index 2a2166838e2cd0..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/DeleteNotExecutable.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class DeleteNotExecutable(MiddleReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.TensorIteratorConditionChecker import ConditionChecks - return [ConditionChecks] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - nodes_to_remove = set() - for node_name, node_attrs in list(graph.nodes(data=True)): - if node_attrs['kind'] == 'data' and 'executable' in node_attrs and not node_attrs['executable']: - [nodes_to_remove.add(op) for op, _ in graph.in_edges(node_name)] - nodes_to_remove.add(node_name) - log.debug('Removing the following not executable nodes: {}' - ''.format('\n'.join(sorted(map(str, nodes_to_remove))))) - graph.remove_nodes_from(nodes_to_remove) diff --git a/tools/mo/openvino/tools/mo/middle/DilatedConvolution.py b/tools/mo/openvino/tools/mo/middle/DilatedConvolution.py deleted file mode 100644 index 0e248fe6c466c6..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/DilatedConvolution.py +++ /dev/null @@ -1,211 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.partial_infer.utils import shape_insert -from openvino.tools.mo.graph.graph 
import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const - - -class DilatedConvolutionConverter(MiddleReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import PreMiddleStart - return [PreMiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def pattern(self): - return dict( - nodes=[ - ('conv', dict(kind='op', op=lambda value: value in ['Conv2D', 'DepthwiseConv2dNative', 'Conv3D'])), - ('space_to_batch', dict(kind='op', op='SpaceToBatch')), - ('batch_to_space', dict(kind='op', op='BatchToSpace')), - ('stb_pad_begin', dict(kind='op', op='Const')), - ('stb_pad_end', dict(kind='op', op='Const')), - ('bts_crop_begin', dict(kind='op', op='Const')), - ('bts_crop_end', dict(kind='op', op='Const')), - ('input', dict(kind='data')), - ('output', dict(kind='data')), - ('conv_output', dict(kind='data')), - ('stb_output', dict(kind='data')), - ('stb_bs', dict(kind='data')), - ('stb_pad_begin_d', dict(kind='data')), - ('stb_pad_end_d', dict(kind='data')), - ('bts_bs', dict(kind='data')), - ('bts_crop_begin_d', dict(kind='data')), - ('bts_crop_end_d', dict(kind='data')) - ], - edges=[ - ('input', 'space_to_batch', {'in': 0}), - ('stb_bs', 'space_to_batch', {'in': 1}), - ('stb_pad_begin', 'stb_pad_begin_d', {'in': 0}), - ('stb_pad_begin_d', 'space_to_batch', {'in': 2}), - ('stb_pad_end', 'stb_pad_end_d', {'in': 0}), - ('stb_pad_end_d', 'space_to_batch', {'in': 3}), - ('space_to_batch', 'stb_output', {'out': 0}), - ('stb_output', 'conv', {'in': 0}), - ('conv', 'conv_output', {'out': 0}), - ('conv_output', 'batch_to_space', {'in': 0}), - ('bts_bs', 'batch_to_space', {'in': 1}), - ('bts_crop_begin', 'bts_crop_begin_d', {'in': 0}), - ('bts_crop_begin_d', 'batch_to_space', {'in': 2}), - ('bts_crop_end', 'bts_crop_end_d', {'in': 0}), - ('bts_crop_end_d', 'batch_to_space', {'in': 3}), - ('batch_to_space', 'output', {'out': 0}), - ]) - - def replace_pattern(self, graph: Graph, match: dict): - conv = match['conv'] - stb = match['space_to_batch'] - bts = match['batch_to_space'] - - block_size = match['stb_bs'] - - conv.in_port(0).disconnect() - stb.in_port(0).get_connection().set_destination(conv.in_port(0)) - bts.out_port(0).get_connection().set_source(conv.out_port(0)) - - conv.dilation[conv.spatial_dims] = block_size.value[conv.spatial_dims] - - pad_begin = match['stb_pad_begin_d'].value - match['bts_crop_begin_d'].value - pad_end = match['stb_pad_end_d'].value - match['bts_crop_end_d'].value - conv.pad[conv.spatial_dims] = [[pad_begin[x], pad_end[x]] for x in conv.spatial_dims] - conv['auto_pad'] = None - - -class DilatedConvolution1DConverter(MiddleReplacementPattern): - """ - Transformation looks for a pattern that TF generates for a 1D dilated convolution with help of SpaceToBatch (STB) - and BatchToSpace (BTS). The transformation removes STB and BTS operations and updates the Convolution node - attributes with a dilation values. 
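Both dilated-convolution converters fold the SpaceToBatch/BatchToSpace pair into the convolution attributes: the block size becomes the dilation and the effective pads are the SpaceToBatch pads minus the BatchToSpace crops. A numeric sketch of that arithmetic with made-up values:

import numpy as np

stb_block_size = np.array([2, 2])     # SpaceToBatch block size per spatial dim
stb_pads_begin = np.array([2, 2])
stb_pads_end = np.array([3, 3])
bts_crop_begin = np.array([0, 0])
bts_crop_end = np.array([1, 1])

dilation = stb_block_size                        # becomes conv.dilation on spatial dims
pad_begin = stb_pads_begin - bts_crop_begin      # effective pads on the convolution
pad_end = stb_pads_end - bts_crop_end
assert list(pad_begin) == [2, 2] and list(pad_end) == [2, 2]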
- """ - enabled = True - force_clean_up = True - force_shape_inference = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import PreMiddleStart - return [PreMiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def pattern(self): - return dict( - nodes=[ - ('conv', dict(kind='op', op=lambda value: value in ['Conv2D', 'DepthwiseConv2dNative'])), - ('space_to_batch', dict(kind='op', op='SpaceToBatch')), - ('unsqueeze', dict(kind='op', op='Unsqueeze')), - ('squeeze', dict(kind='op', op='Squeeze')), - ('batch_to_space', dict(kind='op', op='BatchToSpace')), - ('input_data', dict(kind='data')), - ('output', dict(kind='data')), - ('unsqueeze_output', dict(kind='data')), - ('squeeze_output', dict(kind='data')), - ('conv_output', dict(kind='data')), - ('stb_output', dict(kind='data')), - ('stb_bs', dict(kind='data')), - ('unsqueeze_dim', dict(kind='data')), - ('stb_pad', dict(kind='data')), - ('bts_bs', dict(kind='data')), - ('bts_crop', dict(kind='data')) - ], - edges=[ - ('input_data', 'space_to_batch', {'in': 0}), - ('stb_bs', 'space_to_batch', {'in': 1}), - ('stb_pad', 'space_to_batch', {'in': 2}), - ('space_to_batch', 'stb_output', {'out': 0}), - ('stb_output', 'unsqueeze', {'in': 0}), - ('unsqueeze_dim', 'unsqueeze', {'in': 1}), - ('unsqueeze', 'unsqueeze_output', {'out': 0}), - ('unsqueeze_output', 'conv', {'in': 0}), - ('conv', 'conv_output', {'out': 0}), - ('conv_output', 'squeeze', {'in': 0}), - ('squeeze', 'squeeze_output', {'out': 0}), - ('squeeze_output', 'batch_to_space', {'in': 0}), - ('bts_bs', 'batch_to_space', {'in': 1}), - ('bts_crop', 'batch_to_space', {'in': 2}), - ('batch_to_space', 'output', {'out': 0}), - ]) - - def swap_pad_and_unsqueeze(self, pad: Node, unsqueeze: Node): - # insert additional items to the pads in the position specified by the Unsqueeze axis - unsqueeze_axis = unsqueeze.in_port(1).data.get_value() - for port_id in [1, 2]: - current_value = pad.in_port(port_id).get_connection().data.get_value() - new_value_node = Const(pad.graph, {'name': pad.soft_get('name', pad.id) + '/value_{}'.format(port_id), - 'value': shape_insert(current_value, unsqueeze_axis.item(), 0), - 'override_output_shape': True}).create_node() - pad.in_port(port_id).disconnect() - pad.in_port(port_id).connect(new_value_node.out_port(0)) - - # swap Pad and Unsqueeze layers - unsqueeze.in_port(0).disconnect() - pad.in_port(0).get_connection().set_destination(unsqueeze.in_port(0)) - unsqueeze.out_port(0).get_connection().set_source(pad.out_port(0)) - unsqueeze.out_port(0).connect(pad.in_port(0)) - - # output shapes of Pad and Unsqueeze changed so need to recalculate them - pad['override_output_shape'] = True - unsqueeze['override_output_shape'] = True - - def replace_pattern(self, graph: Graph, match: dict): - conv = match['conv'] - stb = match['space_to_batch'] - bts = match['batch_to_space'] - unsqueeze = match['unsqueeze'] - squeeze = match['squeeze'] - - if len(conv.in_port(0).data.get_shape()) != 4: - log.debug('The convolution node "{}" input is not 4D'.format(conv.soft_get('name', conv.id))) - return - - block_size = stb.in_port(1).data.get_value() - if len(block_size) != 1: - log.debug('The block size must contain 1 element') - return - - unsqueeze_dims = mo_array(unsqueeze.in_port(1).data.get_value()) - if unsqueeze_dims.size != 1 or unsqueeze_dims.item() != 1: - log.debug('The Unsqueeze dimension is not equal to 1') - return - - # remove SpaceToBatch and BatchToSpace operations - 
unsqueeze.in_port(0).get_connection().set_source(stb.in_port(0).get_source()) - bts.out_port(0).get_connection().set_source(squeeze.out_port(0)) - stb.in_port(0).disconnect() - bts.in_port(0).disconnect() - - conv.dilation[conv.spatial_dims] = [1, block_size.item()] - - pad = match['stb_pad'].value - match['bts_crop'].value - # update the pad value by inserting one zero element since the STB node consumes 3D tensor and have 1D pad value - # but the successive convolution consumes 4D tensor - pad = np.insert(pad, 0, 0, 0) - conv.pad[conv.spatial_dims] = [[pad[x][0], pad[x][1]] for x in range(len(pad))] - conv['auto_pad'] = None - - # the intermediate shapes will be changed after nodes relocation so mark nodes accordingly - input_producer = unsqueeze.in_port(0).get_source().node - input_producer['need_shape_inference'] = True - input_producer['override_output_shape'] = True - unsqueeze['need_shape_inference'] = True - unsqueeze['override_output_shape'] = True - conv['need_shape_inference'] = True - conv['override_output_shape'] = True - - # if the input to SpaceToBatch is a Pad layer then we can swap it with Unsqueeze so the Pad will be fused to a - # Convolution layer further in the pipeline - if input_producer.soft_get('type') == 'Pad': - self.swap_pad_and_unsqueeze(input_producer, unsqueeze) diff --git a/tools/mo/openvino/tools/mo/middle/EltwiseChecker.py b/tools/mo/openvino/tools/mo/middle/EltwiseChecker.py deleted file mode 100644 index 0a80f6f8cbae10..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/EltwiseChecker.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_insert -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.fusing.helpers import get_tensor_in_port, get_value_in_port -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class EltwiseChecker(MiddleReplacementPattern): - """ - Checks if element-wise operation can be converted to ScaleShift or not: - decision gets made by verifying constant input value shape is like 1,N,1,1 - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.EltwiseInputReshape import Eltwise1DInputReshape - return [Eltwise1DInputReshape] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleFinish - return [MiddleFinish] - - @staticmethod - def set_flags_to_false(node: Node, flags: list): - for flag in flags: - node[flag] = False - - def mark_eltwise_node(self, node, feature_channel=None): - tensor_port, value_port = get_tensor_in_port(node), get_value_in_port(node) - if tensor_port is None or value_port is None: - self.set_flags_to_false(node, ['can_be_fused', 'can_be_scaleshift']) - return - - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - if len(connected_in_ports) != 2: - return - - tensor_shape = tensor_port.data.get_shape() - out_shape = node.out_port(0).data.get_shape() - assert tensor_shape is not None and out_shape is not None - if not np.array_equal(tensor_shape, out_shape): - # ScaleShift operation doesn't support broadcasting - self.set_flags_to_false(node, ['can_be_fused', 'can_be_scaleshift']) - return - - value_shape = value_port.data.get_shape() - assert value_shape is not None - assert len(value_shape) <= len(tensor_shape), \ - "No broadcasting was done for elementwise node {} due to previous 
checks in EltwiseChecker class. " \ - "But constant input rank is larger than tensor input rank, that is inconsistent".format(node.name) - - # if both tensors are 0D they cannot be converted to scaleshift - if len(tensor_shape) == 0 and len(value_shape) == 0: - self.set_flags_to_false(node, ['can_be_scaleshift']) - return - - broadcasted_value_shape = shape_insert(value_shape, 0, [1] * (len(tensor_shape) - len(value_shape))) - - feature_dim = min(1, tensor_shape.size - 1) if node.graph.graph['layout'] == 'NCHW' else -1 - if feature_channel is not None: - feature_dim = feature_channel - ones = np.ones(len(tensor_shape), dtype=np.float32) - possible_shape = ones.copy() - np.put(possible_shape, feature_dim, tensor_shape.item(feature_dim)) - - if not np.array_equal(broadcasted_value_shape, ones) and \ - not np.array_equal(broadcasted_value_shape, possible_shape): - # ScaleShift weights should have [1,C,1,1]-like or [1,1,1,1]-like shape - self.set_flags_to_false(node, ['can_be_fused', 'can_be_scaleshift']) - return - - if len(tensor_shape) not in [2, 4, 5]: - # ScaleShift operation is supported for 2D, 4D or 5D tensor inputs - self.set_flags_to_false(node, ['can_be_scaleshift']) - return - - def find_and_replace_pattern(self, graph: Graph, feature_channel=None): - for node in graph.get_op_nodes(is_eltwise=True): - self.mark_eltwise_node(node) diff --git a/tools/mo/openvino/tools/mo/middle/EltwiseInputReshape.py b/tools/mo/openvino/tools/mo/middle/EltwiseInputReshape.py deleted file mode 100644 index 9b2503967a23b2..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/EltwiseInputReshape.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.layout import get_features_dim, shape_for_layout -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_insert, is_fully_defined -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class Eltwise1DInputReshape(MiddleReplacementPattern): - """ - Inserts Reshape before 1-D input to Eltwise if another input of Eltwise is multi-dimensional tensor with the - same feature size as 1-D input - - Replacer is useful in cases of layout change in MO (for example NHWC-> NCHW translation of TensorFlow models) - - Example: - Eltwise Mul operation in TF multiplies Tensors by feature dimension with shapes [1,375,500,24] and [24]. - After layout change in MO Eltwise Mul have input shapes [1,24,375,500] and [24]. It is a problem (500!=24). 
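The shape test performed above can be summarised in a few lines: after left-padding the constant operand's shape with 1s, it must be all ones or 1 everywhere except the feature channel for the node to be treated as ScaleShift. A standalone sketch of that rule (helper name and shapes are illustrative):

import numpy as np

def can_be_scaleshift(tensor_shape, value_shape, feature_dim=1):
    value_shape = np.array([1] * (len(tensor_shape) - len(value_shape)) + list(value_shape))
    ones = np.ones(len(tensor_shape), dtype=np.int64)
    per_channel = ones.copy()
    per_channel[feature_dim] = tensor_shape[feature_dim]
    return np.array_equal(value_shape, ones) or np.array_equal(value_shape, per_channel)

assert can_be_scaleshift((1, 24, 375, 500), (24, 1, 1))      # pads to [1,24,1,1]
assert not can_be_scaleshift((1, 24, 375, 500), (375, 500))  # broadcasting over H,W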
- We have to insert Reshape layer for Tensor with [24] shape to correspond the feature dimension of - Tensor [1,24,375,500] shape - - change of graph.graph['layout'] may cause an issue - change in re-layout function: convert_nhwc_to_nchw(graph) may cause an issue - """ - enabled = False - - def find_and_replace_pattern(self, graph: Graph): - layout = graph.graph['layout'] - for eltwise_op_node in graph.get_op_nodes(is_eltwise=True): - out_shape = eltwise_op_node.out_port().data.get_shape() - if 4 <= len(out_shape) <= 5: - out_features = out_shape[get_features_dim(layout, len(out_shape))] - for port, node in eltwise_op_node.in_nodes().items(): - if len(node.shape) != len(out_shape) and len(node.shape) == 1 and out_features == node.shape[0]: - new_shape = shape_for_layout(layout, batch=1, features=out_features, height=1, width=1, - depth=1 if len(out_shape) == 5 else None) - dim_const = Const(graph, {'value': new_shape, 'name': node.id + '/Dim'}).create_node() - reshape_op = Reshape(graph, attrs={'dim': new_shape, 'name': node.id + '/Broadcast'}).create_node() - - eltwise_op_node.in_port(port).get_source().node.out_port(0).get_connection().set_destination(reshape_op.in_port(0)) - reshape_op.in_port(1).connect(dim_const.out_port(0)) - - reshape_op.out_port(0).connect(eltwise_op_node.in_port(port)) - - -def compute_unsqueeze_map_for_eltwise(eltwise_node: Node): - ''' - The function computes a map of unsqueeze_dims for each producer of eltwise node. - These unsqueeze_dims are needed to normalize input shapes of eltwise node. - ''' - eltwise_shape = eltwise_node.out_port(0).data.get_shape() - max_dims = max( - [len(port.data.get_shape()) for port in eltwise_node.in_ports().values() if port.data.get_shape() is not None]) - axis = eltwise_node.soft_get('axis', None) - unsqueeze_dims_map = {} - for consumer_port in eltwise_node.in_ports().values(): - producer_port = consumer_port.get_source() - producer_shape = producer_port.data.get_shape() - unsqueeze_dims = int64_array([]) - - # 1. Compute unsqueeze dimensions in the tail - if len(producer_shape) != max_dims and len(producer_shape) > 0 and axis is not None: - num_unsqueeze_dims = max_dims - axis - len(producer_shape) - if num_unsqueeze_dims > 0: - unsqueeze_dims = np.arange(len(producer_shape), len(producer_shape) + num_unsqueeze_dims, - dtype=np.int64) - - # 2. Compute unsqueeze dimensions in the head - unsqueeze_dims_head = np.arange(len(eltwise_shape) - len(producer_shape) - len(unsqueeze_dims), dtype=np.int64) - - # Pay attention that unsqueeze dims order makes sense - # since shape is normalized in the tail first and after in the head - unsqueeze_dims = np.concatenate((unsqueeze_dims, unsqueeze_dims_head)) - unsqueeze_dims_map[producer_port] = unsqueeze_dims - - return unsqueeze_dims_map - - -def normalize_eltwise_inputs(graph: Graph): - ''' - The function normalizes input shapes for eltwise nodes. - In the first step the function gets to know which shapes/unsqueeze dims for inputs are required for normalization. - In the second step the function inserts Unsqueeze nodes between non-normalized inputs and eltwise nodes. 
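# A rough standalone illustration, independent of the Graph/Port classes used below, of the rank
# normalization these two functions perform for the common case without an explicit 'axis'
# attribute: lower-rank inputs are unsqueezed at the head until they match the eltwise output rank.
# The helper name head_unsqueeze_dims is illustrative only.
import numpy as np

def head_unsqueeze_dims(producer_rank, eltwise_rank):
    # dimensions to prepend so the producer rank matches the eltwise output rank
    return list(range(eltwise_rank - producer_rank))

a = np.zeros((2, 3, 4, 5), dtype=np.float32)
b = np.zeros((4, 5), dtype=np.float32)

for dim in head_unsqueeze_dims(b.ndim, a.ndim):         # [0, 1]
    b = np.expand_dims(b, axis=dim)
assert b.shape == (1, 1, 4, 5)                          # rank-aligned, broadcasting with 'a' now works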
- ''' - # Generate a map for producers of eltwise nodes with non-normalized shapes - # and in this map every producer has another map that reflects normalized shape - # to a list of eltwise consumers - mapping = {} - for eltwise_node in graph.get_op_nodes(is_eltwise=True): - unsqueeze_dims_map = compute_unsqueeze_map_for_eltwise(eltwise_node) - for consumer_port in eltwise_node.in_ports().values(): - producer_port = consumer_port.get_source() - unsqueeze_dims = unsqueeze_dims_map[producer_port] - if unsqueeze_dims is not None and len(unsqueeze_dims) > 0: - unsqueeze_dims = tuple([x for x in unsqueeze_dims]) - if producer_port not in mapping: - mapping.update({producer_port: {unsqueeze_dims: [consumer_port]}}) - elif unsqueeze_dims not in mapping[producer_port]: - mapping[producer_port].update({unsqueeze_dims: [consumer_port]}) - else: - mapping[producer_port][unsqueeze_dims].append(consumer_port) - - # Walk through each produced in the map and insert Unsqueeze nodes between a producer and eltwise nodes - for producer_port in mapping.keys(): - producer_node = producer_port.node - for unsqueeze_dims in mapping[producer_port].keys(): - unsqueeze_name = producer_node.soft_get('name', producer_node.id) + '/EltwiseUnsqueeze' - unsqueeze_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array(list(unsqueeze_dims))}, - {'name': unsqueeze_name}) - unsqueeze_node.in_port(0).connect(producer_port) - - # Insert Unsqueeze with determined unsqueeze dimensions between the current producer and eltwise node - for consumer_port in mapping[producer_port][unsqueeze_dims]: - consumer_port.connect(unsqueeze_node.out_port(0)) - - # The shape and value adjustments must be explicitly done within the transformation - # since the transformation is called from Fusing transformation that excludes - # automatic call of shape inference pass - producer_port_value = producer_port.data.get_value() - producer_port_shape = producer_port.data.get_shape() - new_shape = producer_port_shape.copy() - for unsqueeze_dim in unsqueeze_dims: - new_shape = shape_insert(new_shape, unsqueeze_dim, 1) - if producer_port_value is not None and is_fully_defined(new_shape): - unsqueeze_node.out_port(0).data.set_value(np.reshape(producer_port_value, new_shape)) - else: - unsqueeze_node.out_port(0).data.set_shape(new_shape) diff --git a/tools/mo/openvino/tools/mo/middle/FakeSplitOutputs.py b/tools/mo/openvino/tools/mo/middle/FakeSplitOutputs.py deleted file mode 100644 index 9e86d406890a67..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/FakeSplitOutputs.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.op import Op - - -class AddFakeOutputsToSplit(MiddleReplacementPattern): - """ - Adding fake outputs for Split nodes in case when it has less output ports than split parts: - This pass: - 1. Looking for Split operations - 2. Check that Split have less connected output ports than split parts - 3. 
For every missed port adding this port, Output operation to this port - """ - - enabled = True - - def run_after(self): - return [TensorIteratorMerge] - - def find_and_replace_pattern(self, graph: Graph): - for split_node in graph.get_op_nodes(op='Split'): - Op.normalize_outputs(split_node) - - -class AddFakeOutputsToVariadicSplit(MiddleReplacementPattern): - """ - Adding fake outputs for VariadicSplit nodes in case when it has less output ports than split parts: - This pass: - 1. Looking for VariadicSplit operations - 2. Check that VariadicSplit have less connected output ports than split parts - 3. For every missed port adding this port, Output operation to this port - """ - - enabled = True - - def run_after(self): - return [TensorIteratorMerge] - - @staticmethod - def pattern(): - return dict( - nodes=[('op', dict(kind='op', op='VariadicSplit'))], - edges=[], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - axis = node.in_port(1).data.get_value() - size_splits = node.in_port(2).data.get_value() - - output_shape = sum([node.out_node(port).shape[axis] for port in node.out_nodes()]) - - if output_shape == node.in_port(0).data.get_shape()[axis]: - return - - if not node.has_valid('out_ports_count'): - node['out_ports_count'] = len(size_splits) - - Op.normalize_outputs(node) diff --git a/tools/mo/openvino/tools/mo/middle/FuseReshapesSequence.py b/tools/mo/openvino/tools/mo/middle/FuseReshapesSequence.py deleted file mode 100644 index 35fd4cfdeed3e4..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/FuseReshapesSequence.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.pass_separator import PostMiddleStart, MiddleFinish -from openvino.tools.mo.middle.passes.eliminate import remove_op_node_with_data_node -from openvino.tools.mo.middle.passes.fusing.helpers import get_next_operation -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class FuseReshapesSequence(MiddleReplacementPattern): - """ - Finds sequence of Reshapes operations and merge them to a single Reshape operation. 
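# A small NumPy-only sketch of why the first Reshape in a Reshape -> Reshape chain can be removed
# when the second target shape is fully specified (no 0 or -1 special values), which is exactly
# the case this pass handles.
import numpy as np

x = np.arange(100, dtype=np.float32).reshape(1, 100)

chained = x.reshape(2, 5, 10).reshape(2, 10, 5)   # original Reshape -> Reshape sequence
fused = x.reshape(2, 10, 5)                       # keeping only the last Reshape

# element order is row-major in both cases, so the intermediate reshape carries no information
assert np.array_equal(chained, fused)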
- """ - # TODO the pass should be extended for Reshape with special symbols "0" or "-1" - # For example: 1,100 -> Reshape(2,5,10) -> 2,5,10 -> Reshape(0,10,-1) -> 2,10,5 - - enabled = True - run_not_recursively = True # non-unified data nodes view in TI body (no Const ops, bare data node) - - def run_before(self): - return [PostMiddleStart] - - def run_after(self): - return [MiddleFinish] - - def find_and_replace_pattern(self, graph: Graph): - reshape_nodes = graph.get_op_nodes(type='Reshape') - for node in reshape_nodes: - if not graph.has_node(node.id): - # the Reshape node has been removed in the previous iteration - continue - if len(node.out_port(0).get_destinations()) == 1: - log.debug('First phase for Reshape: {}'.format(node.soft_get('name'))) - - next_op = get_next_operation(node)[0] - log.debug('second node: id={}, type={}'.format(next_op.soft_get('id'), next_op.soft_get('type'))) - if next_op.has_valid('type') and next_op.type == 'Reshape': - dim_value = next_op.in_port(1).data.get_value() - if dim_value is None or 0 in dim_value or -1 in dim_value: - # we do not fuse reshape sequences with special symbols: 0, -1 - continue - - # Detected Reshape1 --> data --> Reshape2 pattern without side edges. Remove Reshape1 - log.debug('Second phase for Reshape: {}'.format(node.soft_get('name'))) - remove_op_node_with_data_node(graph, node) - - -class FuseReshapesSequenceKaldi(MiddleReplacementPattern): - """ - Finds sequence of Reshapes operations of special type and remove them. It is enabled for Kaldi because - such type of reshapes are created in add_reshape_around_convolution/pooling - data(b, t, w, c) -> Reshape(0, -1) -> data(b, t*w*c) -> Reshape(br, tr, wr, cr) - Check, that - * br = b - taken from shape before Reshape as is; - * tr = t and wr = w - that constants used in the second reshape is the same in shape before the first Reshape - """ - - enabled = True - run_not_recursively = True # non-unified data nodes view in TI body (no Const ops, bare data node) - graph_condition = [lambda graph: graph.graph['fw'] == 'kaldi'] - - def run_before(self): - from openvino.tools.mo.middle.MergeNodesPermutations import MergeNodesPermutations - return [MergeNodesPermutations] - - def run_after(self): - return [FuseReshapesSequence] - - def pattern(self): - return dict( - nodes=[ - ('reshape_in_dims', dict(kind='op', op='Const')), - ('reshape_in_dims_d', dict(kind='data')), - ('reshape_in', dict(kind='op', op='Reshape', special_zero=True)), - ('reshape_in_d', dict(kind='data')), - ('shape', dict(kind='op', op='ShapeOf')), - ('shape_d', dict(kind='data')), - ('gather_in_1', dict(kind='op', op='Const')), - ('gather_in_1_d', dict(kind='data')), - ('gather_in_2', dict(kind='op', op='Const')), - ('gather_in_2_d', dict(kind='data')), - ('gather_batch', dict(kind='op', op='Gather')), - ('gather_batch_d', dict(kind='data')), - ('time_dim', dict(kind='op', op='Const')), - ('time_dim_d', dict(kind='data')), - ('concat_dims', dict(kind='op', op='Concat')), - ('concat_dims_d', dict(kind='data')), - ('reshape_out', dict(kind='op', op='Reshape')), - ], - edges=[('reshape_in_dims', 'reshape_in_dims_d'), ('reshape_in_dims_d', 'reshape_in', {'in': 1}), - ('reshape_in', 'reshape_in_d'), ('reshape_in_d', 'reshape_out', {'in': 0}), - ('reshape_in_d', 'shape'), ('shape', 'shape_d'), - ('shape_d', 'gather_batch', {'in': 0}), - ('gather_in_1', 'gather_in_1_d'), ('gather_in_1_d', 'gather_batch', {'in': 1}), - ('gather_in_2', 'gather_in_2_d'), ('gather_in_2_d', 'gather_batch', {'in': 2}), - ('gather_batch', 
'gather_batch_d'), ('gather_batch_d', 'concat_dims', {'in': 0}), - ('time_dim', 'time_dim_d'), ('time_dim_d', 'concat_dims', {'in': 1}), - ('concat_dims', 'concat_dims_d'), ('concat_dims_d', 'reshape_out', {'in': 1}) - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - reshape_in = match['reshape_in'] - - log.debug('First phase for Reshape: {}'.format(reshape_in.soft_get('name'))) - in_shape = reshape_in.in_port(0).get_source().data.get_shape() - if not is_fully_defined(in_shape[1:]): - return - - reshape_in_dims = match['reshape_in_dims'] - if not np.all(reshape_in_dims.out_port(0).data.get_value() == [0, -1]): - return - - gather_in_1 = match['gather_in_1'] - if not np.all(gather_in_1.out_port(0).data.get_value() == [0]): - return - - gather_in_2 = match['gather_in_2'] - if not np.all(gather_in_2.out_port(0).data.get_value() == [0]): - return - - reshape_out = match['reshape_out'] - log.debug('second child_node: id={}, type={}'.format(reshape_out.soft_get('id'), reshape_out.soft_get('type'))) - - concat_dims_node = match['concat_dims'] - shapeof_node = match['shape'] - - # check that t and w is the same as before the first Reshape - t_node = match['time_dim'] - w_node = concat_dims_node.in_port(3).get_source().node - const_dim_2 = 3 - if w_node.op != 'Const': - w_node = concat_dims_node.in_port(2).get_source().node - const_dim_2 = 2 - if w_node.op != 'Const' or \ - not is_fully_defined(t_node.out_port(0).data.get_value()) or \ - not is_fully_defined(w_node.out_port(0).data.get_value()) or \ - not np.all(t_node.out_port(0).data.get_value() == [in_shape[1]]) or \ - not np.all(w_node.out_port(0).data.get_value() == [in_shape[const_dim_2]]): - return - - # Detected Reshape1 --> data --> Reshape2 pattern without side edges. Remove Reshape1 - log.debug('Second phase for Reshape: {}'.format(reshape_in.soft_get('name'))) - shapeof_node.in_port(0).disconnect() - concat_dims_node.out_port(0).disconnect() - remove_op_node_with_data_node(graph, reshape_in) - remove_op_node_with_data_node(graph, reshape_out) diff --git a/tools/mo/openvino/tools/mo/middle/FusedBatchNormNonConstant.py b/tools/mo/openvino/tools/mo/middle/FusedBatchNormNonConstant.py deleted file mode 100644 index 47d150e59182b0..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/FusedBatchNormNonConstant.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Mul, Add, Pow -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const - - -class FusedBatchNormNonConstant(MiddleReplacementPattern): - """ - Replaces FusedBatchNorm(input, beta, gamma, mean, variance) with non-constant mean and variance, - but with constant beta and gamma to a sub-expression consisting of a combinatin of Eltwise layers and ScaleShift. 
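# A hand-written NumPy restatement of the arithmetic that the replacement sub-graph builds out of
# Mul/Add/Pow/Const nodes further down in replace_pattern; the helper name and the eps value are
# illustrative, not taken from the transformation itself.
import numpy as np

def batch_norm_as_eltwise(x, gamma, beta, mean, variance, eps=1e-3):
    centered = x + mean * -1.0                 # mean_negate + mean_add
    inv_std = (variance + eps) ** -0.5         # variance_denom_shift + variance_denom_power
    return centered * inv_std * gamma + beta   # variance_mul + scale_mul + shift_add

rng = np.random.default_rng(0)
x = rng.standard_normal((2, 4, 4, 3)).astype(np.float32)       # NHWC, C == 3
gamma, beta = np.ones(3, np.float32), np.zeros(3, np.float32)
mean, var = x.mean(axis=(0, 1, 2)), x.var(axis=(0, 1, 2))

y = batch_norm_as_eltwise(x, gamma, beta, mean, var)
assert np.allclose(y.mean(axis=(0, 1, 2)), 0.0, atol=1e-2)     # normalized per channel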
- """ - - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleFinish - return [MiddleFinish] - - def pattern(self): - return dict( - nodes=[ - ('op', dict(kind='op', op=lambda op: op in ['FusedBatchNorm', 'FusedBatchNormV2', - 'FusedBatchNormV3']))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['op'] - if (node.data_format != b'NHWC' or - len(node.in_nodes()) != 5 or - node.in_node(0).value is not None or # input - node.in_node(1).value is None or # scale - node.in_node(2).value is None or # offset - node.in_node(3).value is not None or # mean - node.in_node(4).value is not None or # variance - node.in_node(1).value.ndim != 1 or - node.in_node(2).value.ndim != 1): - return - - scale_mul = Mul(graph, dict(name=node.name + '/scale_mul_')) - shift_add = Add(graph, dict(name=node.name + '/shift_add_')) - mean_add = Add(graph, dict(name=node.name + '/mean_add_')) - variance_mul = Mul(graph, dict(name=node.name + '/variance_mul_')) - - neg_const = Const(graph, dict(value=mo_array(-1), name=node.name + '/mean_negate_')) - mean_negate = Mul(graph, dict(name=node.name + '/mean_negate_')) - mean_arg = mean_add.create_node_with_data([ - node.in_node(0), - mean_negate.create_node_with_data([node.in_node(3), - neg_const.create_node_with_data() - ])]) - - shift_const = Const(graph, dict(value=node.eps, name=node.name + '/variance_denom_shift_const_')) - power_const = Const(graph, dict(value=-0.5, name=node.name + '/variance_denom_power_const_')) - variance_denom_shift = Add(graph, dict(name=node.name + '/variance_denom_shift_')) - variance_denom_power = Pow(graph, dict(name=node.name + '/variance_denom_power_')) - variance_arg = variance_mul.create_node_with_data([ - mean_arg, - variance_denom_power.create_node_with_data([ - variance_denom_shift.create_node_with_data([node.in_node(4), shift_const.create_node_with_data()]), - power_const.create_node_with_data()] - )]) - - shift_add.create_node_with_data([ - scale_mul.create_node_with_data([ - variance_arg, - node.in_node(1)]), - node.in_node(2)], - data_nodes=node.out_node()) - - node.graph.remove_node(node.id) diff --git a/tools/mo/openvino/tools/mo/middle/FusedBatchNormTraining.py b/tools/mo/openvino/tools/mo/middle/FusedBatchNormTraining.py deleted file mode 100644 index cf27c773e82319..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/FusedBatchNormTraining.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape - - -class FusedBatchNormTraining(MiddleReplacementPattern): - """ - Transformation looks for the BatchNorm layers in training mode and does the following: - 1. 
Fuses batch dimension with one of the spatial dimensions of the input to BatchNorm because batch normalization is - performed over batch dimension also (per channel(features) dimension). - 2. Inserts MVN layer. - 3. Reshape MVN output back to the original one. - """ - enabled = True - replacement_id = "Fused_Batch_Norm_is_training_true" - force_shape_inference = True - force_clean_up = True - # transformation works for the NHWC layout because transformation inserts Reshape to fuse N and H dimensions - graph_condition = [lambda graph: graph.graph['layout'] == 'NHWC'] - - def pattern(self): - return dict( - nodes=[ - ('op', dict(kind='op', op=lambda op: op in ['FusedBatchNorm', 'FusedBatchNormV2', 'FusedBatchNormV3'], - is_training=True))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['op'] - node_name = node.soft_get('name', node.id) - node.is_training = False - - shape = node.in_port(1).data.get_shape() - assert shape is not None, 'The shape of scale input of the BatchNorm node {} is not defined'.format(node.name) - - bn_mean = Const(graph, {'name': node_name + '/mean', 'value': np.zeros(shape, dtype=np.float32), - 'override_output_shape': True}).create_node() - bn_std = Const(graph, {'name': node_name + '/std', 'value': np.ones(shape, dtype=np.float32), - 'override_output_shape': True}).create_node() - node.in_port(3).get_connection().set_source(bn_mean.out_port(0)) - node.in_port(4).get_connection().set_source(bn_std.out_port(0)) - - # save the original shape - original_shape = Shape(graph, {'name': node.in_port(0).get_source().node.soft_get('name')}).create_node() - original_shape.in_port(0).connect(node.in_port(0).get_source()) - - input_rank = len(node.in_port(0).data.get_shape()) - rng = create_op_with_const_inputs(graph, Range, - {0: int64_array(1), 1: int64_array(input_rank - 1), 2: int64_array(1)}, - {'name': node_name + '/Range', 'output_type': np.int64}) - mvn = MVN(graph, {'name': node_name + '/mvn_', 'eps': node.soft_get('eps', 1e-6), 'eps_mode': 'inside_sqrt', - 'normalize_variance': 1, 'override_output_shape': True}).create_node() - node.in_port(0).get_connection().insert_node(mvn) - mvn.in_port(1).connect(rng.out_port(0)) - - reshape_4d = create_op_node_with_second_input(graph, Reshape, int64_array([1, -1, 0, 0]), - {'override_output_shape': True, - 'name': node_name + '/fused_batch_and_channels'}) - mvn.in_port(0).get_connection().insert_node(reshape_4d) - - # restore original shape - reshape_back = Reshape(graph, {'name': node_name + '/restore_shape', - 'override_output_shape': True}).create_node() - reshape_back.in_port(1).connect(original_shape.out_port(0)) - mvn.out_port(0).get_connection().insert_node(reshape_back) diff --git a/tools/mo/openvino/tools/mo/middle/GRURNNSequenceToTensorIterator.py b/tools/mo/openvino/tools/mo/middle/GRURNNSequenceToTensorIterator.py deleted file mode 100644 index 00ed6afdf3e44b..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/GRURNNSequenceToTensorIterator.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.tensor_iterator import TensorIterator -from openvino.tools.mo.front.common.partial_infer.utils import shape_delete -from openvino.tools.mo.graph.graph import Graph, add_opoutput -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import Op -from 
openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class GRUAndRNNToTensorIterator(MiddleReplacementPattern): - """ Converts normalized RNNSequence with op=GRU/RNN to TensorIterator. - - Normalized RNNSequence means that it should be processed by - RNNSequenceNormalize transform that ensures its strict form. - - This transformation builds an alternative sub-graph for GRUSequence - with TensorIterator connected in the same way as an original GRUSequence - node and with internal body represented as GRUCell op node with necessary - squeezes and unsqueezes around. - """ - - enabled = True - id = 'gru_and_rnn_to_tensor_iterator' - - def run_after(self): - from openvino.tools.mo.middle.RNNSequenceNormalizeToIE import RNNSequenceNormalize - return [RNNSequenceNormalize] - - def run_before(self): - from openvino.tools.mo.middle.permute_tensor_iterator import TransposeTensorIteratorLSTM - return [TransposeTensorIteratorLSTM] - - def pattern(self): - return dict( - nodes=[ - ('rnn_layer', dict(kind='op', type='RNNSequence')), - ('input', dict(kind='data')), - ('weights', dict(kind='data')), - ('biases', dict(kind='data')), - # don't capture optional input initial states here - ('output', dict(kind='data')), - # don't capture optional output last states here - ], - edges=[ - ('input', 'rnn_layer', {'in': 0}), - ('weights', 'rnn_layer', {'bin': 'weights', 'in': 1}), - ('biases', 'rnn_layer', {'bin': 'biases', 'in': 2}), - ('rnn_layer', 'output', {'out': 0}), - ] - ) - - @staticmethod - def get_rnn_cell(name: str): - op = Op.get_op_class_by_name(name + 'Cell') - return op - - def replace_pattern(self, graph: Graph, match: dict): - if match['rnn_layer']['op'] == 'LSTM': - return - - rnn_layer = match['rnn_layer'] - - # Build TensorIterator body first - body = Graph(name=rnn_layer.name + '/sub_graph') - body.graph = graph.graph - - # 1. Input squeeze Reshape - inputs = [Op._create_data_node(body, rnn_layer.name + '/inport/' + str(inp), - {'shape': rnn_layer.in_node(inp).shape.copy(), - 'value': rnn_layer.in_node(inp).value.copy() - if rnn_layer.in_node(inp).value is not None and inp in [1, 2] else None}) - for inp in [0, 4, 1, 2]] # X, h_init, WR, B - - inputs[0].shape[rnn_layer.sequence_dim] = 1 - input_squeeze = Squeeze(body, dict(name=rnn_layer.name + '/input_squeeze', internal_layer_id=0)) - input_squeeze_dim = Const(body, dict(name=rnn_layer.name + '/input_squeeze_dim', - value=rnn_layer.sequence_dim)).create_node_with_data() - inputs[0] = input_squeeze.create_node_with_data([inputs[0], input_squeeze_dim], - edge_attrs=[{'internal_port_id': 0}]) - - # 2. Output unsqueeze Reshape - outputs = [Op._create_data_node(body, rnn_layer.name + '/outport/' + str(out), - {'shape': rnn_layer.out_node(out).shape.copy() if out in rnn_layer.out_nodes() else None}) - for out in [0]] - for out in outputs: - add_opoutput(body, out.id, 0, False) - - outputs[0].shape = shape_delete(outputs[0].shape, rnn_layer.sequence_dim) - output_unsqueeze_dim = Const(body, dict(name=rnn_layer.name + '/output_unsqueeze_dim', - value=rnn_layer.sequence_dim)).create_node_with_data() - output_unsqueeze = Unsqueeze(body, dict(name=rnn_layer.name + '/output_unsqueeze/', internal_layer_id=2)) - - additional_attrs = dict(activations=rnn_layer.activations, - activation_alpha=rnn_layer.activation_alpha, - activation_beta=rnn_layer.activation_beta, - clip=rnn_layer.clip) - if rnn_layer.op == 'GRU': - additional_attrs['linear_before_reset'] = rnn_layer.linear_before_reset - - # 3. 
***Cell - rnn_cell_op = self.get_rnn_cell(rnn_layer['op'])(body, dict(hidden_size=rnn_layer.hidden_size, - name=rnn_layer.name + '/{}Cell'.format(rnn_layer.op), - **additional_attrs, - internal_layer_id=1)) - - gru_cell = rnn_cell_op.create_node_with_data(inputs, data_nodes=outputs, - edge_attrs=[{}, {'internal_port_id': 1}, - {'internal_port_id': 2}, {'bin': 'weights'}, - {'bin': 'biases'}]) - - # internal ports for outputs of cell - gru_cell.in_node().out_edge(0)['internal_port_id'] = 4 # h_state - - gru_cell = output_unsqueeze.create_node_with_data([gru_cell, output_unsqueeze_dim]) - gru_cell.in_node().out_edge(0)['internal_port_id'] = 3 - add_opoutput(body, gru_cell.id, 0, False) - - # 4. TensorIterator layer creating - assert rnn_layer.direction in ['forward', 'reverse'] - if rnn_layer.direction == 'forward': - stride = 1 - start = None - end = None - else: - assert rnn_layer.direction == 'reverse' - stride = -1 - start = -1 - end = 0 - - # stacked h_state - output_port_map = [{ - 'external_port_id': 3, - 'internal_layer_id': 2, - 'internal_port_id': 3, - - 'axis': rnn_layer.sequence_dim, - 'stride': stride, - 'start': start, - 'end': end, - 'part_size': 1, - }] - - # Adding last h_state to outputs - if len(rnn_layer.out_nodes()) == 2: - output_port_map.extend([{ - 'external_port_id': 4, - 'internal_layer_id': 1, - 'internal_port_id': 4, - }]) - - ti_op = TensorIterator(graph, { - 'name': rnn_layer.name + '/TensorIterator', - 'body': body, - 'in_ports_count': 4, - 'out_ports_count': len(rnn_layer.out_nodes()), - - 'input_port_map': [ - { - 'external_port_id': 0, - 'internal_layer_id': 0, - 'internal_port_id': 0, - - 'axis': rnn_layer.sequence_dim, - 'stride': stride, - 'start': start, - 'end': end, - 'part_size': 1, - }, - { - 'external_port_id': 1, - 'internal_layer_id': 1, - 'internal_port_id': 1, - }, - ], - - 'output_port_map': output_port_map, - # only for h state - 'back_edges': [ - { - 'from_layer': 1, - 'from_port': 4, - 'to_layer': 1, - 'to_port': 1, - }, - ] - }) - - assert sorted(rnn_layer.out_nodes().keys()) == list(range(len(rnn_layer.out_nodes()))), \ - "There are gaps in output ports of GRUSequence operation. 
Node {}".format(rnn_layer.id) - - outs = ti_op.create_node_with_data([rnn_layer.in_node(i) for i in [0, 4]], # X, h_init - data_nodes=[rnn_layer.out_node(i) for i in range(len(rnn_layer.out_nodes()))], - edge_attrs=[{'external_port_id': 0}, {'external_port_id': 1}]) - - if not isinstance(outs, list): - outs = list([outs]) - - graph.remove_node(rnn_layer.id) - outs[0].in_edge(0)['external_port_id'] = 3 - for i, out in enumerate(outs[1:]): - external_port_id = 4 + i - out.in_edge()['external_port_id'] = external_port_id - - ti = outs[0].in_node() - TensorIterator.cover_body_input_data_nodes_with_parameter_ops(ti) - TensorIterator.cover_body_constant_data_nodes_with_const_ops(ti) - TensorIterator.normalize_internal_ids(ti) diff --git a/tools/mo/openvino/tools/mo/middle/GatherNDDecomposition.py b/tools/mo/openvino/tools/mo/middle/GatherNDDecomposition.py deleted file mode 100644 index 2f08c651fefc29..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/GatherNDDecomposition.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.reshape import Reshape - - -class GatherNDDecomposition(MiddleReplacementPattern): - """ - Hot fix for new speech-to-text model enabling while GatherND is not implemented in IE. - We can replace GatherND to Reshape + Gather in case when GatherND indices have just one - meaningful dimension. - TODO: Investigate whether we must replace GatherND with Reshape + Gather always (due to performance benefits) - for this particular case or only if the plugin does not support GatherND. - And the best place for the transformation is nGraph so we need to move it. - """ - enabled = True - force_clean_up = True - - def run_before(self): - from openvino.tools.mo.middle.BlockLSTMtoLSTMSequence import BlockLSTMtoLSTMSequence - return [BlockLSTMtoLSTMSequence] - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def pattern(self): - return dict( - nodes=[('GatherND', dict(kind='op', op='GatherND', batch_dims=0))], - edges=[] - ) - - @staticmethod - def indices_check(indices: np.array, input_shape: tuple): - """ - Check that indices have just one meaningful dimension and all other dimensions of input have size 1. - """ - n_dims = indices.shape[-1] - non_zero = None - for i in range(n_dims): - if not np.all(np.take(indices, indices=[i], axis=-1) == 0): - if non_zero is None: - non_zero = i - else: - return None - else: - if input_shape[i] != 1: - return None - return non_zero - - def replace_pattern(self, graph: Graph, match: dict): - gather = match['GatherND'] - gather_name = gather.soft_get('name', gather.id) - input_shape = gather.in_node(0).shape - indices = gather.in_node(1).value - if indices is None: - # We can't do such special pass without indices value - return - - # 0. 
All needed checks that we can replace GatherND by Gather - gather_idx = self.indices_check(indices, input_shape) - if gather_idx is None: - log.warning( - 'Node {} with op=GatherND can\'t be normalized to op=Gather.'.format(gather_name)) - return - - # 1. Add Reshape and connect - new_shape = int64_array([-1] + list(input_shape[indices.shape[-1]:])) - reshape = create_op_node_with_second_input(graph, Reshape, new_shape, - {'name': gather_name + '/Reshape_for_GatherND/'}) - gather.in_port(0).get_connection().set_destination(reshape.in_port(0)) - - # 2. Eliminate last dim (n_dims values) from indices shape: - new_indices = np.reshape( - np.take(indices, indices=[gather_idx], axis=-1), indices.shape[:-1]) - - rename_node(gather, gather_name + '/to_delete') - - # 3. Create new Gather operation and reconnect all inputs/outputs - new_gather = create_op_with_const_inputs(graph, Gather, {1: new_indices, 2: int64_array(0)}, - {'name': gather_name}) - rename_node(new_gather, gather_name) - - reshape.out_port(0).connect(new_gather.in_port(0)) - - gather.out_port(0).get_connection().set_source(new_gather.out_port(0)) - - # 4. Remove old Gather node - graph.remove_node(gather.id) diff --git a/tools/mo/openvino/tools/mo/middle/GroupNorm.py b/tools/mo/openvino/tools/mo/middle/GroupNorm.py deleted file mode 100644 index a879ec62dba840..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/GroupNorm.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from typing import Dict - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.elementwise import Mul, Add -from openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.utils.shape import node_to_get_spatial_dimensions_value, node_to_get_features_dimension_value, \ - node_to_get_batch_value, new_shape_node_from_shape_nodes, get_shape_and_rank_nodes_by_port - - -class GroupNormToMVN(MiddleReplacementPattern): - """ - Converts GroupNorm operation to Reshape + MVN + Reshape + Mul + Add - """ - op = 'GroupNorm' - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.EltwiseChecker import EltwiseChecker - # TODO the EltwiseChecker does not work correctly for eltwises with 1D inputs - return [EltwiseChecker] - - def pattern(self): - return dict( - nodes=[ - ('op', dict(op='GroupNorm')), - ], - edges=[]) - - def replace_pattern(self, graph: Graph, match: Dict[str, Node]): - group_norm_node = match['op'] - group_norm_num_input_dims = len(group_norm_node.in_port(0).data.get_shape()) - - # node computing initial GroupNorm input shape - initial_shape_op_node = Shape(graph, {'name': group_norm_node.name + '/Shape'}).create_node() - initial_shape_op_node.in_port(0).connect(group_norm_node.in_port(0).get_source()) - - initial_shape_op_node_float = Cast( - graph, {'name': 
initial_shape_op_node.name + '/to_float', - 'dst_type': data_type_str_to_np(graph.graph['cmd_params'].data_type)}).create_node() - initial_shape_op_node.out_port(0).connect(initial_shape_op_node_float.in_port(0)) - - initial_batch_dim_node = node_to_get_batch_value(initial_shape_op_node_float) - initial_features_dim_node = node_to_get_features_dimension_value(initial_shape_op_node_float) - initial_spatial_dims_node_int = node_to_get_spatial_dimensions_value(initial_shape_op_node) - initial_spatial_dims_node = Cast( - graph, {'name': initial_spatial_dims_node_int.name + '/to_float', - 'dst_type': data_type_str_to_np(graph.graph['cmd_params'].data_type)}).create_node() - initial_spatial_dims_node_int.out_port(0).connect(initial_spatial_dims_node.in_port(0)) - - group_size_node = Const(graph, {'value': int64_array([group_norm_node.num_groups]), - 'name': group_norm_node.name + '/GroupSize'}).create_node() - - # calculate "features // group_size" value - reciprocal_group_size_node = Const(graph, {'value': mo_array([1.0 / group_norm_node.num_groups]), - 'name': group_norm_node.name + '/ReciprocalGroupSize'}).create_node() - - c_div_g_node = Mul(graph, {}).create_node() - c_div_g_node.in_port(0).connect(initial_features_dim_node.out_port(0)) - c_div_g_node.in_port(1).connect(reciprocal_group_size_node.out_port(0)) - - batch_mul_group_size_node = Mul(graph, {}).create_node() - batch_mul_group_size_node.in_port(0).connect(initial_batch_dim_node.out_port(0)) - batch_mul_group_size_node.in_port(1).connect(group_size_node.out_port(0)) - - # create new node which concatenates several dims to one - new_shape_node_float = new_shape_node_from_shape_nodes([batch_mul_group_size_node, c_div_g_node, - initial_spatial_dims_node]) - new_shape_node = Cast(graph, - {'name': new_shape_node_float.name + '/to_int64', 'dst_type': np.int64}).create_node() - new_shape_node_float.out_port(0).connect(new_shape_node.in_port(0)) - - reshape_for_mvn_node = Reshape(graph, {}).create_node() - - group_norm_node.in_port(0).get_connection().set_destination(reshape_for_mvn_node.in_port(0)) - reshape_for_mvn_node.in_port(1).connect(new_shape_node.out_port(0)) - - # Reshape the gamma and beta constants to correct layout from [C] to [1,C], [1,C,1], [1,C,1,1] etc - gamma_beta_shape = np.ones([group_norm_num_input_dims], dtype=np.int64) - gamma_beta_shape[1] = -1 - - gamma_value = group_norm_node.in_port(1).get_source().data.get_value() - beta_value = group_norm_node.in_port(2).get_source().data.get_value() - assert gamma_value is not None, 'The gamma should be constant' - assert beta_value is not None, 'The beta should be constant' - gamma_value = np.reshape(gamma_value, gamma_beta_shape) - group_norm_node.in_port(1).get_source().data.set_value(gamma_value) - beta_value = np.reshape(beta_value, gamma_beta_shape) - group_norm_node.in_port(2).get_source().data.set_value(beta_value) - - # MVN - mvn_node = MVN(graph, {'name': group_norm_node.name + '/MVN', - 'normalize_variance': 1, - 'eps': group_norm_node.eps, - 'eps_mode': 'inside_sqrt'}).create_node() - mvn_node.in_port(0).connect(reshape_for_mvn_node.out_port(0)) - - # MVN axes - _, rank = get_shape_and_rank_nodes_by_port(mvn_node.in_port(0).get_connection().get_source(), - return_as_a_scalar=True) - rng = create_op_with_const_inputs(graph, Range, {0: int64_array(1), 2: int64_array(1)}, - {'name': group_norm_node.name + '/Range', 'output_type': np.int64}) - mvn_node.in_port(1).connect(rng.out_port(0)) - rng.in_port(1).connect(rank.out_port(0)) - - # reshape to the initial shape 
before multiplying with gamma and adding beta - reshape_to_initial_shape_node = Reshape(graph, {}).create_node() - reshape_to_initial_shape_node.in_port(0).connect(mvn_node.out_port(0)) - reshape_to_initial_shape_node.in_port(1).connect(initial_shape_op_node.out_port(0)) - - mul_node = Mul(graph, {'name': mvn_node.name + '/Mul'}).create_node() - mul_node.in_port(0).connect(reshape_to_initial_shape_node.out_port(0)) - group_norm_node.in_port(1).get_connection().set_destination(mul_node.in_port(1)) - - add_node = Add(graph, {'name': mul_node.name + '/Add'}).create_node() - add_node.in_port(0).connect(mul_node.out_port(0)) - group_norm_node.in_port(2).get_connection().set_destination(add_node.in_port(1)) - - group_norm_node.out_port(0).get_connection().set_source(add_node.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/middle/InputCut.py b/tools/mo/openvino/tools/mo/middle/InputCut.py deleted file mode 100644 index 03f5057ad676d5..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/InputCut.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import add_input_ops -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class MiddleInputCut(MiddleReplacementPattern): - enabled = True - force_clean_up = True - run_not_recursively = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import PreMiddleStart - return [PreMiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def find_and_replace_pattern(self, graph: Graph): - add_input_ops(graph, graph.graph['user_shapes'], False) diff --git a/tools/mo/openvino/tools/mo/middle/InsertLayoutPropagationTransposes.py b/tools/mo/openvino/tools/mo/middle/InsertLayoutPropagationTransposes.py deleted file mode 100644 index 3ae82693619bf4..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/InsertLayoutPropagationTransposes.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.middle.pass_separator import PostMiddleStart -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph, Node, Port -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.op import PermuteAttrs - - -class InsertLayoutPropagationTranspose(MiddleReplacementPattern): - """ - The transformation inserts Transpose layers before/after operations that change the interpretation of data, for - example, Reshape from 3D to 4D or from 4D to 3D. These Transpose layers basically convert layout from N(D)HWC to - NC(D)HW and in the reverse order. - """ - enabled = True - force_clean_up = True # need to run clean up after the transformation to update shapes - graph_condition = [lambda graph: graph.graph['layout'] == 'NHWC'] - - def run_after(self): - return [PostMiddleStart] - - def run_before(self): - return [] - - @staticmethod - def is_nchw_to_nhwc_transpose_needed(node: Node): - """ - The function checks that it is necessary to insert Transpose from NCHW to NHWC before the node. - The transpose is needed when all the following conditions are met: - 1. The node is marked as 'reinterp_shape' attribute - 2. 
The node is *not* marked as getting input in correct layout (implicitly imply that the input is on port 0) - 3. The input shape rank is not less than 4 - 4. Node is not a part of shape sub-graph (layout permutation is handled separately for such a sub-graph) - - :param node: node to check - :return: result of the check - """ - return node.has_and_set('reinterp_shape') and \ - not is_input_data_in_correct_layout(node, 0) and \ - len(node.in_port(0).data.get_shape()) >= 4 and \ - all([port.data.get_value() is None for port in node.out_ports().values() if not port.disconnected()]) - - @staticmethod - def is_nhwc_to_nchw_transpose_needed(node: Node): - """ - The function checks that it is necessary to insert Transpose from NHWC to NCHW after the node. - The transpose is needed when all the following conditions are met: - 1. The node is marked as 'reinterp_shape' attribute - 2. The node is *not* marked as generating output in correct layout (implicitly imply that the output port is 0) - 3. The output shape rank is not less than 4 - 4. Node is not a part of shape sub-graph (layout permutation is handled separately for such a sub-graph) - :param node: node to check - :return: result of the check - """ - return node.has_and_set('reinterp_shape') and \ - not is_output_data_in_correct_layout(node, 0) and \ - len(node.out_port(0).data.get_shape()) >= 4 and \ - all([port.data.get_value() is None for port in node.out_ports().values() if not port.disconnected()]) - - def find_and_replace_pattern(self, graph: Graph): - - # we need to import these functions here to avoid circular dependent imports - from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input - - if graph.graph['layout'] != 'NHWC': - # we check it here because this transformation is called explicitly from the pipeline - return - - # reshape from 4D-5D -> ND. Insert Transpose(NC(D)HW->N(D)HWC) before Reshape - for reinterp_shape_node_id in graph.get_nodes_with_attributes(reinterp_shape=True): - reinterp_shape_node = Node(graph, reinterp_shape_node_id) - assert 0 in reinterp_shape_node.in_nodes(), 'Node {} does not have 0 input. \n{}'.format( - reinterp_shape_node_id, graph.dump_graph_for_graphviz()) - input_shape = reinterp_shape_node.in_node(0).shape - if self.is_nchw_to_nhwc_transpose_needed(reinterp_shape_node): - permute_node = create_op_node_with_second_input( - graph, Transpose, PermuteAttrs().get_nchw_to_nhwc_permutation(len(input_shape)).perm, - {'name': reinterp_shape_node.in_port(0).get_source().node.name + '/Transpose'} - ) - reinterp_shape_node.in_port(0).get_connection().insert_node(permute_node) - - order_const = permute_node.in_port(1).get_source().node - order_const.infer(order_const) - # do not infer the Transpose node because it should have input data node in NCHW layout (but currently - # it is NHWC because data node attributes has not been permuted yet) and produce output in NHWC layout - # (which is true at this moment) - permute_node['need_shape_inference'] = False - # mark the Transpose output data node having correct layout so it's shape will not be permuted - mark_output_as_in_correct_layout(permute_node, 0) - - # keep the reinterp_shape_node in NHWC layout - for in_port_id, _ in reinterp_shape_node.in_ports().items(): - mark_input_as_in_correct_layout(reinterp_shape_node, in_port_id) - - # reshape from ND -> 4D-5D. 
Insert Transpose(N(D)HWC->NC(D)HW) after Reshape - for reinterp_shape_node_id in graph.get_nodes_with_attributes(reinterp_shape=True): - reinterp_shape_node = Node(graph, reinterp_shape_node_id) - assert 0 in reinterp_shape_node.out_nodes(), 'Node {} does not have 0 output. \n{}'.format( - reinterp_shape_node_id, graph.dump_graph_for_graphviz()) - output_shape = reinterp_shape_node.out_node(0).shape - if self.is_nhwc_to_nchw_transpose_needed(reinterp_shape_node): - permute_node = create_op_node_with_second_input( - graph, Transpose, PermuteAttrs().get_nhwc_to_nchw_permutation(len(output_shape)).perm, - {'name': reinterp_shape_node.id + '/Transpose'}) - reinterp_shape_node.out_port(0).get_connection().insert_node(permute_node) - - # the Reshape and Transpose operations should work in original (NHWC layout) so the Transpose - # will convert it to the NCHW - mark_input_as_in_correct_layout(permute_node, 0) - mark_input_as_in_correct_layout(permute_node, 1) - # do not set Transpose output data node 'correct_data_layout' attribute so the data node shape will be - # permuted - - # keep the reinterp_shape_node in NHWC layout - mark_output_as_in_correct_layout(reinterp_shape_node, 0) - for in_port_id in reinterp_shape_node.in_ports().keys(): - if in_port_id: - mark_input_as_in_correct_layout(reinterp_shape_node, in_port_id) - - # do not re-infer the Transpose node because it output data node should be in NHWC layout to make the - # rest of the graph consistent - permute_node['need_shape_inference'] = False - - -def is_input_data_in_correct_layout(node: Node, port_ind: int): - assert node.soft_get('kind') == 'op', 'The function work with operation nodes only' - return 'correct_in_data_layout' in node.attrs() and port_ind in node.attrs()['correct_in_data_layout'] - - -def mark_input_as_in_correct_layout(node: Node, port_ind: int): - assert node.soft_get('kind') == 'op', 'The function work with operation nodes only' - graph = node.graph - graph.node[node.id].setdefault('correct_in_data_layout', set()) - graph.node[node.id]['correct_in_data_layout'].add(port_ind) - - -def is_output_data_in_correct_layout(node: Node, port_ind: int): - assert node.soft_get('kind') == 'op', 'The function work with operation nodes only' - return 'correct_out_data_layout' in node.attrs() and port_ind in node.attrs()['correct_out_data_layout'] - - -def mark_output_as_in_correct_layout(node: Node, port_ind: int): - assert node.soft_get('kind') == 'op', 'The function work with operation nodes only' - graph = node.graph - graph.node[node.id].setdefault('correct_out_data_layout', set()) - graph.node[node.id]['correct_out_data_layout'].add(port_ind) - - -def mark_as_correct_data_layout(node: Node): - """ - The analogue of the attribute 'correct_data_layout' for the operation node - :param node: node to mark it with attribute 'correct_data_layout' - :return: None - """ - assert node.soft_get('kind') == 'op', 'The function work with operation nodes only' - for ind, port in node.in_ports().items(): - mark_input_as_in_correct_layout(node, ind) - - for ind, port in node.out_ports().items(): - mark_output_as_in_correct_layout(node, ind) - - -def insert_transpose(graph: Graph, input_port: Port, before_input=True): - from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs - - input_rank = len(input_port.data.get_shape()) - if input_rank > 3: - if before_input: - axis_order = np.concatenate((int64_array([0]), - int64_array(list(range(2, input_rank))), - int64_array([1]))) - source_node = 
input_port.get_source().node - transpose_name = source_node.soft_get('name', source_node.id) + '/TransposeToNHWC' - else: - axis_order = np.concatenate( - (int64_array([0]), - int64_array([input_rank - 1]), - int64_array(list(range(1, input_rank - 1))))) - transpose_name = input_port.node.soft_get('name', input_port.node.id) + '/TransposeToNCHW' - input_port.node['need_shape_inference'] = True - input_port.node['override_output_shape'] = True - transpose = create_op_with_const_inputs(graph, Transpose, {1: axis_order}, {'name': transpose_name}) - input_port.get_connection().insert_node(transpose) - transpose['need_shape_inference'] = True - transpose['override_output_shape'] = True diff --git a/tools/mo/openvino/tools/mo/middle/InsertSelect.py b/tools/mo/openvino/tools/mo/middle/InsertSelect.py deleted file mode 100644 index 072c5c6426a526..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/InsertSelect.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import networkx as nx -import numpy as np - -from openvino.tools.mo.middle.MakeKaldiConstReshapable import create_const_with_batch_from_input -from openvino.tools.mo.ops.elementwise import Equal -from openvino.tools.mo.ops.select import Select -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.pattern_match import find_pattern_matches, inverse_dict -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.assign import Assign -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.crop import Crop -from openvino.tools.mo.ops.read_value import ReadValue -from openvino.tools.mo.ops.result import Result -from openvino.tools.mo.utils.graph import bfs_search -from openvino.tools.mo.utils.error import Error - - -def check_inputs(graph: Graph): - inputs = graph.get_op_nodes(op='Parameter') - if len(inputs) == 1: - return inputs[0] - elif len(inputs) == 2: - if inputs[0].name == 'ivector': - return inputs[1] - elif inputs[1].name == 'ivector': - return inputs[0] - else: - raise Error("There are 2 inputs for Kaldi model but we can't find out which one is ivector. " + - "Use name \'ivector\' for the corresponding input") - else: - raise Error("There are {} inputs for Kaldi model but we expect only 1 or 2".format(len(inputs))) - - -class AddSelectBeforeMemoryNodePattern(MiddleReplacementPattern): - """ - Add Select before saving state with Memory to avoid garbage saving. - We need to know delay on each node where Select is adding. For that we traverse the whole graph and set frame time - for each node using the following rules: - * Splice increases frame time by length of its context. If Crop is following Splice - it takes one concrete - moment of time, so frame time increases by its value - Example: - node ---> Splice(-5, -4, ... 0) ---> node - frame time: 0 ---> 5 ---> 5 - node ---> Splice(-5, -4, ... 
0) ---> Crop(offset = 2, dim = 1) ---> node - frame time: 0 ---> 5 ---> 3 ---> 3 - * Nodes with several inputs have frame time= max (frame time of each input) - * Node with one input have the same frame time as its input - """ - enabled = True - graph_condition = [lambda graph: graph.graph['fw'] == 'kaldi'] - - def run_after(self): - from openvino.tools.mo.middle.ReplaceMemoryOffsetWithSplice import ReplaceMemoryOffsetWithMemoryNodePattern - from openvino.tools.mo.middle.RemoveDuplicationMemory import MergeNeighborSplicePattern - return [ReplaceMemoryOffsetWithMemoryNodePattern, - MergeNeighborSplicePattern] - - def run_before(self): - from openvino.tools.mo.middle.ReplaceSpliceNodePattern import ReplaceSpliceNodePattern - return [ReplaceSpliceNodePattern] - - @staticmethod - def calculate_frame_time(graph: Graph): - # there are either one or two inputs in Kaldi. Only main input can change delay in network. - # Usually ivector input has name 'ivector'. - max_frame_time = -2 - inputs = graph.get_op_nodes(op='Parameter') - inp = check_inputs(graph) - inp_name = inp.soft_get('name', inp.id) - - # sort nodes to calculate delays - nodes = list(bfs_search(graph, [inp_name])) - - for n in nodes: - node = Node(graph, n) - - # just ignore data nodes - if node.kind != 'op': - continue - - # calculate frame_time (delay) that was not calculated - if node.frame_time < 0: - # Splice increases frame delay - if node.op == "Splice": - if node.in_port(0).get_source().node.frame_time == -1: - continue - node.frame_time = node.in_port(0).get_source().node.frame_time + len(node.context) - 1 - # crop often used to get concrete time frame, set frame_time correctly for this case - elif node.op == 'Crop': - if node.in_port(0).get_source().node.frame_time == -1: - continue - if node.in_port(0).get_connection().get_source().node.op == 'Splice': - splice_node = node.in_port(0).get_source().node - assert len(node.offset) == 1 - assert len(node.dim) == 1 - new_delay = splice_node.context[node.offset[0] // node.dim[0]] - splice_node.context[0] - node.frame_time = splice_node.in_port(0).get_source().node.frame_time + new_delay - else: - node.frame_time = node.in_port(0).get_source().node.frame_time - elif node.op == 'ShapeOf': - # exclude shape path from time delay calculation using special value - node.frame_time = max_frame_time - elif node.op == 'Broadcast': - # finished shape path - node.frame_time = node.in_port(0).get_source().node.frame_time - # for node with several inputs frame_time = maximum of delays from branches - else: - # find out maximum of delay and check that we have at least one branch with another delay - node.frame_time = -1 if len(node.in_ports()) != 0 else 0 - min_in_frame_time = -1 - for inp in node.in_ports(): - if node.in_port(inp).disconnected(): - continue - in_node = node.in_port(inp).get_source().node - if in_node.frame_time < min_in_frame_time: - min_in_frame_time = in_node.frame_time - if in_node.frame_time > node.frame_time and in_node.frame_time != -1: - node.frame_time = in_node.frame_time - # if all inputs have special value for frame time, node have special value for frame time too - # because it is on shape path - if min_in_frame_time == max_frame_time: - node.frame_time = max_frame_time - - - @staticmethod - def insert_select(graph: Graph, node: Node): - context_len = node.frame_time + 1 - - if context_len == 1: - return - - in_node_port = node.in_port(0).get_source() - in_node_shape = node.in_port(0).data.get_shape() - node.in_port(0).disconnect() - - # add Select before saving 
state to avoid saving garbage - select_node = Select(graph, {'name': 'select_' + node.name}).create_node() - zero_else = create_const_with_batch_from_input(in_node_port, in_node_shape[1]) - select_node.in_port(1).connect(in_node_port) - select_node.in_port(2).connect(zero_else.out_port(0)) - - # check if we have already appropriate iteration counter - existing_counters = find_pattern_matches(graph, nodes=[('mem_in', dict(op='ReadValue')), - ('mem_in_data', dict(shape=int64_array([context_len]))), - ('crop_mem_in', dict(op='Crop', axis=int64_array([1]), - offset=int64_array([1]), - dim=int64_array([context_len - 1]))), - ('crop_mem_in_data', dict()), - ('concat', dict(op='Concat', axis=1)), - ('concat_data', dict()), - ('const_1', dict(op='Const')), - ('const_1_data', dict()), - ('mem_out', dict(op='Assign')), - ('crop_out', dict(op='Crop', axis=int64_array([1]), - offset=int64_array([0]), - dim=int64_array([1]))), - ('crop_out_data', dict()), - ('select', dict(op='Select')) - ], - edges=[('mem_in', 'mem_in_data'), ('mem_in_data', 'crop_mem_in'), - ('crop_mem_in', 'crop_mem_in_data'), - ('crop_mem_in_data', 'concat', {'in': 0}), - ('const_1', 'const_1_data'), - ('const_1_data', 'concat', {'in': 1}), - ('concat', 'concat_data'), ('concat_data', 'mem_out'), - ('concat_data', 'crop_out'), ('crop_out', 'crop_out_data'), - ('crop_out_data', 'select')]) - counter_match = next(existing_counters, None) - if counter_match is not None: - ones = Node(graph, inverse_dict(counter_match)['const_1']) - input_port = Node(graph, inverse_dict(counter_match)['crop_out']).out_port(0) - else: - init_value_mem_out = create_const_with_batch_from_input(in_node_port, context_len, precision=np.int32) - mem_out = ReadValue(graph, {'name': 'iteration_number', - 'variable_id': 'iteration_' + node.name, - 'variable_shape': None, - 'variable_type': None - }).create_node() - mem_out.in_port(0).connect(init_value_mem_out.out_port(0)) - cut_first = Crop(graph, {'name': 'cut_first', 'axis': int64_array([1]), - 'offset': int64_array([1]), 'dim': int64_array([context_len - 1])}).create_node() - cut_first.in_port(0).connect(mem_out.out_port(0)) - ones = create_const_with_batch_from_input(in_node_port, 1, 1, np.int32) - concat = Concat(graph, {'name': 'concat_ones', 'in_ports_count': 2, 'axis': 1}).create_node() - concat.in_port(0).connect(cut_first.out_port(0)) - concat.in_port(1).connect(ones.out_port(0)) - mem_in = Assign(graph, {'name': 'iteration_number_out', - 'variable_id': 'iteration_' + node.name}).create_node() - mem_in.in_port(0).connect(concat.out_port(0)) - res = Result(graph, {}).create_node() - mem_in.out_port(0).connect(res.in_port(0)) - cut_last = Crop(graph, {'name': 'cut_last', 'axis': int64_array([1]), - 'offset': int64_array([0]), 'dim': int64_array([1])}).create_node() - cut_last.in_port(0).connect(concat.out_port(0)) - input_port = cut_last.out_port(0) - - # Check if data from memory is 1 - # if it is True, we have correct data and should proceed with saving it to memory - # else we have not gathered context and have garbage here, shouldn't change initial state of memory - cast_in = Equal(graph, {'name': input_port.node.name + '/cast_to_bool'}).create_node() - cast_in.in_port(0).connect(ones.out_port(0)) - cast_in.in_port(1).connect(input_port) - select_node.in_port(0).connect(cast_in.out_port(0)) - select_node.out_port(0).connect(node.in_port(0)) - select_node.out_port(0).data.set_shape(in_node_shape) - - def find_and_replace_pattern(self, graph: Graph): - if np.all([node.soft_get('name', node.id) == 
'iteration_number_out' - for node in graph.get_op_nodes(op='Assign')]): - return - - nx.set_node_attributes(G=graph, name='frame_time', values=-1) - should_continue = True - while should_continue: - self.calculate_frame_time(graph) - should_continue = False - for node in graph.get_op_nodes(op='Assign'): - if node.frame_time == -1: - should_continue = True - - for node in graph.get_op_nodes(op='Assign'): - if node.soft_get('name', node.id) == 'iteration_number_out': - continue - self.insert_select(graph, node) - - for node in graph.get_op_nodes(): - if 'frame_time' in node: - del node['frame_time'] diff --git a/tools/mo/openvino/tools/mo/middle/InterpolateSequenceToInterpolate.py b/tools/mo/openvino/tools/mo/middle/InterpolateSequenceToInterpolate.py deleted file mode 100644 index 6c1ee0458eb8eb..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/InterpolateSequenceToInterpolate.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from typing import List - -import numpy as np - -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import group_by_with_binary_predicate - - -def node_has_one_consumer(node: Node) -> bool: - return len(node.out_port(0).get_destinations()) == 1 - - -def is_next(first: Node, second: Node) -> bool: - """ - This function checks if 'first' is predecessor of 'second'. The node 'first' is called to be - a predecessor of the node 'second', if an output of 'first' is an input of 'second', and - number of destinations of 'first' is equal to 1. - :param first: an Interpolate layer - :param second: another Interpolate layer - :return: True, if 'first' is an predecessor of 'second', and False otherwise. - """ - dests = first.out_port(0).get_destinations() - if node_has_one_consumer(first): - return second.id == dests[0].node.id - elif first.soft_get('maybe_part_of_sequence', False): - return len(dests) == 2 and second.id in [d.node.id for d in dests] - return False - - -class CanBeFused: - def __init__(self): - # We need to accumulate set of axes of compared nodes, because there can be a sequence of a set of axes - # {i}{j}{i} - self.accumulated_axes = set() - self.default_values_for_opset4 = { - 'mode': None, - 'shape_calculation_mode': None, - 'coordinate_transformation_mode': 'half_pixel', - 'nearest_mode': 'round_prefer_floor', - 'antialias': 0, - 'cube_coeff': -0.75 - } - self.default_pads = int64_array([0]) - - def _compare_attributes_of_interpolate1(self, first: Node, second: Node) -> bool: - """ - This function checks whether attributes of Interpolate-1 nodes first and second are identical - (except attribute 'axes'). - :param first: the first of compared nodes - :param second: the second of compared nodes - :return: True, if attributes of nodes are identical and False otherwise - """ - # If some of attributes 'mode', 'align_corners', 'antialias', 'pads_begin', 'pads_end' are different, - # then attributes of nodes are not identical. 
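The opset-4 branch of this check boils down to a dictionary comparison against default_values_for_opset4; the opset-1 branch works the same way over its own attribute list. A minimal standalone sketch with plain dicts (illustrative names only, not the MO API):

# Two Interpolate nodes are fusable only if every attribute except 'axes' matches,
# falling back to the per-opset default when a node does not define the attribute.
OPSET4_DEFAULTS = {
    'mode': None,
    'shape_calculation_mode': None,
    'coordinate_transformation_mode': 'half_pixel',
    'nearest_mode': 'round_prefer_floor',
    'antialias': 0,
    'cube_coeff': -0.75,
}

def attrs_match(first: dict, second: dict, defaults: dict = OPSET4_DEFAULTS) -> bool:
    return all(first.get(attr, default) == second.get(attr, default)
               for attr, default in defaults.items())

assert attrs_match({'mode': 'nearest'}, {'mode': 'nearest', 'antialias': 0})
assert not attrs_match({'mode': 'nearest'}, {'mode': 'linear'})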
- op = Interpolate(graph=first.graph, attrs={}) - for attr in ['mode', 'align_corners', 'antialias', 'pads_begin', 'pads_end']: - if first.soft_get(attr, default=op.attrs[attr]) != second.soft_get(attr, default=op.attrs[attr]): - return False - return True - - def _compare_attributes_of_interpolate4(self, first: Node, second: Node) -> bool: - """ - This function checks whether attributes of Interpolate-4 nodes first and second are identical. - :param first: the first of compared nodes - :param second: the second of compared nodes - :return: True, if attributes of nodes are identical and False otherwise - """ - # If some of attributes 'mode', 'coordinate_transformation_mode', 'nearest_mode', 'antialias', 'cube_coeff' - # are different, then attributes of first and second are not identical. - for attr in self.default_values_for_opset4.keys(): - default_value = self.default_values_for_opset4[attr] - if first.soft_get(attr, default=default_value) != second.soft_get(attr, default=default_value): - return False - - # If attributes 'pads_begin' or 'pads_end' of nodes first and second are different, then attributes - # of first and second are not identical. - for attr in ['pads_begin', 'pads_end']: - if not np.array_equal(first.soft_get(attr, default=self.default_pads), - second.soft_get(attr, default=self.default_pads)): - return False - return True - - def _compare_attributes(self, first: Node, second: Node) -> bool: - """ - This function checks whether attributes of nodes first and second are identical (except attribute 'axes'). - :param first: the first of compared nodes - :param second: the second of compared nodes - :return: True, if attributes of nodes are identical and False otherwise - """ - # If opsets of nodes are different, then nodes have different attributes. - fst_opset = first.get_opset() - snd_opset = second.get_opset() - if fst_opset != snd_opset: - return False - - if fst_opset not in ['opset1', 'opset4']: - fst_name = first.soft_get('name', first.id) - snd_name = second.soft_get('name', second.id) - raise Error('Unsupported opset {} for nodes with names {} and {}'.format(fst_opset, fst_name, snd_name)) - - if fst_opset == 'opset1': - return self._compare_attributes_of_interpolate1(first, second) - else: - return self._compare_attributes_of_interpolate4(first, second) - - def __call__(self, first: Node, second: Node) -> bool: - """ - This function checks whether Interpolate nodes 'first' and 'second' can be fused. - :param first: the first of fused nodes - :param second: the second of fused nodes - :return: True, if nodes can be fused, and False otherwise - """ - if not (is_next(first, second) and self._compare_attributes(first, second)): - self.accumulated_axes = set() - return False - - fst_axes = set([a for a in Interpolate.get_axes(first)]) - snd_axes = set([a for a in Interpolate.get_axes(second)]) - - self.accumulated_axes = self.accumulated_axes | fst_axes - - # If the set of accumulated axes and the set of axes of 'second' do not intersect then nodes can be fused, - # because interpolations with respect to various axes do not affect each other. - if not(self.accumulated_axes & snd_axes): - return True - - # Otherwise, nodes cannot be fused. 
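The accumulated-axes rule can be pictured with plain Python sets; the snippet below is a toy illustration of the decision, not the MO implementation:

def can_extend(accumulated_axes: set, next_axes: set) -> bool:
    # a sequence keeps growing only while the next Interpolate touches axes
    # that were not interpolated earlier in the same sequence
    return not (accumulated_axes & next_axes)

acc = set()
for axes in [{2}, {3}, {2}]:       # axes of three consecutive Interpolate ops: {i}{j}{i}
    if can_extend(acc, axes):
        acc |= axes                # {2}, then {2, 3}: the first two ops fuse together
    else:
        acc = set()                # the third op repeats axis 2, so it starts a new sequence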
- self.accumulated_axes = set() - return False - - -def get_interpolate_attributes(node: Node) -> dict: - opset_to_default_values = { - 'opset1': { - 'mode': None, - 'align_corners': 0, - 'antialias': 0, - 'pads_begin': 0, - 'pads_end': 0, - 'version': 'opset1' - }, - 'opset4': { - 'mode': None, - 'shape_calculation_mode': None, - 'antialias': 0, - 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), - 'coordinate_transformation_mode': 'half_pixel', - 'nearest_mode': 'round_prefer_floor', - 'cube_coeff': -0.75, - 'version': 'opset4' - }, - } - opset = node.get_opset() - result = {} - if opset in opset_to_default_values: - default_values = opset_to_default_values[opset] - for attr in default_values.keys(): - value = node.soft_get(attr, default=default_values[attr]) - result[attr] = value - return result - else: - raise Error('Unsupported opset {} for node with name {}.'.format(opset, node.soft_get('name', node.id))) - - -def replace_sequence(seq: List[Node], graph: Graph): - """ - This function replaces a sequence of consecutive Interpolate layers with one Interpolate layer, - if modes of all nodes of a sequence are the same. - :param seq: sequence of Interpolate layers - :param graph: graph to which nodes of seq belong - :return: Nothing - """ - if not seq: - return - if len(seq) == 1: - return - - modes = set([n.mode for n in seq]) - if len(modes) != 1: - return - - dims_and_scales_ = [] - # Each element of the list dims_and_scales_ is a pair - # (axis, output size for this axis) (opset1) - # or - # (axis, output size for this axis, output scales for this axis) (opset4) - if seq[0].get_opset() == 'opset1': - for interp in seq: - dims_and_scales_.extend(zip(Interpolate.get_axes(interp), - interp.in_port(1).get_connection().get_source().data.get_value())) - - axis_to_size = sorted(list(dict(dims_and_scales_).items()), key=lambda x: x[0]) - axes_of_node = int64_array([z[0] for z in axis_to_size]) - sizes = shape_array([z[1] for z in axis_to_size]) - scales = np.ones(len(axis_to_size), dtype=np.float32) - else: - for interp in seq: - dims_and_scales_.extend(zip(Interpolate.get_axes(interp), - interp.in_port(1).get_connection().get_source().data.get_value(), - interp.in_port(2).get_connection().get_source().data.get_value())) - - axis_to_size = sorted(dims_and_scales_, key=lambda x: x[0]) - axes_of_node = int64_array([z[0] for z in axis_to_size]) - sizes = shape_array([z[1] for z in axis_to_size]) - scales = mo_array([z[2] for z in axis_to_size]) - - fst_interp_node = seq[0] - last_interp_node = seq[-1] - last_interp_node_name = last_interp_node.soft_get('name', last_interp_node.id) - attributes = get_interpolate_attributes(fst_interp_node) - - opset = fst_interp_node.get_opset() - if opset == 'opset1': - attributes['axes'] = axes_of_node - interp_node = create_op_with_const_inputs(graph, Interpolate, {1: sizes}, attributes) - - fst_interp_connection = fst_interp_node.in_port(0).get_connection() - fst_interp_connection.set_destination(interp_node.in_port(0)) - - last_interp_node.out_port(0).get_connection().set_source(interp_node.out_port(0)) - else: - attributes['in_ports_count'] = 4 - interp_node = create_op_with_const_inputs(graph, Interpolate, - {1: sizes, 2: scales, 3: axes_of_node}, - attributes) - - fst_interp_connection = fst_interp_node.in_port(0).get_connection() - fst_interp_connection.set_destination(interp_node.in_port(0)) - - last_interp_node.out_port(0).get_connection().set_source(interp_node.out_port(0)) - - rename_nodes([(last_interp_node, last_interp_node_name + 
'/delete'), (interp_node, last_interp_node_name)]) - - -class InterpolateSequenceToInterpolate(MiddleReplacementPattern): - """ - This transformation replaces a sequence of Interpolate layers by one Interpolate layer. - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.middle.UpsampleToResample import UpsampleToResample - return [UpsampleToResample] - - def find_and_replace_pattern(self, graph: Graph): - log.debug('Enabled replacement of a sequence of Interpolate layers with one Interpolate layer.') - interps = [n for n in graph.pseudo_topological_sort() if n.kind == 'op' and n.op == 'Interpolate'] - fuser = CanBeFused() - sequences = group_by_with_binary_predicate(interps, fuser) - for seq in sequences: - replace_sequence(seq, graph) diff --git a/tools/mo/openvino/tools/mo/middle/L2NormFusing.py b/tools/mo/openvino/tools/mo/middle/L2NormFusing.py deleted file mode 100644 index 863f14207b7dd3..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/L2NormFusing.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.normalize_l2 import NormalizeL2Op -from openvino.tools.mo.front.common.layout import get_features_dim -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class L2NormToNorm(MiddleReplacementPattern): - """ - Transformation fuses sub-graph performing l2 normalization into the NormalizeL2 operation. OV plugins do not support - NormalizeL2 operation and there is a nGraph transformation which converts NormalizeL2 to NormalizeIE. The latter one - allows to normalize over just channel dimension or "channel + all spatial" dimensions for 2D, 3D or 4D cases. 
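The fusion is sound because the matched sub-graph, x * Pow(Maximum(ReduceSum(Pow(x, 2), axes), eps), -0.5), computes the same values that NormalizeL2 with eps_mode='max' produces. A quick numpy check of that equivalence (illustrative only, not the MO pattern matcher):

import numpy as np

def decomposed_l2(x, axes, eps):
    # the sub-graph as matched: Pow(2) -> ReduceSum -> Maximum -> Pow(-0.5) -> Mul
    return x * np.power(np.maximum(np.sum(np.power(x, 2), axis=axes, keepdims=True), eps), -0.5)

def normalize_l2(x, axes, eps):
    # what the single NormalizeL2 op with eps_mode='max' computes
    return x / np.sqrt(np.maximum(np.sum(x * x, axis=axes, keepdims=True), eps))

x = np.random.rand(1, 8, 4, 4).astype(np.float32)
assert np.allclose(decomposed_l2(x, (1,), 1e-10), normalize_l2(x, (1,), 1e-10))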
- """ - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import PreMiddleStart - return [PreMiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def pattern(self): - return dict( - nodes=[ - ('input', dict(kind='data')), - ('l2_normalize', dict(kind='op', op='Mul')), - ('l2_normalize_data', dict(kind='data')), - ('maximum', dict(kind='op', op='Maximum')), - ('maximum_data', dict(kind='data')), - ('maximum_y_data', dict(kind='data')), - ('rsqrt_pow', dict(kind='data', value=lambda x: np.all(x == -0.5) if x is not None else False)), - ('rsqrt', dict(kind='op', op='Pow')), - ('rsqrt_data', dict(kind='data')), - ('square_pow', dict(kind='data', value=lambda x: np.all(x == 2) if x is not None else False)), - ('square', dict(kind='op', op='Pow')), - ('square_data', dict(kind='data')), - ('sum', dict(kind='op', op='ReduceSum')), - ('sum_data', dict(kind='data')), - ], - edges=[ - ('input', 'square', {'in': 0}), - ('square_pow', 'square', {'in': 1}), - ('square', 'square_data'), - ('square_data', 'sum'), - ('sum', 'sum_data'), - ('maximum_y_data', 'maximum'), - ('sum_data', 'maximum'), - ('maximum', 'maximum_data'), - ('maximum_data', 'rsqrt', {'in': 0}), - ('rsqrt_pow', 'rsqrt', {'in': 1}), - ('rsqrt', 'rsqrt_data'), - ('rsqrt_data', 'l2_normalize'), - ('input', 'l2_normalize'), - ('l2_normalize', 'l2_normalize_data'), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - y = match['maximum'].in_port(0).data.get_value() - if y is None: - y = match['maximum'].in_port(1).data.get_value() - - if y is None or y.shape != (): - log.debug('The value of the "maximum_y_data" is not defined or is not constant') - return - - # We need to check axes which performed reduction because OV supports only 2D, 3D, 4D inputs and - # reduction only along spatial and channel dimensions. - input_rank = len(match['sum'].in_port(0).data.get_shape()) - if input_rank not in [2, 3, 4]: - log.debug('OV supports L2 normalization only for 2D, 3D and 4D tensors.') - return - - axes = match['sum'].in_port(1).data.get_value() - axes = int64_array(axes) - if axes.shape == (): - axes = int64_array([axes]) - axes = int64_array([axis if axis >= 0 else axis + input_rank for axis in axes]) - axes.sort() - - transformation_applicable = False - # check for case C + all spatial dims. 
Works for 2D (NC), 3D (NCH) and 4D (NCHW and NHWC) - if len(axes) + 1 == input_rank and np.array_equal(axes, int64_array(np.arange(start=1, stop=input_rank))): - transformation_applicable = True - - # check for pure C channel normalization - if len(axes) == 1 and ((input_rank == 4 and get_features_dim(graph.graph['layout'], input_rank) == axes[0]) or - (input_rank != 4 and axes[0] == 1)): - transformation_applicable = True - - if not transformation_applicable: - log.debug('OV doesn\'t support l2 normalization with reduction along axes {}.'.format(axes)) - return - - output_name = match['l2_normalize'].soft_get('name', match['l2_normalize'].id) - normalize_node = create_op_node_with_second_input(graph, NormalizeL2Op, axes, {'name': output_name, - 'eps_mode': 'max', 'eps': y}) - match['square'].in_port(0).get_source().connect(normalize_node.in_port(0)) - - match['square'].in_port(0).disconnect() - if match['l2_normalize'].in_port(0).get_source().node.id == match['rsqrt'].id: - match['l2_normalize'].in_port(1).disconnect() - else: - match['l2_normalize'].in_port(0).disconnect() - - match['l2_normalize'].out_port(0).get_connection().set_source(normalize_node.out_port(0)) - rename_nodes([(match['l2_normalize'], output_name + "/TBR"), (normalize_node, output_name)]) diff --git a/tools/mo/openvino/tools/mo/middle/LSTMRNNSequenceToTensorIterator.py b/tools/mo/openvino/tools/mo/middle/LSTMRNNSequenceToTensorIterator.py deleted file mode 100644 index 8bd0d156d09a63..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/LSTMRNNSequenceToTensorIterator.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.RNNSequenceNormalizeToIE import RNNSequenceNormalize -from openvino.tools.mo.ops.lstm_cell import LSTMCell -from openvino.tools.mo.ops.tensor_iterator import TensorIterator -from openvino.tools.mo.front.common.partial_infer.utils import shape_delete -from openvino.tools.mo.graph.graph import Graph, add_opoutput -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class LSTMToTensorIterator(MiddleReplacementPattern): - """ Converts normalized RNNSequence with op=LSTM to TensorIterator. - - Normalized RNNSequence means that it should be processed by - RNNSequenceNormalize transform that ensures its strict form. - - This transformation builds an alternative sub-graph for LSTMSequence - with TensorIterator connected in the same way as an original LSTMSequence - node and with internal body represented as LSTMCell op node with necessary - squeezes and unsqueezes around. 
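Conceptually, the TensorIterator produced here is the loop sketched below: squeeze one step of the sequence, run a single cell, unsqueeze the result back into the output. This is only a schematic stand-in for the real builder; any callable with the (x_t, h, c) -> (h, c) signature can play the role of the LSTMCell body.

import numpy as np

def unrolled_sequence(x, h0, c0, cell):        # x: [seq_len, batch, input_size]
    h, c, outputs = h0, c0, []
    for t in range(x.shape[0]):
        x_t = x[t]                             # "Squeeze" away the sequence dimension
        h, c = cell(x_t, h, c)                 # TensorIterator body: one cell step
        outputs.append(h[np.newaxis])          # "Unsqueeze" back to [1, batch, hidden]
    return np.concatenate(outputs, axis=0), h, c   # Y, Y_h, Y_c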
- """ - - enabled = True - force_clean_up = True - id = 'lstm_to_tensor_iterator' - - def run_after(self): - return [RNNSequenceNormalize] - - def run_before(self): - from openvino.tools.mo.middle.permute_tensor_iterator import TransposeTensorIteratorLSTM - return [TransposeTensorIteratorLSTM] - - def pattern(self): - return dict( - nodes=[ - ('lstm', dict(kind='op', op='LSTM', type='RNNSequence')), - ('input', dict(kind='data')), - ('weights', dict(kind='data')), - ('biases', dict(kind='data')), - # don't capture optional input initial states here - ('output', dict(kind='data')), - # don't capture optional output last states here - ], - edges=[ - ('input', 'lstm', {'in': 0}), - ('weights', 'lstm', {'bin': 'weights', 'in': 1}), - ('biases', 'lstm', {'bin': 'biases', 'in': 2}), - ('lstm', 'output', {'out': 0}), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - lstm = match['lstm'] - - # Build TensorIterator body first - body = Graph(name=lstm.name + '/sub_graph') - body.graph = graph.graph - - # 1. Input squeeze Reshape - inputs = [Op._create_data_node(body, lstm.name + '/inport/' + str(inp), - {'shape': lstm.in_node(inp).shape.copy(), - 'value': lstm.in_node(inp).value.copy() - if lstm.in_node(inp).value is not None and inp in [1, 2] else None}) - for inp in [0, 4, 5, 1, 2]] # X, WR, B, h_init, c_init - - inputs[0].shape[lstm.sequence_dim] = 1 - input_squeeze = Squeeze(body, dict(name=lstm.name + '/input_squeeze', internal_layer_id=0)) - squeeze_dim_data = Const(body, {'name': lstm.name + '/input_squeeze_dim', - 'value': [lstm.sequence_dim]}).create_node_with_data() - inputs[0] = input_squeeze.create_node_with_data([inputs[0], squeeze_dim_data], - edge_attrs=[{'internal_port_id': 0}]) - - # 2. Output unsqueeze Reshape - outputs = [Op._create_data_node(body, lstm.name + '/outport/' + str(out), - {'shape': lstm.out_node(out).shape.copy() if out in lstm.out_nodes() - else lstm.in_node(4).shape.copy()}) for out in [0, 1]] - for out in outputs: - add_opoutput(body, out.id, 0, False) - - outputs[0].shape = shape_delete(outputs[0].shape, lstm.sequence_dim) - output_unsqueeze = Unsqueeze(body, dict(name=lstm.name + 'output_unsqueeze', internal_layer_id=2)) - unsqueeze_dim_data = Const(body, {'name': lstm.name + '/output_unsqueeze_dim', - 'value': [lstm.sequence_dim]}).create_node_with_data() - - # 3. LSTMCell - lstm_cell_op = LSTMCell(body, dict(hidden_size=lstm.hidden_size, - activations=lstm.activations, - activation_alpha=lstm.activation_alpha, - activation_beta=lstm.activation_beta, - clip=lstm.clip, - input_forget=lstm.input_forget, - name=lstm.name + '/LSTMCell', - internal_layer_id=1)) - lstm_cell_node = lstm_cell_op.create_node_with_data(inputs, data_nodes=outputs, - edge_attrs=[{}, {'internal_port_id': 1}, - {'internal_port_id': 2}, {'bin': 'weights'}, - {'bin': 'biases'}]) - lstm_cell_node[0].in_node().out_edge(0)['internal_port_id'] = 4 - lstm_cell_node[0].in_node().out_edge(1)['internal_port_id'] = 5 - lstm_cell_node[0] = output_unsqueeze.create_node_with_data([lstm_cell_node[0], unsqueeze_dim_data]) - lstm_cell_node[0].in_node().out_edge(0)['internal_port_id'] = 3 - add_opoutput(body, lstm_cell_node[0].id, 0, False) - - # 4. 
TensorIterator layer creating - assert lstm.direction in ['forward', 'reverse'] - if lstm.direction == 'forward': - stride = 1 - start = None - end = None - else: - assert lstm.direction == 'reverse' - stride = -1 - start = -1 - end = 0 - - output_port_map = [{ - 'external_port_id': 3, - 'internal_layer_id': 2, - 'internal_port_id': 3, - - 'axis': lstm.sequence_dim, - 'stride': stride, - 'start': start, - 'end': end, - 'part_size': 1, - }] - - # Adding h_state, c_state to outputs - if len(lstm.out_nodes()) == 3: - output_port_map.extend([{ - 'external_port_id': 4, - 'internal_layer_id': 1, - 'internal_port_id': 4, - }, { - 'external_port_id': 5, - 'internal_layer_id': 1, - 'internal_port_id': 5, - }]) - - ti_op = TensorIterator(graph, { - 'name': lstm.name + '/TensorIterator', - 'body': body, - 'in_ports_count': 3, - 'out_ports_count': len(lstm.out_nodes()), - - 'input_port_map': [ - { - 'external_port_id': 0, - 'internal_layer_id': 0, - 'internal_port_id': 0, - - 'axis': lstm.sequence_dim, - 'stride': stride, - 'start': start, - 'end': end, - 'part_size': 1, - }, - { - 'external_port_id': 1, - 'internal_layer_id': 1, - 'internal_port_id': 1, - }, - { - 'external_port_id': 2, - 'internal_layer_id': 1, - 'internal_port_id': 2, - }, - ], - - 'output_port_map': output_port_map, - - 'back_edges': [ - { - 'from_layer': 1, - 'from_port': 4, - 'to_layer': 1, - 'to_port': 1, - }, - { - 'from_layer': 1, - 'from_port': 5, - 'to_layer': 1, - 'to_port': 2, - }, - ] - }) - - assert sorted(lstm.out_nodes().keys()) == list(range(len(lstm.out_nodes()))), \ - "There are gaps in output ports of LSTMSequence operation. Node {}".format(lstm.id) - - outs = ti_op.create_node_with_data([lstm.in_node(i) for i in [0, 4, 5]], # X, h_init, c_init - data_nodes=[lstm.out_node(i) for i in range(len(lstm.out_nodes()))], - edge_attrs=[{'external_port_id': 0}, {'external_port_id': 1}, - {'external_port_id': 2}]) - - if not isinstance(outs, list): - outs = list([outs]) - - graph.remove_node(lstm.id) - outs[0].in_edge(0)['external_port_id'] = 3 - for i, out in enumerate(outs[1:]): - external_port_id = 4 + i - out.in_edge()['external_port_id'] = external_port_id - - ti = outs[0].in_node() - TensorIterator.cover_body_input_data_nodes_with_parameter_ops(ti) - TensorIterator.cover_body_constant_data_nodes_with_const_ops(ti) - TensorIterator.normalize_internal_ids(ti) diff --git a/tools/mo/openvino/tools/mo/middle/LayoutChangeForConstantShapePaths.py b/tools/mo/openvino/tools/mo/middle/LayoutChangeForConstantShapePaths.py deleted file mode 100644 index 894911adf15c3a..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/LayoutChangeForConstantShapePaths.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from collections import deque -from typing import List, Set - -from openvino.tools.mo.middle.InsertLayoutPropagationTransposes import is_output_data_in_correct_layout, \ - InsertLayoutPropagationTranspose, mark_input_as_in_correct_layout, mark_output_as_in_correct_layout -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.graph.port import Port -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class 
LayoutChangeForConstantShapePaths(MiddleReplacementPattern): - enabled = True - graph_condition = [lambda graph: graph.graph['layout'] == 'NHWC', - lambda graph: not graph.graph['cmd_params'].static_shape] - force_clean_up = True - - def run_after(self): - return [InsertLayoutPropagationTranspose] - - def run_before(self): - return [] - - @staticmethod - def get_next_in_ports(in_port: Port) -> Set[Port]: - next_in_ports = set() - for out_port in in_port.node.out_ports().values(): - next_in_ports.update(out_port.get_destinations()) - return next_in_ports - - def find_shape_subgraph_endpoints(self, out_ports: List[Port], visited: set = None, - action: callable = None) -> Set[Port]: - """ - Searches for input ports of data dependent operations starting from output ports passed to the function. - Condition for data dependent operations is absence of node output value. - - :param out_ports: list of output ports to start search from - :param visited: set of input ports that were visited to avoid visiting them more than once - :param action: function to call on the each input port of shape sub-graph - :return: set of input ports of data dependent operations - """ - if visited is None: - visited = set() - - deque_of_in_ports = deque() - for out_port in out_ports: - deque_of_in_ports.extend(out_port.get_destinations()) - - end_points_in_ports = set() - while len(deque_of_in_ports): - in_port = deque_of_in_ports.popleft() - if in_port in visited: - continue - next_in_ports = self.get_next_in_ports(in_port) - if any([port.data.get_value() is None for port in next_in_ports]): - end_points_in_ports.add(in_port) - else: - deque_of_in_ports.extend(next_in_ports) - if action is not None: - action(in_port) - visited.add(in_port) - return end_points_in_ports - - def find_and_replace_pattern(self, graph: Graph): - shape_ops = graph.get_op_nodes(op='ShapeOf') - - # 1. Inserting Gather to N*C format on constant shape paths - for shape in shape_ops: - source_port = shape.in_port(0).get_source() - if is_output_data_in_correct_layout(source_port.node, source_port.idx): - continue # data is already in N*C format - - name = shape.soft_get('name', shape.id) - rank = source_port.data.get_shape().size - - if rank in [4, 5]: - index = int64_array([0, *list(range(2, rank)), 1]) - else: - continue # data is layout independent - - gather = create_op_with_const_inputs(graph, op=Gather, port_value_dict={1: index, 2: int64_array(0)}, - op_attrs={'name': name + '/GatherNCHWtoNHWC'}) - shape.out_port(0).get_connection().insert_node(gather) - - # 2. 
Inserting Gather/Transpose to NC* format - shape_sub_graph_end_points = self.find_shape_subgraph_endpoints([shape.out_port(0) for shape in shape_ops]) - for in_port in shape_sub_graph_end_points: - name = in_port.node.soft_get('name', in_port.node.id) - shape = in_port.data.get_shape() - - should_switch_layout = not any([is_output_data_in_correct_layout(port.node, port.idx) - for port in in_port.node.out_ports().values() if not port.disconnected()]) - should_insert_gather = should_switch_layout and len(shape) == 1 and shape.item(0) in [4, 5] - should_insert_transpose = should_switch_layout and len(shape) in [4, 5] - - if should_insert_gather: - # we should turn input permutation off to perform it with the following gather insertion - in_port.__setattr__('input_permutation', None) - index = int64_array([0, shape.item(0) - 1, *list(range(1, shape.item(0) - 1))]) - gather = create_op_with_const_inputs(graph, op=Gather, - port_value_dict={1: index, 2: int64_array(0)}, - op_attrs={'name': name + '/GatherNHWCtoNCHW'}) - in_port.get_connection().insert_node(gather) - elif should_insert_transpose: - # we should turn input permutation off to perform it with the following transpose insertion - in_port.__setattr__('input_permutation', None) - order = int64_array([0, len(shape) - 1, *list(range(1, len(shape) - 1))]) - transpose = create_op_with_const_inputs(graph, op=Transpose, port_value_dict={1: order}, - op_attrs={'name': name + '/TransposeNHWCtoNCHW', - 'override_output_shape': True}) - mark_input_as_in_correct_layout(transpose, 0) - mark_output_as_in_correct_layout(transpose, 0) - in_port.get_connection().insert_node(transpose) - else: - continue # data is layout independent diff --git a/tools/mo/openvino/tools/mo/middle/LayoutChangeForEinsum.py b/tools/mo/openvino/tools/mo/middle/LayoutChangeForEinsum.py deleted file mode 100644 index 3b8ffe74f10826..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/LayoutChangeForEinsum.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.InsertLayoutPropagationTransposes import is_input_data_in_correct_layout, \ - is_output_data_in_correct_layout -from openvino.tools.mo.ops.einsum import Einsum -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class LayoutChangeForEinsum(MiddleReplacementPattern): - """ - The transformation adjusts Einsum equation to NCHW layout. - Subscripts for tensor of rank greater than three must be adjusted - to NCHW layout, meaning a label for the last dimension is moved - to the second position in the subscript. - There is an exception when the last label in the subscript is ellipsis - and covers multiple dimensions. In this case subscript is not changed and - Transpose to get original NHWC layout back is inserted. - The transformation is only applicable to TensorFlow case. 
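The core of the subscript rewrite can be shown in a few lines. The real logic lives in Einsum.adjust_equation_with_NCHW_layout; the function below is only a simplified sketch of the rule stated in the docstring:

def nhwc_subscript_to_nchw(subscript: str, rank: int) -> str:
    # rank <= 3 operands and subscripts ending with an ellipsis are left untouched;
    # otherwise the label of the last (channel) dimension moves to position 1
    if rank <= 3 or subscript.endswith('...'):
        return subscript
    return subscript[0] + subscript[-1] + subscript[1:-1]

assert nhwc_subscript_to_nchw('abcd', 4) == 'adbc'    # NHWC labels -> NCHW labels
assert nhwc_subscript_to_nchw('ab...', 5) == 'ab...'  # trailing ellipsis: unchanged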
- """ - enabled = True - force_shape_inference = True - graph_condition = [lambda graph: graph.graph['layout'] == 'NHWC'] - - def run_after(self): - from openvino.tools.mo.middle.MarkSubgraphsWithCorrectLayout import MarkSubGraphsWithCorrectLayout - return [MarkSubGraphsWithCorrectLayout] - - def run_before(self): - from openvino.tools.mo.middle.InsertLayoutPropagationTransposes import InsertLayoutPropagationTranspose - return [InsertLayoutPropagationTranspose] - - def find_and_replace_pattern(self, graph: Graph): - import openvino.tools.mo.middle.InsertLayoutPropagationTransposes as InsertTransposes - for einsum in graph.get_op_nodes(type='Einsum'): - einsum_name = einsum.soft_get('name', einsum.id) - assert einsum.has_valid('equation'), "Equation attribute is mandatory" \ - " for Einsum node {}".format(einsum_name) - equation = einsum.equation - connected_in_ports = [port for port in einsum.in_ports().values() if not port.disconnected()] - num_inputs = len(connected_in_ports) - - # check if correct_data_layout attribute is set for inputs and output - # this attribute can be set up within MarkSubgraphWithCorrectLayout transformation - # for example, when Einsum is located near to MatMul operation in a graph - input_correct_layout_mask = [] - for input_ind in range(num_inputs): - input_correct_layout_mask.append(is_input_data_in_correct_layout(einsum, input_ind)) - is_output_layout_correct = is_output_data_in_correct_layout(einsum, 0) - - # compute a mask of which inputs/output are adjusted to the required layout - # if they are not adjusted, it means to require transpose - input_ranks = [len(einsum.in_port(port_idx).data.get_shape()) for port_idx in range(num_inputs)] - output_rank = len(einsum.out_port(0).data.get_shape()) - permuted_equation, are_inputs_adjusted, is_output_adjusted = Einsum.adjust_equation_with_NCHW_layout( - einsum_name, - equation, - input_ranks, - output_rank, input_correct_layout_mask, is_output_layout_correct) - assert len(are_inputs_adjusted) == num_inputs - - # setup adjusted equation - einsum.equation = permuted_equation - - # insert Transpose node to get NHWC layout back (for inputs) that is required due to specifics of equation - for input_ind in range(num_inputs): - if not are_inputs_adjusted[input_ind]: - # that means Einsum can only accept input in NHWC layout - # so the inserted transpose before the Einsum will convert the layout to NHWC - InsertTransposes.insert_transpose(graph, einsum.in_port(input_ind), before_input=True) - if not is_output_adjusted: - # that means Einsum can only generate output in NHWC layout - # so the inserted transpose followed after the output will convert the layout back into NCHW layout - InsertTransposes.insert_transpose(graph, einsum.out_port(0), before_input=False) diff --git a/tools/mo/openvino/tools/mo/middle/LeakyReluPattern.py b/tools/mo/openvino/tools/mo/middle/LeakyReluPattern.py deleted file mode 100644 index 457208ab43adeb..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/LeakyReluPattern.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from openvino.tools.mo.middle.fusings import Fusing -from openvino.tools.mo.middle.pass_separator import PostMiddleStart -from openvino.tools.mo.ops.activation_ops import LeakyReLU -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class LeakyReLUFusion(MiddleReplacementPattern): - """ - The 
transformation finds next subgraph: - - -->Data-------->Maximum-->Data - `-->Mul---` - - and replaces with ReLU with negative slope (LeakyRelu) - """ - enabled = True - force_clean_up = True - - def run_after(self): - return [Fusing] - - def run_before(self): - return [PostMiddleStart] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('data', dict(kind='data')), - ('mul_data', dict(kind='data')), - ('max_op', dict(kind='op', type='Maximum')), - ('const_op', dict(kind='op', type='Const')), - ('const_data', dict(kind='data')), - ('mul_op', dict(kind='op', type='Multiply')), - ], - edges=[ - ('data', 'mul_op'), - ('mul_op', 'mul_data'), - ('data', 'max_op'), - ('mul_data', 'max_op'), - ('const_op', 'const_data'), - ('const_data', 'mul_op') - ], - ) - - def replace_pattern(self, graph: Graph, match: dict): - mul_node = match['mul_op'] - const_node = match['const_op'] - max_node = match['max_op'] - max_name = max_node.soft_get('name', max_node.id) - - const_value = const_node.out_port(0).data.get_value() - if const_value is None or const_value.size != 1: - log.debug('Mul layer "{}" can not participate in conversion to the LeakyReLU because constant "{}" ' - 'contains more than one element: {}'.format(mul_node.id, const_node.id, const_value.size)) - return - - # Create new LeakyReLU operation - leaky_relu_node = LeakyReLU(graph, dict(negative_slope=const_value.item(0))).create_node() - - data_in_port = int(mul_node.in_port(0).get_source().node.type == 'Const') - mul_node.in_port(data_in_port).get_source().connect(leaky_relu_node.in_port(0)) - max_node.out_port(0).get_connection().set_source(leaky_relu_node.out_port(0)) - - rename_nodes([(max_node, max_name + '/TBR'), (leaky_relu_node, max_name)]) - - log.debug('Successful conversion from {} {} to ReLU with negative slope (leaky ReLU)' - ''.format(max_node.id, mul_node.id)) diff --git a/tools/mo/openvino/tools/mo/middle/MXNetRNNSequenceNormalize.py b/tools/mo/openvino/tools/mo/middle/MXNetRNNSequenceNormalize.py deleted file mode 100644 index b45ab8c03e8e2d..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/MXNetRNNSequenceNormalize.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_insert, mo_array, shape_array, \ - unmask_shape -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.transpose import Transpose - - -class MXNetRNNSequenceNormalize(MiddleReplacementPattern): - """ - Convert blobs and shapes of MXNet-like RNN cell to OV compatible form. - - The target form of this operation is not normally covered by a dedicated - layer in OV. It should be further transformed to some other layer - that are supported by OV. This transformation pass involves weights and - shapes processing only. - - Post-conditions: - Inputs: - 0: X input data, shape [batch_size, seq_len, input_size] (or [seq_len. 
bathc_size, int_size], depends on - batch_dim param) - 1: W weights blob, shape [num_dir, n_cells, M, hidden_size, input_size] - 2: R weights blob, shape [num_dir, n_cells, M, hidden_size, hidden_size] - 3: B biases blob, shape [num_dir, n_cells, 2, M, hidden_size] - 4: (optional) sequence_length, shape [batch_size] - 5: initial hidden state, shape [num_dir, batch_size, hidden_size] - ([num_dir, n_cells, batch_size, hidden_size] if num_cells != 1) - 6: (only for LSTM) initial cell state, shape [num_dir, batch_size, hidden_size] - 7: (optional for LSTM) Peepholes weights, shape [num_dir, n_cells, (M - 1) * hidden_size] - - Outputs: - 0: Y output blob, shape [batch_size, num_dir, seq_len, hidden_size] - 1: (optional) Y_h, shape [num_dir, batch_size, hidden_size] - 2: (optional for LSTM) Y_c, shape [num_dir, batch_size, hidden_size] - - Where: - M -- number of gates in this cell (4 for LSTM, 3 for GRU, 1 for RNN). - num_dir -- number of directions ('forvard', 'bidirectional', 'reverse') - n_cells -- number of cells in layer (always 1 for ONNX). - - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.MXNetSplitMultiLayers import MXNetSplitLayersToRNNSequence - return [MXNetSplitLayersToRNNSequence] - - def pattern(self): - return dict( - nodes=[ - ('rnn_layer', dict(kind='op', type='RNNSequence', format='mxnet')), - ('input', dict(kind='data')), - ('params', dict(kind='data')), - ], - edges=[ - ('input', 'rnn_layer', {'in': 0}), - ('params', 'rnn_layer', {'in': 1}), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - rnn_layer = match['rnn_layer'] - - self.check_init_states(graph, match) - self.repack_weights(graph, match) - self.add_output_reshape(graph, match) - self.check_input_ports(graph, match) - rnn_layer['normalized'] = True - - @staticmethod - def repack_weights(graph: Graph, match: dict): - input = match['input'] - rnn_layer = match['rnn_layer'] - params = match['params'].value.copy() - - graph.remove_edge(match['params'].id, rnn_layer.id) - - input_size = input.shape[2] - direction = 2 if rnn_layer.has_num_directions else 1 - bsize = (2 * rnn_layer.hidden_size * direction * 1) * rnn_layer.multiplier - - W = mo_array(params[0:len(params) - bsize]) - B = mo_array(params[len(params) - bsize:]) - - W = W.reshape((direction, -1)) - B = B.reshape((direction, -1)) - - W, R = mo_array(W[:, 0:rnn_layer.hidden_size * rnn_layer.multiplier * input_size]), mo_array(W[:, rnn_layer.hidden_size * rnn_layer.multiplier* input_size:]) - - W, R = [x.reshape([ - direction, # 0: num of directions - 1, # 1: num_cells - rnn_layer.multiplier, # 2: four output parts of the matrix for all gates - rnn_layer.hidden_size, # 3: output size per direction and gate - -1]) # 4: input size/hidden size in W/R correspondingly - for x in (W, R)] - - assert W.shape[-1] == input_size - assert R.shape[-1] == rnn_layer.hidden_size - - B = B.reshape([ - direction, # 0: num of directions, limitation: should be 1 - 1, - 2, # 3: num of component B - rnn_layer.multiplier, # 1: four output parts of the matrix for all gates in order: i, f, c, o - rnn_layer.hidden_size, # 2: output size per direction and gate - ]) - - # Reorder gates: ifco --> fico - gate_reorder = rnn_layer.gate_order - W = np.take(W, gate_reorder, axis=2) - R = np.take(R, gate_reorder, axis=2) - B = np.take(B, gate_reorder, axis=3) - - # Add ports to rnn_layer - rnn_layer.add_sequence_of_ports(type='in', rng=range(7)) - - for blob, port in [(W, 1), (R, 2), (B, 3)]: - Op.create_and_connect_input_data_node( - graph, - 
rnn_layer, - {'value': blob, 'shape': int64_array(blob.shape)}, - {'in': port, 'permutation': None} - ) - - @staticmethod - def check_init_states(graph: Graph, match: dict): - """ - Check if cell have initial states and create zeros states if not. - And renumber ports for this states. - """ - rnn_cell = match['rnn_layer'] - num_directions = 2 if rnn_cell.direction == 'bidirectional' else 1 - batch_size = rnn_cell.in_node(0).shape[rnn_cell.batch_dim] - - h_init_port = 5 - c_init_port = 6 - - if 2 not in rnn_cell.in_nodes(): - h_shape = [num_directions, batch_size, rnn_cell.hidden_size] # from ONNX spec - h_init = np.full(h_shape, 0, dtype=np.float32) - Op.create_and_connect_input_data_node( - graph, - rnn_cell, - {'value': h_init, 'shape': int64_array(h_init.shape)}, - {'in': h_init_port, 'permutation': None} - ) - else: - hidden_state_edge = graph.get_edge_data(rnn_cell.in_node(2).id, rnn_cell.id) - hidden_state_edge[0]['in'] = h_init_port - - if rnn_cell.op == 'LSTM': - if 3 not in rnn_cell.in_nodes(): - c_shape = [num_directions, batch_size, rnn_cell.hidden_size] # from ONNX spec - c_init = np.full(c_shape, 0, dtype=np.float32) - Op.create_and_connect_input_data_node( - graph, - rnn_cell, - {'value': c_init, 'shape': int64_array(c_init.shape)}, - {'in': c_init_port, 'permutation': None} - ) - else: - cell_state_edge = graph.get_edge_data(rnn_cell.in_node(3).id, rnn_cell.id) - cell_state_edge[0]['in'] = c_init_port - - @staticmethod - def add_output_reshape(graph: Graph, match: dict): - """ - Since MXNet Y output shape is [batch_size, seq_len, hidden_size * num_directions] we need to add reshape - from above common format [batch_size, num_directions, seq_len, hidden_size] to MXNet format. - """ - lstm = match['rnn_layer'] - input = match['input'] - if not lstm.has_num_directions: - return - old_data_node = lstm.out_node(0) - num_directions = 2 if lstm.direction in ['bidirectional'] else 1 - mxnet_shape = lstm.out_node(0).shape.copy() - - if lstm.batch_dim == 0: - mo_shape = shape_array([input.shape[lstm.batch_dim], input.shape[lstm.sequence_dim], lstm.hidden_size]) - else: - mo_shape = shape_array([input.shape[lstm.sequence_dim], input.shape[lstm.batch_dim], lstm.hidden_size]) - - if lstm.has_num_directions: - mo_shape = shape_insert(mo_shape, 1, np.int64(num_directions)) - - lstm_name = lstm.soft_get('name', lstm.id) - - new_data = Op._create_data_node(graph, name=lstm_name + '/Data/Reshape_mxnet/', attrs={'shape': mo_shape}) - graph.remove_edge(lstm.id, old_data_node.id) - graph.add_edge(lstm.id, new_data.id, key=0, out=0) - - # Add Transpose - permute_order = Const(graph, {'name': lstm_name + '/Transpose_mxnet_order', - 'value': int64_array([0, 2, 1, 3])}).create_node_with_data() - permute_data = Transpose(graph, {'name': lstm_name + '/Transpose_mxnet/'} - ).create_node_with_data([new_data, permute_order]) - - # Add Reshape - reshape = Reshape(graph, {'name': lstm_name + '/Reshape_mxnet/'}) - reshape_dim_data = Const(graph, {'name': lstm_name + '/Reshape_mxnet_dim', - 'value': int64_array(unmask_shape(mxnet_shape))}).create_node_with_data() - - reshape.create_node_with_data([permute_data, reshape_dim_data], dict(), data_nodes=[old_data_node]) - - @staticmethod - def check_input_ports(graph: Graph, match: dict): - """ - Check that all mandatory ports is present. 
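The layout juggling in add_output_reshape above can be sanity-checked with numpy; the shapes below are arbitrary examples, not values taken from a real model:

import numpy as np

batch, num_dir, seq_len, hidden = 2, 2, 5, 8
y_common = np.random.rand(batch, num_dir, seq_len, hidden)      # [batch, num_dir, seq_len, hidden]
# Transpose with order [0, 2, 1, 3] followed by Reshape restores the MXNet output shape
y_mxnet = np.transpose(y_common, (0, 2, 1, 3)).reshape(batch, seq_len, hidden * num_dir)
assert y_mxnet.shape == (2, 5, 16)                               # [batch, seq_len, hidden * num_dir]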
- """ - rnn_layer = match['rnn_layer'] - mandatory_ports = [0, 1, 2, 3, 5] - - if rnn_layer.op == 'LSTM': - mandatory_ports.append(6) - - assert set(rnn_layer.in_nodes().keys()) >= set(mandatory_ports) diff --git a/tools/mo/openvino/tools/mo/middle/MXNetSplitMultiLayers.py b/tools/mo/openvino/tools/mo/middle/MXNetSplitMultiLayers.py deleted file mode 100644 index 02b5b4f3c4d99f..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/MXNetSplitMultiLayers.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.partial_infer.utils import shape_insert, int64_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import Op - - -class MXNetSplitLayersToRNNSequence(MiddleReplacementPattern): - """ - Split MXNet multilayer cell to multiple one-layers cells LSTM/GRU/RNN. - Also concatenate output hiddens and cells states of this layers. - """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('rnn_layer', dict(kind='op', type='RNNSequence', format='mxnet', multilayers=True)), - ('input', dict(kind='data')), - ('params', dict(kind='data')), - ], - edges=[ - ('input', 'rnn_layer', {'in': 0}), - ('params', 'rnn_layer', {'in': 1}), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - output_states = self.split_multilayer_cell(graph, match) - - rnn_layer = match['rnn_layer'] - self.concat_output_states(graph, match, output_states) - rnn_layer.graph.remove_node(rnn_layer.id) - - @staticmethod - def get_new_cell(multilayer_cell: Node, number: int): - cell_class = Op.get_op_class_by_name(multilayer_cell.op) - new_cell = lambda graph, attrs: cell_class(graph, attrs) - attrs = multilayer_cell.attrs().copy() - new_attrs = { - 'num_layers': 1, - 'multilayers': False, - 'name': multilayer_cell.name + '/LayerSplittedLSTM/{}'.format(number), - } - attrs.update(new_attrs) - return new_cell(multilayer_cell.graph, attrs) - - def split_multilayer_cell(self, graph: Graph, match: dict): - """ - Split one multilayer type=RNNSequence cell to num_layers consecutive cells. - All parameters splits to parts for new num_layers cells. 
- """ - input = match['input'] - rnn_layer = match['rnn_layer'] - params = match['params'].value.copy() - - have_hidden = False - if 2 in rnn_layer.in_nodes(): - hidden_state_value = rnn_layer.in_node(2).value - have_hidden = True - - have_cell = False - if 3 in rnn_layer.in_nodes(): - cell_state_value = rnn_layer.in_node(3).value - have_cell = True - - direction = 2 if rnn_layer.has_num_directions else 1 - num_layers = rnn_layer.num_layers - input_size = input.shape[2] - bsize = (2 * rnn_layer.hidden_size * direction * num_layers) * rnn_layer.multiplier - - size = rnn_layer.hidden_size * direction * rnn_layer.multiplier - first_layer_params_size = (input_size + rnn_layer.hidden_size + 2) * size - other_layer_params_size = (rnn_layer.hidden_size * direction + rnn_layer.hidden_size + 2) * size - assert params.size == (first_layer_params_size + (num_layers - 1) * other_layer_params_size) - - input_node = input - params_layer_size_count = 0 - output_states = [[], []] - - param_w = params[0:len(params)-bsize] - param_b = params[len(params) - bsize:] - layer_bsize = (2 * rnn_layer.hidden_size * direction) * rnn_layer.multiplier - - for l in range(num_layers): - params_layer_size = first_layer_params_size if l == 0 else other_layer_params_size - - layer_params_w = param_w[params_layer_size_count: params_layer_size_count + - (params_layer_size - layer_bsize)].copy() - layer_params_b = param_b[layer_bsize*l: layer_bsize*l+layer_bsize].copy() - layer_params = np.concatenate((layer_params_w, layer_params_b), axis=0) - params_layer_size_count = params_layer_size_count + params_layer_size - layer_bsize - - op = self.get_new_cell(rnn_layer, l) - name = str(rnn_layer.soft_get('name', rnn_layer.id)) - params_value_node = Const( - rnn_layer.graph, - dict(name=name + '/LayerSplittedParamsLSTM/{}/'.format(l), value=layer_params) - ).create_node_with_data() - - if have_hidden: - layer_hidden_state = hidden_state_value[l * direction: l * direction + direction] # pylint: disable=possibly-used-before-assignment - hidden_state_value_node = Const( - rnn_layer.graph, - dict(name=name + '/LayerSplittedHiddenState/{}/'.format(l), value=layer_hidden_state) - ).create_node_with_data() - else: - hidden_state_value_node = None - - if have_cell: - layer_cell_state = cell_state_value[l * direction: l * direction + direction] # pylint: disable=possibly-used-before-assignment - cell_state_value_node = Const( - rnn_layer.graph, - dict(name=name + '/LayerSplittedCellState/{}/'.format(l), value=layer_cell_state) - ).create_node_with_data() - else: - cell_state_value_node = None - - if l < num_layers-1: - output_data = Op._create_data_node( - rnn_layer.graph, - name=rnn_layer.out_node(0).name + '/LayerSplit/' + str(l), - attrs={'shape': rnn_layer.out_node(0).shape.copy()} - ) - else: - output_data = rnn_layer.out_node(0) - - # Output nodes creating: - state_size = int64_array([input.shape[rnn_layer.batch_dim], rnn_layer.hidden_size]) - if rnn_layer.has_num_directions: - state_size = shape_insert(state_size, 0, direction) - - output_hidden = Op._create_data_node( - rnn_layer.graph, - name=rnn_layer.out_node(1).name + '/LayerSplit/' + str(l), - attrs={'shape': mo_array(state_size)} - ) - - current_data_nodes = [output_data, output_hidden] - - if rnn_layer.op == 'LSTM': - output_cell = Op._create_data_node( - rnn_layer.graph, - name=rnn_layer.out_node(2).name + '/LayerSplit/' + str(l), - attrs={'shape': mo_array(state_size)} - ) - current_data_nodes.append(output_cell) - - data_nodes = op.create_node_with_data( - inputs=[ - 
input_node, - params_value_node, - hidden_state_value_node, - cell_state_value_node - ], - data_nodes=current_data_nodes, - ) - - input_node = data_nodes[0] - output_states[0].append(data_nodes[1]) - - if rnn_layer.op =='LSTM': - output_states[1].append(data_nodes[2]) - - return output_states - - @staticmethod - def concat_output_states(graph: Graph, match: dict, new_states: list): - """ Concatenates output states from multilayer layer. """ - rnn_layer = match['rnn_layer'] - original_states = [rnn_layer.out_node(i) if i in rnn_layer.out_nodes() else None for i in [1, 2]] - - concat_ops = [ - Concat(rnn_layer.graph, { - 'name': rnn_layer.name + '/FinalLayerSplitConcat/HiddenState', - 'axis': -1 - }), - Concat(rnn_layer.graph, { - 'name': rnn_layer.name + '/FinalLayerSplitConcat/CellState', - 'axis': -1 - }) - ] - - for i in range(len(original_states)): # [0] or [0, 1] - if original_states[i] is None: - continue - concat_ops[i].attrs.update({'in_ports_count': len(new_states[i])}) - concat_ops[i].create_node_with_data(inputs=new_states[i], data_nodes=[original_states[i]]) diff --git a/tools/mo/openvino/tools/mo/middle/MXTileReplacer.py b/tools/mo/openvino/tools/mo/middle/MXTileReplacer.py deleted file mode 100644 index ce5848f45e77dc..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/MXTileReplacer.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class MXTileReplacer(MiddleReplacementPattern): - """ - Aligns Tile operation from MxNet framework with OpenVINO Tile - - MxNet has no restrictions for `tile_array` input of `Tile` operation. 
- If len(tile_array) > rank(data), this transformation will insert Unsqueeze before Tile operation, - because in this case output_shape > input_shape - - DOC link: https://beta.mxnet.io/api/ndarray/_autogen/mxnet.ndarray.tile.html#mxnet.ndarray.tile - """ - - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('tile', dict(kind='op', op='Tile')) - ], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['tile'] - name = node.soft_get('name', node.id) - in_shape = node.in_port(0).data.get_shape() - out_shape = node.out_port(0).data.get_shape() - - tile_array_diff = len(out_shape) - len(in_shape) - if tile_array_diff == 0: - return - assert tile_array_diff > 0,\ - 'Unexpected difference between rank(input) and rank(output) for node {}'.format(name) - unsqueeze_dims = int64_array(range(tile_array_diff)) - unsqueeze = create_op_node_with_second_input(graph, Unsqueeze, unsqueeze_dims, - dict(name=name + '/Unsqueeze', override_output_shape=True)) - node.in_port(0).get_connection().insert_node(unsqueeze) diff --git a/tools/mo/openvino/tools/mo/middle/MakeKaldiConstReshapable.py b/tools/mo/openvino/tools/mo/middle/MakeKaldiConstReshapable.py deleted file mode 100644 index 50fc9d6a7771a4..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/MakeKaldiConstReshapable.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Port -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.broadcast import Broadcast -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.crop import Crop -from openvino.tools.mo.ops.shape import Shape - - -def create_const_with_batch_from_input(producer_port: Port, second_dim, value=0, precision=np.float32): - """ - Create const with batch taken from input_out_port and second dimension equals second_dim - :param producer_port: take batch from this port - :param second_dim: second dimension for created constant - :param value: value to initialize constant - :param precision: precision for constant - :return created constant node - """ - graph = producer_port.node.graph - input_name = producer_port.node.soft_get('name', producer_port.node.id) - - shape_of_input = None - for dest in producer_port.get_destinations(): - if dest.node.soft_get('op') == "ShapeOf": - shape_of_input = dest.node - break - - if shape_of_input is None: - shape_of_input = Shape(graph, {'name': input_name + '/Shape'}).create_node() - shape_of_input.in_port(0).connect(producer_port) - - get_batch = None - for dest in shape_of_input.out_port(0).get_destinations(): - if dest.node.soft_get('op') == "Crop" and \ - dest.node.in_port(1).get_source().node.soft_get('value', []) == int64_array([1]): - get_batch = dest.node - break - - if get_batch is None: - get_batch = create_op_node_with_second_input(graph, Crop, int64_array([1]), - {'name': shape_of_input.name + '/Crop', - 'axis': int64_array([0]), 'offset': int64_array([0])}, - shape_of_input) - - mem_shape = None - for dest in get_batch.out_port(0).get_destinations(): - if dest.node.soft_get('op') == "Concat" and \ - 
dest.node.in_port(1).get_source().node.soft_get('value', []) == int64_array([second_dim]): - mem_shape = dest.node - break - - if mem_shape is None: - mem_shape = create_op_node_with_second_input(graph, Concat, int64_array([second_dim]), - {'name': get_batch.name + '/Concat', 'axis': 0, - 'in_ports_count': 2}, get_batch) - - init_value_prev_lstm_output = None - for dest in mem_shape.out_port(0).get_destinations(): - if dest.node.soft_get('op') == "Broadcast" and \ - dest.node.in_port(1).get_source().node.soft_get('value', []) == mo_array([value], dtype=precision): - init_value_prev_lstm_output = dest.node - break - - if init_value_prev_lstm_output is None: - init_value_prev_lstm_output = create_op_with_const_inputs(graph, Broadcast, - {0: mo_array([value], dtype=precision)}, - {'name': mem_shape.name + '/Broadcast'}) - init_value_prev_lstm_output.in_port(1).connect(mem_shape.out_port(0)) - - return init_value_prev_lstm_output - - -class MakeKaldiConstReshapable(MiddleReplacementPattern): - """ - Add broadcasting of constant nodes based on batch from Parameter node. This approach works only for Kaldi, - because it has the same batch in whole graph due to framework specific. - """ - enabled = True - graph_condition = [lambda graph: graph.graph['fw'] == "kaldi"] - - def run_after(self): - from openvino.tools.mo.middle.InsertSelect import AddSelectBeforeMemoryNodePattern - from openvino.tools.mo.middle.ReplaceMemoryOffsetWithSplice import ReplaceMemoryOffsetWithMemoryNodePattern - from openvino.tools.mo.middle.ReplaceSpliceNodePattern import ReplaceSpliceNodePattern - return [AddSelectBeforeMemoryNodePattern, ReplaceMemoryOffsetWithMemoryNodePattern, - ReplaceSpliceNodePattern] - - def find_and_replace_pattern(self, graph: Graph): - params = graph.get_op_nodes(op="Parameter") - batch = params[0].shape[0] - - # check that all Parameters have the same batch - for p in params: - assert p.shape[0] == batch, \ - "Parameter {} has batch different from the {}".format(p.soft_get('name', p.id), - params[0].soft_get('name', params[0].id)) - - # make constants for initialization of ReadValue reshapable - for read in graph.get_op_nodes(op='ReadValue'): - input_node = read.in_port(0).get_source().node - if input_node.soft_get('op') == "Const": - const_shape = input_node.out_port(0).data.get_shape() - # extra check to be sure that we don't break shapes compatibility in graph - # in Kaldi models we have only 2 dimensions - # and batch should be set the same as we will get from Parameter - # otherwise just skip such node - if len(const_shape) != 2 or const_shape[0] != batch: - continue - new_const = create_const_with_batch_from_input(params[0].out_port(0), - const_shape[1], - value=input_node.value[0], precision=input_node.data_type) - input_node.out_port(0).get_connection().set_source(new_const.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/middle/MarkSubgraphsWithCorrectLayout.py b/tools/mo/openvino/tools/mo/middle/MarkSubgraphsWithCorrectLayout.py deleted file mode 100644 index 80d4cdc1b6f7bb..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/MarkSubgraphsWithCorrectLayout.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from collections import deque -from typing import Set - -from openvino.tools.mo.middle.InsertLayoutPropagationTransposes import InsertLayoutPropagationTranspose, \ - mark_as_correct_data_layout, mark_output_as_in_correct_layout, mark_input_as_in_correct_layout -from 
openvino.tools.mo.middle.LayoutChangeForConstantShapePaths import LayoutChangeForConstantShapePaths -from openvino.tools.mo.middle.pass_separator import PostMiddleStart -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.graph.port import Port -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class MarkSubGraphsWithCorrectLayout(MiddleReplacementPattern): - """ - The transformation looks for the layout agnostic operations which does not have a layout (NCHW or NHWC) and makes - necessary changes to infer the part of the topology in the original layout: - 1. Prevents from adding Transpose operations before and after "reinterp_shape" like operations which change rank of - the input and output tensors of this layout agnostic op. - 2. Disable attributes permutation for all intermediate ops between these "reinterp_shape" nodes. - 3. Marks nodes along the weight path of convolutions as in correct layout to not permute them from NHWC to NCHW. - The latest is needed for TF NCHW graphs as well. In Conv/Deconv infer functions "set_permutation()" - ads "permutation" attr to weights data node even for NCHW, it is needed to permute Conv weights from the - original TF layout into OV even for NCHW graphs. Therefore for TF models - to prevent unwarranted permutations need to mark weights path as having correct layout even for NCHW graphs. - """ - enabled = True - graph_condition = [lambda graph: graph.graph['fw'] == 'tf'] - op_conditions = [lambda n: n.soft_get('op') == 'MatMul' and - any([len(port.data.get_shape()) in (4, 5) for port in n.in_ports().values()]), - ] - - def run_after(self): - return [PostMiddleStart] - - def run_before(self): - return [InsertLayoutPropagationTranspose] - - @staticmethod - def get_input_nodes(node: Node): - return [src_port.get_source().node for src_port in node.in_ports().values() if not src_port.disconnected()] - - @staticmethod - def get_output_nodes(node: Node): - result = [] - for out_port in node.out_ports().values(): - if not out_port.disconnected(): - for dest_port in out_port.get_destinations(): - result.append(dest_port.node) - return result - - @staticmethod - def bfs(start_nodes: list, visited: set, condition: callable = None, forward: bool = True): - """ - The function performs BFS starting from selected nodes in forward or backward direction adding nodes by an - optional condition - :param start_nodes: Nodes to start search from - :param visited: set of already visited nodes where traversing should not happen - :param condition: function getting a Node as input and returning whether the node should be included into the - result or not. If the value is None then the node is added unconditionally. 
- :param forward: boolean flag specifying the traverse direction - :return: the list of Nodes visited - """ - assert visited is not None, 'The "visited" set must be defined' - assert start_nodes is not None, 'The list of start nodes must be specified' - - result = list() - d = deque(start_nodes) - while len(d) != 0: - cur_node = d.popleft() - result.append(cur_node) - visited.add(cur_node) - if forward: - next_nodes = MarkSubGraphsWithCorrectLayout.get_output_nodes(cur_node) - else: - next_nodes = MarkSubGraphsWithCorrectLayout.get_input_nodes(cur_node) - for next_node in next_nodes: - if next_node not in visited and (condition is None or condition(next_node)): - d.append(next_node) - return result - - def find_and_replace_pattern(self, graph: Graph): - visited = set() - marked_nodes = set() - condition_forward = lambda n: not InsertLayoutPropagationTranspose.is_nhwc_to_nchw_transpose_needed(n) - condition_backward = lambda n: not InsertLayoutPropagationTranspose.is_nchw_to_nhwc_transpose_needed(n) - for node_condition in self.op_conditions: - for node in graph.get_op_nodes(): - if node_condition(node): - log.debug('Detected node "{}" as a node which should be executed in the original layout' - ''.format(node.soft_get('name', node.id))) - forward_visited_nodes = self.bfs([node], visited, condition_forward, True) - backward_visited_nodes = self.bfs([node], visited, condition_backward, False) - - # find "reinterp_shape" like ops which change rank of input to 4D or 5D from smaller dimensions - for back_node in backward_visited_nodes: - for input_node in self.get_input_nodes(back_node): - if input_node not in backward_visited_nodes and not condition_forward(input_node): - marked_nodes.add(input_node) - - # find "reinterp_shape" like ops which change rank of input from 4D or 5D to smaller dimensions - for forward_node in forward_visited_nodes: - for output_node in self.get_output_nodes(forward_node): - if output_node not in forward_visited_nodes and not condition_backward(output_node): - marked_nodes.add(output_node) - - marked_nodes.update(forward_visited_nodes + backward_visited_nodes) - - if len(marked_nodes): - log.debug('The following nodes will be executed in the original layout: {}' - ''.format([n.soft_get('name', n.id) for n in marked_nodes])) - - # mark all matched nodes as in correct layout and disable attributes permutation for them - for visited_node in marked_nodes: - mark_as_correct_data_layout(visited_node) - visited_node['nchw_layout'] = True - - _, nodes_weigths, nodes_in_weights = self.get_ports_and_nodes_on_weights(graph) - for node in nodes_weigths: - if node in nodes_in_weights: - for ind, port in node.in_ports().items(): - if ind not in nodes_in_weights[node]: - mark_input_as_in_correct_layout(node, ind) - for ind, port in node.out_ports().items(): - mark_output_as_in_correct_layout(node, ind) - else: - mark_as_correct_data_layout(node) - node['nchw_layout'] = True - - for node in self.get_ports_and_nodes_on_shape_subgraphs(graph)[1]: - mark_as_correct_data_layout(node) - node['nchw_layout'] = True - - @staticmethod - def get_weighted_layer_type_to_in_weights_port(): - get_weights_port_index = lambda node: node.weights_index if node.has_valid('weights_index') else 1 - weighted_layer_type_to_in_weights_port = { - 'Convolution': get_weights_port_index, - 'DeformableConvolution': get_weights_port_index, - 'Deconvolution': get_weights_port_index, - 'BinaryConvolution': get_weights_port_index, - } - return weighted_layer_type_to_in_weights_port - - @staticmethod - def 
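# A self-contained sketch of the conditional BFS implemented by
# MarkSubGraphsWithCorrectLayout.bfs above, using a plain adjacency dict in place of MO's
# Graph/Node classes. The toy graph and the condition are purely illustrative.
from collections import deque

def bfs_with_condition(start_nodes, neighbors, visited, condition=None):
    result = []
    queue = deque(start_nodes)
    while queue:
        cur = queue.popleft()
        result.append(cur)
        visited.add(cur)
        for nxt in neighbors.get(cur, []):
            if nxt not in visited and (condition is None or condition(nxt)):
                queue.append(nxt)
    return result

toy_graph = {'a': ['b', 'c'], 'b': ['d'], 'c': [], 'd': []}
print(bfs_with_condition(['a'], toy_graph, set(), condition=lambda n: n != 'd'))  # ['a', 'b', 'c']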
insert_permute_inputs_before_dynamic_weights_subgraph(dynamic_subgraphs: Set[Node] = None): - """ - The function inserts permutations on input nodes in the weights subgraph - :param dynamic_subgraphs: Set of Nodes belonging to weight path subgraphs - :return: the list of Nodes which are inputs to weight path subgraphs - """ - dynamic_in_nodes = dict() - for node in dynamic_subgraphs: - node_type = node.soft_get('type') - if node_type not in ['Const', 'Parameter', 'ShapeOf']: - idx_lst = list() - for idx in [idx for idx, port in node.in_ports().items() if - not port.disconnected() and port.get_source().node not in dynamic_subgraphs]: - PermuteInputs().set_input_permutation(node.in_node(idx), node, 'input:{}'.format(idx), - 'transpose_nchw_to_nhwc') - idx_lst.append(idx) - if len(idx_lst): - dynamic_in_nodes[node] = idx_lst - return dynamic_in_nodes - - @staticmethod - def walk_up_from_in_ports_to_out_ports(in_ports: Set[Port], out_ports: Set[Port], port_condition=None): - r"""" - Returns all intermediate ports and nodes of such a sub-graph: - - out_ports - | | - \/ \/ - . . . - | | - \/ \/ - in_ports - """ - visited_ports = set() - visited_nodes = set() - - deque_of_in_ports = deque(in_ports) - while len(deque_of_in_ports): - in_port = deque_of_in_ports.popleft() - if in_port.get_source() is None: - continue - source_node = in_port.get_source().node - if in_port in visited_ports: # do not check visited_nodes as search is based on ports - continue - visited_ports.update({in_port, in_port.get_source()}) - if in_port.get_source() in out_ports: # reached source marked to stop the search - if not len(in_port.get_source().node.in_ports()): # for Constants and Parameters to be visited - visited_nodes.add(in_port.get_source().node) - continue - for idx, port in source_node.in_ports().items(): - if not port.disconnected() and (not port_condition or port_condition(source_node, idx)): - deque_of_in_ports.append(port) - visited_nodes.add(source_node) - return visited_ports, visited_nodes - - @staticmethod - def is_not_weight_port(node: Node, idx: int): - w_types_to_in_port_dict = MarkSubGraphsWithCorrectLayout.get_weighted_layer_type_to_in_weights_port() - node_type = node.soft_get('type') - return node_type in w_types_to_in_port_dict.keys() and idx != w_types_to_in_port_dict[node_type](node) - - @staticmethod - def get_ports_and_nodes_on_weights(graph): - nodes = graph.get_op_nodes() - - # collect all input ports with weights - weight_ports = set() - result_ports = set() - start_ports = set() - w_types_to_in_port_dict = MarkSubGraphsWithCorrectLayout.get_weighted_layer_type_to_in_weights_port() - for node in nodes: - node_type = node.soft_get('type', 'unknown') - if node_type not in w_types_to_in_port_dict.keys(): - if node_type in ['Const', 'Parameter', 'ShapeOf', 'ExtractImagePatches']: - start_ports.add(node.out_port(0)) - continue - weight_port_idx = w_types_to_in_port_dict[node_type](node) - assert node.is_in_port_connected(weight_port_idx), \ - 'Unexpected port configuration of {} node with name=`{}`'.format(node_type, - node.soft_get('name', node.id)) - weight_ports.add(node.in_port(weight_port_idx)) - for result in graph.get_op_nodes(type='Result'): - result_ports.update(result.in_ports().values()) - - # collect all sub-graphs that start with Constant/Parameter/ShapeOf/ExtractImagePatches and end at in_port as - # weights - ports_w, nodes_w = MarkSubGraphsWithCorrectLayout.walk_up_from_in_ports_to_out_ports(weight_ports, start_ports) - # collect all sub-graphs that start with 
Constant/Parameter/ShapeOf/ExtractImagePatches, end at Result nodes and - # not contains branches that end as weights - ports_d, nodes_d = MarkSubGraphsWithCorrectLayout.walk_up_from_in_ports_to_out_ports( - result_ports, start_ports, MarkSubGraphsWithCorrectLayout.is_not_weight_port) - nodes_dif = nodes_w.difference(nodes_d) - nodes_in_w = MarkSubGraphsWithCorrectLayout.insert_permute_inputs_before_dynamic_weights_subgraph(nodes_dif) - return ports_w.difference(ports_d), nodes_dif, nodes_in_w - - @staticmethod - def get_ports_and_nodes_on_shape_subgraphs(graph): - shape_sources = {shape_of.out_port(0) for shape_of in graph.get_op_nodes(type='ShapeOf')} - end_points = LayoutChangeForConstantShapePaths().find_shape_subgraph_endpoints( - [shape.out_port(0) for shape in graph.get_op_nodes(type='ShapeOf')]) - ports, nodes = MarkSubGraphsWithCorrectLayout.walk_up_from_in_ports_to_out_ports(end_points, shape_sources) - return ports, nodes diff --git a/tools/mo/openvino/tools/mo/middle/MergeNodesPermutations.py b/tools/mo/openvino/tools/mo/middle/MergeNodesPermutations.py deleted file mode 100644 index a97e9d8c26dcc6..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/MergeNodesPermutations.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.middle.ApplyNHWCtoNCHWpermutation import ApplyNHWCtoNCHWpermutation -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.utils.error import Error - - -class MergeNodesPermutations(MiddleReplacementPattern): - enabled = True - - def run_after(self): - return [ApplyNHWCtoNCHWpermutation] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - self.merge_nodes_permutations(graph) - - @staticmethod - def merge_nodes_permutations(graph: Graph): - # Iterate over all data nodes and check all permutations for similarity - # In case of equal permutations, this permutation will be set as attribute for data node - # otherwise exception will be raised - for node in graph.nodes(): - node = Node(graph, node) - if node.kind != 'data': - continue - - permutations = [] - - # Get all permutations from in edges - for in_node in node.in_nodes(): - edge_attrs = node.graph.get_edge_data(in_node.id, node.id)[0] - if 'permutation' in edge_attrs: - permutations.append(edge_attrs['permutation']) - - # Get all permutations from out edges - for out_node in node.out_nodes(): - edge_attrs = node.graph.get_edge_data(node.id, out_node.id)[0] - if 'permutation' in edge_attrs: - permutations.append(edge_attrs['permutation']) - - final_permutations = [] - for p in permutations: - if p is not None: - final_permutations.append(p.perm) - else: - final_permutations.append(int64_array(np.arange(node.shape.size))) - - if len(final_permutations) == 0: - continue - - # Check that all permutations are equal - if not all([np.array_equal(final_permutations[0], perm) for perm in final_permutations]): - raise Error('Permutations requested for {} data node are not equal! 
List of permutations: {}' - ''.format(node.name, [p.perm for p in permutations])) - - assert not node.has_valid('permutation') or np.array_equal(node.permutation, permutations[0]) - node['permutation'] = permutations[0] diff --git a/tools/mo/openvino/tools/mo/middle/MoveConstToLoopBody.py b/tools/mo/openvino/tools/mo/middle/MoveConstToLoopBody.py deleted file mode 100644 index f30944a676e1c7..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/MoveConstToLoopBody.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.loop import Loop -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.graph.graph import Graph - - -class MoveConstToLoopBody(MiddleReplacementPattern): - """ - It moves constant producers for Loop node into the body graph and removes input ports for them. - This transformations helps to continue constant folding inside the body graph if possible. - The constant folding serves as optimization path and helps to avoid issue connecting with constants - lying on weights path to Convolution node. - """ - enabled = True - force_shape_inference = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import PostMiddleStart - return [PostMiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.ApplyPermutations import ApplyPermutation - return [ApplyPermutation] - - def find_and_replace_pattern(self, graph: Graph): - cleanup_called_once = False - - # walk through all Loop nodes and find Const inputs - for loop_node in graph.get_op_nodes(op='Loop'): - # call clean-up only once that performs constant folding - if not cleanup_called_once: - graph.clean_up() - cleanup_called_once = True - - # move constant node into the body graph and removes body parameter nodes corresponding to them - Loop.pull_constant_inputs_into_body(loop_node) - - # since some input ports can be removed after the pulling constants, normalization of Loop node is required - Loop.normalize_input_output_ports(loop_node) - - # perform shape inference for the Loop node again since new constant can be appeared - # and constant folding can be helpful for weights path to Convolution node inside the body graph - loop_node['need_shape_inference'] = True diff --git a/tools/mo/openvino/tools/mo/middle/MulFakeQuantizeFuse.py b/tools/mo/openvino/tools/mo/middle/MulFakeQuantizeFuse.py deleted file mode 100644 index d1a568d604bade..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/MulFakeQuantizeFuse.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from collections import defaultdict -from typing import Dict, List - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.fusing.helpers import get_tensor_in_port, get_value_in_port -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const - - -def resolve_shared_inputs(node: Node, port_ids_to_duplicate: List[int]): - """ - Duplicates shared constants that are consumed by more than one node. 
- If constant is consumed by several ports of one node - no duplication gets done - """ - graph = node.graph - - for port_id in port_ids_to_duplicate: - dst_port_map = defaultdict(list) - for dst in node.in_port(port_id).get_source().get_connection().get_destinations(): - dst_port_map[dst.node].append(dst.idx) - del dst_port_map[node] - value = node.in_port(port_id).data.get_value() - if value is None: - log.debug('Can not duplicate due no data for in_port {} of node {}'.format(port_id, node.name)) - for node, idxs in dst_port_map.items(): - const = Const(graph, {'value': mo_array(value), - 'name': node.soft_get('name', node.id) + '/duplicated_'}).create_node() - for idx in idxs: - node.in_port(idx).disconnect() - const.out_port(0).connect(node.in_port(idx)) - const.infer(const) - - -class MulFakeQuantizeFuse(MiddleReplacementPattern): - """ Fuses Mul --> FakeQuantize sequence if possible - """ - enabled = False - - def run_after(self): - return [] - - def run_before(self): - return [] - - def pattern(self): - return dict( - nodes=[ - ('preop', dict(op='Mul', can_be_fused=True)), - ('preoped', dict()), - ('quantize', dict(op='FakeQuantize')), - ], - edges=[ - ('preop', 'preoped'), - ('preoped', 'quantize', {'in': 0}), - ] - ) - - def replace_pattern(self, graph: Graph, match: Dict[str, Node]): - quantize = match['quantize'] - preop = match['preop'] - - tensor_port, value_port = get_tensor_in_port(preop), get_value_in_port(preop) - - if value_port is None or value_port.data.get_value() is None: - log.debug('MulQuantizeFuse: cannot fuse because Mul op has dynamic inputs') - return - - mul_val = value_port.data.get_value() - if np.any(mul_val <= 0): - return - - # Direct modifications to quantize 1-st and 2-nd port inputs are performed. - # So the data nodes at those inputs shouldn't have more than 1 consumer maximum 2 consumers to the same - # quantize op (consumed by 1st and 2nd ports). So we duplicate FakeQuantize in_port 1, 2 data if needed - resolve_shared_inputs(node=quantize, port_ids_to_duplicate=[1, 2]) - - # TODO: need some special processing for values that exactly equal to threshold - - quantize.in_port(1).data.set_value(quantize.in_port(1).data.get_value() / mul_val) - if quantize.in_node(1).id != quantize.in_node(2).id: - quantize.in_port(2).data.set_value(quantize.in_port(2).data.get_value() / mul_val) - - # Reconnect Mul as it no longer needed for current FakeQuantize - in_mul_connection = quantize.in_port(0).get_source().node.in_port(0).get_connection() - quantize.in_port(0).disconnect() - in_mul_connection.add_destination(quantize.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/middle/ONNXRNNSequenceNormalize.py b/tools/mo/openvino/tools/mo/middle/ONNXRNNSequenceNormalize.py deleted file mode 100644 index 0e003ecda9a295..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ONNXRNNSequenceNormalize.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import compatible_dims -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.op import Op - - -class ONNXRNNSequenceNormalize(MiddleReplacementPattern): - """ - Convert blobs and shapes of ONNX-like LSTM, GRU, RNN cells to common form (internal for MO). 
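# A numpy sketch of the identity behind MulFakeQuantizeFuse above: for a strictly positive
# multiplier m, quantizing (x * m) with input limits (lo, hi) equals quantizing x with limits
# (lo / m, hi / m), which is why the pass divides the values on ports 1 and 2 by the Mul
# constant. The reference fake_quantize helper and the concrete numbers are illustrative
# assumptions, not the library implementation.
import numpy as np

def fake_quantize(x, in_lo, in_hi, out_lo, out_hi, levels=256):
    q = np.round((np.clip(x, in_lo, in_hi) - in_lo) / (in_hi - in_lo) * (levels - 1))
    return q / (levels - 1) * (out_hi - out_lo) + out_lo

x = np.linspace(-1.0, 1.0, 9)
m = 0.5                                                  # positive Mul constant -> fusible
fused = fake_quantize(x, 0.0 / m, 1.0 / m, 0.0, 1.0)     # input limits rescaled, Mul removed
original = fake_quantize(x * m, 0.0, 1.0, 0.0, 1.0)
assert np.allclose(fused, original)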
- After this normalization pass passes for splitting bidirectional calls and - multilayer cells will be applied. - - This transformation pass involves weights and shapes processing only: - 1. Weights reshaping and reordering - 2. Gates reordering - - - Inputs will have the following order after normalising: - 0: X input data, shape [batch_size, seq_len, input_size] - 1: W weights blob, shape [num_dir, n_cells, M, hidden_size, input_size] - 2: R weights blob, shape [num_dir, n_cells, M, hidden_size, hidden_size] - 3: B biases blob, shape [num_dir, n_cells, 2, M, hidden_size] - 4: (optional) sequence_length, shape [batch_size] - 5: initial hidden state, shape [num_dir, batch_size, hidden_size] - ([num_dir, n_cells, batch_size, hidden_size] if num_cells != 1) - 6: (only for LSTM) initial cell state, shape [num_dir, batch_size, hidden_size] - 7: (optional for LSTM) Peepholes weights, shape [num_dir, n_cells, (M - 1) * hidden_size] - - Outputs: - 0: Y output blob, shape [batch_size, num_dir, seq_len, hidden_size] - 1: (optional) Y_h, shape [num_dir, batch_size, hidden_size] - 2: (optional for LSTM) Y_c, shape [num_dir, batch_size, hidden_size] - - Where: - M -- number of gates in this cell (4 for LSTM, 3 for GRU, 1 for RNN). - num_dir -- number of directions ('forvard', 'bidirectional', 'reverse') - n_cells -- number of cells in layer (always 1 for ONNX). - """ - - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('rnn_layer', dict(kind='op', type='RNNSequence', format='onnx')), - ('input', dict(kind='data')), - ('W', dict(kind='data')), - ('R', dict(kind='data')), - ], - # We are not handling optional inputs - edges=[ - ('input', 'rnn_layer', {'in': 0}), - ('W', 'rnn_layer', {'bin': 'W'}), - ('R', 'rnn_layer', {'bin': 'R'}), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - self.repack_weights(graph, match) - self.check_init_states(graph, match) - self.check_input_ports(graph, match) - match['rnn_layer']['normalized'] = True - - @staticmethod - def repack_weights(graph: Graph, match: dict): - """ - Repack weights into general format (described above) and reorder gates. 
- """ - rnn_layer = match['rnn_layer'] - W = match['W'].value.copy() - R = match['R'].value.copy() - num_directions = 2 if rnn_layer.direction == 'bidirectional' else 1 - - graph.remove_edge(match['W'].id, rnn_layer.id) - graph.remove_edge(match['R'].id, rnn_layer.id) - - # find optional 'B' biases blob - if 3 in rnn_layer.in_nodes(): - # TODO: check if 'bin': 'B' attribute is assigned to this edge - B = rnn_layer.in_node(3).value.copy() - graph.remove_edge(rnn_layer.in_node(3).id, rnn_layer.id) - else: - B_shape = [num_directions, 2 * rnn_layer.multiplier * rnn_layer.hidden_size] # from ONNX spec - B = np.full(B_shape, 0, dtype=np.float32) - - # Add extra dimensions for W, R and B for easier repacking and reordering - B = B.reshape([ - num_directions, # 0: num of directions - rnn_layer.num_layers, # 1: num_layers - 2, # 2: two input parts of the matrix: W, R - rnn_layer.multiplier, # 3: four output parts of the matrix for all gates in order: i, o, f, c - rnn_layer.hidden_size, # 4: output size per direction and gate - ]) - - W, R = [x.reshape([ - num_directions, # 0: num of directions - rnn_layer.num_layers, # 1: num_layers - rnn_layer.multiplier, # 2: four output parts of the matrix for all gates in order: i, o, f, c - rnn_layer.hidden_size, # 3: output size per direction and gate - -1]) # 4: input size/hidden size in W/R correspondingly - for x in (W, R)] - - input_size = match['input'].shape[2] - assert compatible_dims(input_size, W.shape[-1]) - - # Reorder gates: iofc --> fico - gate_reorder = rnn_layer.gate_order - W, R = (np.take(x, gate_reorder, axis=2) for x in (W, R)) - B = np.take(B, gate_reorder, axis=3) - - for blob, port in [(W, 1), (R, 2), (B, 3)]: - Op.create_and_connect_input_data_node( - graph, - rnn_layer, - {'value': blob, 'shape': int64_array(blob.shape)}, - {'in': port, 'permutation': None} - ) - - @staticmethod - def check_init_states(graph: Graph, match: dict): - """ - Check if cell have initial states and create zeros states if not. - """ - rnn_layer = match['rnn_layer'] - num_directions = 2 if rnn_layer.direction == 'bidirectional' else 1 - batch_size = rnn_layer.in_node(0).shape[rnn_layer.batch_dim] - - h_init_port = 5 - c_init_port = 6 - - if h_init_port not in rnn_layer.in_nodes(): - h_shape = [num_directions, batch_size, rnn_layer.hidden_size] # from ONNX spec - h_init = np.full(h_shape, 0, dtype=np.float32) - Op.create_and_connect_input_data_node( - graph, - rnn_layer, - {'value': h_init, 'shape': int64_array(h_init.shape)}, - {'in': h_init_port, 'permutation': None} - ) - - if rnn_layer.op == 'LSTM': - if c_init_port not in rnn_layer.in_nodes(): - c_shape = [num_directions, batch_size, rnn_layer.hidden_size] # from ONNX spec - c_init = np.full(c_shape, 0, dtype=np.float32) - Op.create_and_connect_input_data_node( - graph, - rnn_layer, - {'value': c_init, 'shape': int64_array(c_init.shape)}, - {'in': c_init_port, 'permutation': None} - ) - - @staticmethod - def check_input_ports(graph: Graph, match: dict): - """ - Check that all mandatory ports is present. 
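# A numpy sketch of the gate reordering done in ONNXRNNSequenceNormalize.repack_weights
# above: after reshaping, the gates sit on a dedicated axis and are permuted with np.take
# (iofc -> fico for LSTM). The shapes and the concrete gate_order permutation below are
# illustrative assumptions, not values read from a real model.
import numpy as np

num_dir, n_cells, M, hidden_size, input_size = 1, 1, 4, 8, 16
W = np.random.rand(num_dir, n_cells, M, hidden_size, input_size).astype(np.float32)

gate_order = np.array([2, 0, 3, 1])            # example mapping of ONNX iofc to fico
W_reordered = np.take(W, gate_order, axis=2)   # gate axis is axis 2 after the reshape
assert W_reordered.shape == W.shape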
- """ - rnn_layer = match['rnn_layer'] - mandatory_ports = [0, 1, 2, 3, 5] - - if rnn_layer.op == 'LSTM': - mandatory_ports.extend([6]) - - assert set(rnn_layer.in_nodes().keys()) >= set(mandatory_ports) diff --git a/tools/mo/openvino/tools/mo/middle/ONNXResize11ToInterpolate.py b/tools/mo/openvino/tools/mo/middle/ONNXResize11ToInterpolate.py deleted file mode 100644 index bf70528df55d7d..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ONNXResize11ToInterpolate.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.activation_ops import Floor -from openvino.tools.mo.ops.elementwise import Add, Div, Mul -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.common.layout import get_depth_dim, get_height_dim, get_width_dim -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice - - -def convert_mode(onnx_mode: str) -> str: - return {'nearest': 'nearest', 'linear': 'linear_onnx', 'cubic': 'cubic'}[onnx_mode] - - -def replace_resize(graph: Graph, resize: Node): - log.debug("Converting of ONNX Resize-11 to Interpolate-4 " - "is triggered for node {}.".format(resize.soft_get('name', resize.id))) - - input_shape = resize.in_port(0).data.get_shape() - input_rank = len(input_shape) - resize_name = resize.soft_get('name', resize.id) - if input_rank not in {4, 5}: - log.warning('The input shape is not 4D or 5D for op with name {}'.format(resize_name)) - return - - assert (resize.is_in_port_connected(0) and (resize.is_in_port_connected(2) or resize.is_in_port_connected(3))), \ - "Scales or sizes inputs must be connected to Node {} with op {}.".format(resize.soft_get("name", resize.id), - resize.op) - - assert resize.soft_get('coordinate_transformation_mode') != 'tf_crop_and_resize', \ - 'Mode tf_crop_and_resize is not supported for op {} with name {}'.format(resize.op, - resize.soft_get("name", resize.id)) - - layout = graph.graph['layout'] - - if input_rank == 4: - begin_dim = get_height_dim(layout, input_rank) - end_dim = get_width_dim(layout, input_rank) + 1 - else: - begin_dim = get_depth_dim(layout, input_rank) - end_dim = get_width_dim(layout, input_rank) + 1 - - sizes_ss = create_op_with_const_inputs(graph, StridedSlice, - {1: int64_array([begin_dim]), - 2: int64_array([end_dim]), - 3: int64_array([1])}, - {'name': resize_name + '/StridedSlice_sizes', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0])}) - scales_ss = create_op_with_const_inputs(graph, StridedSlice, - {1: int64_array([begin_dim]), - 2: int64_array([end_dim]), - 3: int64_array([1])}, - {'name': resize_name + '/StridedSlice_scales', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0])}) - axes_node = Const(graph, - {'name': resize_name + '/axis', - 
'value': int64_array(np.arange(begin_dim, end_dim))}).create_node() - - shape_calculation_mode = 'sizes' if resize.is_in_port_connected(3) else 'scales' - - interpolate_node = Interpolate(graph, {'version': 'opset4', - 'mode': convert_mode(resize.mode), - 'coordinate_transformation_mode': resize.coordinate_transformation_mode, - 'cube_coeff': resize.cube_coeff, - 'nearest_mode': resize.nearest_mode, - 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), - 'antialias': 0, - 'shape_calculation_mode': shape_calculation_mode, - 'in_ports_count': 4}).create_node() - - axes_node.out_port(0).connect(interpolate_node.in_port(3)) - shape_of = Shape(graph, {'name': resize_name + '/ShapeOf'}).create_node() - - add_node = create_op_with_const_inputs(graph, Add, - {1: float_array([1.0e-5])}, - {'name': resize_name + '/Add'}) - - dst_dtype = np.float32 # even if data_type=FP16 use float32 for shape values - - if not resize.is_in_port_connected(3): - cast_shape_to_float = Cast(graph, {'dst_type': dst_dtype}).create_node() - mul_node = Mul(graph, {'name': resize_name + '/Mul'}).create_node() - shape_of.out_port(0).connect(cast_shape_to_float.in_port(0)) - cast_shape_to_float.out_port(0).connect(mul_node.in_port(0)) - cast_add_result_to_int = Cast(graph, {'dst_type': np.int64}).create_node() - floor_node = Floor(graph, {'name': resize_name + '/Floor'}).create_node() - mul_node.out_port(0).connect(add_node.in_port(0)) - add_node.out_port(0).connect(floor_node.in_port(0)) - floor_node.out_port(0).connect(cast_add_result_to_int.in_port(0)) - cast_add_result_to_int.out_port(0).connect(sizes_ss.in_port(0)) - sizes_ss.out_port(0).connect(interpolate_node.in_port(1)) - scales_ss.out_port(0).connect(interpolate_node.in_port(2)) - - connection_of_resize_input = resize.in_port(0).get_connection() - connection_of_resize_input.set_destination(interpolate_node.in_port(0)) - - connection_of_scales = resize.in_port(2).get_connection() - connection_of_scales.set_destination(scales_ss.in_port(0)) - - connection_of_resize_input.get_source().connect(shape_of.in_port(0)) - connection_of_scales.get_source().connect(mul_node.in_port(1)) - else: - cast_shape_to_float = Cast(graph, {'dst_type': dst_dtype}).create_node() - cast_sizes_to_float = Cast(graph, {'dst_type': dst_dtype}).create_node() - div_node = Div(graph, {'name': resize_name + '/Div'}).create_node() - cast_sizes_to_float.out_port(0).connect(div_node.in_port(0)) - cast_shape_to_float.out_port(0).connect(div_node.in_port(1)) - shape_of.out_port(0).connect(cast_shape_to_float.in_port(0)) - div_node.out_port(0).connect(add_node.in_port(0)) - add_node.out_port(0).connect(scales_ss.in_port(0)) - scales_ss.out_port(0).connect(interpolate_node.in_port(2)) - sizes_ss.out_port(0).connect(interpolate_node.in_port(1)) - - connection_of_resize_input = resize.in_port(0).get_connection() - connection_of_resize_input.set_destination(interpolate_node.in_port(0)) - - connection_of_sizes = resize.in_port(3).get_connection() - connection_of_sizes.set_destination(sizes_ss.in_port(0)) - - connection_of_resize_input.get_source().connect(shape_of.in_port(0)) - connection_of_sizes.get_source().connect(cast_sizes_to_float.in_port(0)) - - rename_nodes([(resize, resize_name + '/delete'), (interpolate_node, resize_name)]) - resize.out_port(0).get_connection().set_source(interpolate_node.out_port(0)) - - -class ONNXResize11ToInterpolate(MiddleReplacementPattern): - """ - The transformation replaces ONNX Resize 11 with Interpolate-4. 
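# A numpy sketch of the shape arithmetic that replace_resize above expresses with
# Cast/Mul/Add/Floor (when scales are given) and Cast/Div/Add (when sizes are given).
# The spatial shape and scales are illustrative; the 1e-5 epsilon mirrors the constant fed
# to the Add node in the transformation.
import numpy as np

spatial_shape = np.array([64, 80], dtype=np.float32)        # H, W cut out by StridedSlice
scales = np.array([0.5, 2.0], dtype=np.float32)

# shape_calculation_mode == 'scales': sizes are derived from the input shape
sizes = np.floor(spatial_shape * scales + 1.0e-5).astype(np.int64)      # -> [32, 160]

# shape_calculation_mode == 'sizes': scales are derived from the requested sizes
derived_scales = sizes.astype(np.float32) / spatial_shape + 1.0e-5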
- """ - enabled = True - - def run_before(self): - from openvino.tools.mo.middle.InterpolateSequenceToInterpolate import InterpolateSequenceToInterpolate - return [InterpolateSequenceToInterpolate] - - def find_and_replace_pattern(self, graph: Graph): - resize11_ops = graph.get_op_nodes(op='ONNXResize11') - for resize in resize11_ops: - replace_resize(graph, resize) diff --git a/tools/mo/openvino/tools/mo/middle/PartialInfer.py b/tools/mo/openvino/tools/mo/middle/PartialInfer.py deleted file mode 100644 index d930baa45b19ad..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/PartialInfer.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import logging as log - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, shape_array, \ - dynamic_dimension_value, unmask_shape -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class PartialInfer(MiddleReplacementPattern): - enabled = True - run_not_recursively = True - - def run_after(self): - from openvino.tools.mo.front.create_tensor_nodes import CreateTensorNodes - return [CreateTensorNodes] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - dynamic_inputs = {} - for parameter in graph.get_op_nodes(op='Parameter'): - param_shape = parameter.soft_get('shape', shape_array(dynamic_dimension_value)) - if not is_fully_defined(param_shape): - parameter_name = parameter.soft_get('name', parameter.id) - dynamic_inputs[parameter_name] = param_shape - partial_infer(graph) diff --git a/tools/mo/openvino/tools/mo/middle/PoolV2ToAttributedPool.py b/tools/mo/openvino/tools/mo/middle/PoolV2ToAttributedPool.py deleted file mode 100644 index 463b3daeacb78f..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/PoolV2ToAttributedPool.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.pooling import Pooling - - -class PoolV2ToAttributedPool(MiddleReplacementPattern): - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for pool_v2_node in graph.get_op_nodes(op='PoolingV2'): - pool_v2_name = pool_v2_node.soft_get('name', pool_v2_node.id) - - pool_v1_node = Pooling(graph, {'window': pool_v2_node.in_port(1).data.get_value(), - 'stride': pool_v2_node.in_port(2).data.get_value(), - - 'pad': pool_v2_node.pad, - 'spatial_dims': pool_v2_node.spatial_dims, - 'auto_pad': pool_v2_node.auto_pad, - 'output_spatial_shape': pool_v2_node.output_spatial_shape, - 'pad_spatial_shape': pool_v2_node.pad_spatial_shape, - - 'pool_method': pool_v2_node.pool_method, - 'permute_attrs': pool_v2_node.permute_attrs,}).create_node() - - rename_nodes([(pool_v2_node, pool_v2_name + '/to_be_removed'), (pool_v1_node, pool_v2_name)]) - - pool_v2_node.in_port(0).get_connection().set_destination(pool_v1_node.in_port(0)) - pool_v2_node.out_port(0).get_connection().set_source(pool_v1_node.out_port(0)) diff --git a/tools/mo/openvino/tools/mo/middle/PreserveRuntimeInfo.py b/tools/mo/openvino/tools/mo/middle/PreserveRuntimeInfo.py deleted file mode 100644 index 259872c45bd17c..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/PreserveRuntimeInfo.py +++ /dev/null @@ 
-1,126 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.middle.MergeNodesPermutations import MergeNodesPermutations -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.utils.runtime_info import OldAPIMapOrder - - -class PreserveRuntimeInfo(MiddleReplacementPattern): - """ This transformation preserves original layout for Parameter and Result nodes - and adds old_api_map_order attribute in rt_info which stores the following information: - - Parameter: - Order of the transpose which should be applied to Parameter with old API layout to - obtain Parameter with new API layout. - - Result: - Order of the transpose which should be applied to Result with new API layout to - obtain Result with old API layout. - - This transformation shouldn't be applied for Parameter or Result nodes inside - body graphs of any operations like If, TensorIterator, Loop etc. For this reason - transformation should be executed non-recursively. - """ - enabled = True - run_not_recursively = True - - def run_after(self): - return [MergeNodesPermutations] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - self.preserve_rt_info(graph) - - @staticmethod - def add_old_api_map_order_into_rt_info(op: Node): - # rt info update - assert op.has('rt_info'), 'Unable to preserve runtime information for node with name={}'.format(op) - - old_api_map = OldAPIMapOrder(version=0) - attr_name = old_api_map.get_name() - if (attr_name, old_api_map.get_version()) not in op.rt_info.info: - op.rt_info.info[(attr_name, old_api_map.get_version())] = old_api_map - return attr_name, old_api_map.get_version() - - @staticmethod - def preserve_rt_info(graph: Graph): - for op in graph.get_op_nodes(type='Parameter'): - op_name = op.soft_get('name', op.id) - if 'auto_disable_nhwc_to_nchw' in graph.graph['cmd_params'] and \ - graph.graph['cmd_params'].auto_disable_nhwc_to_nchw: - rank = op.out_port(0).data.get_shape().size - if rank < 4: - continue - order = list(range(rank)) - order.remove(1) - order.append(1) - order = int64_array(order) - elif op.has_valid('permute_attrs') and not op.has_and_set('nchw_layout') and \ - op.out_node(0).has_valid('permutation'): - permutation = op.out_node(0).permutation - order = permutation.inv - if np.array_equal(order, range(len(permutation.inv))): - continue - - # keep input in the framework format - transpose = create_op_node_with_second_input( - graph, Transpose, permutation.perm, - {'name': op_name + '/Transpose({})'.format(permutation.perm)}) - - # source mode is used to keep tensor names at Parameter node - op.out_port(0).get_connection().insert_node(transpose, "source") - - if op.has_valid('permute_attrs'): - del op['permute_attrs'] - if op.out_node(0).has_valid('permutation'): - del op.out_node(0)['permutation'] - else: - continue - - rt_info_key = PreserveRuntimeInfo.add_old_api_map_order_into_rt_info(op) - op.rt_info.info[rt_info_key].old_api_transpose_parameter(order) - - for op in graph.get_op_nodes(type='Result'): - if op.in_ports(): - prev_node_out_port = op.in_port(0).get_connection().get_source() - if prev_node_out_port is None: - continue - in_node = 
prev_node_out_port.node - in_data_node = in_node.out_node(prev_node_out_port.idx) - - if 'auto_disable_nhwc_to_nchw' in graph.graph['cmd_params'] and \ - graph.graph['cmd_params'].auto_disable_nhwc_to_nchw: - rank = prev_node_out_port.data.get_shape().size - if rank < 4: - continue - order = list(range(rank - 1)) - order.insert(1, rank - 1) - order = int64_array(order) - elif in_data_node.has_and_set('permutation'): - permutation = in_data_node['permutation'] - order = permutation.perm - - if np.array_equal(order, range(len(permutation.perm))): - continue - - # keep result in the framework format - transpose = create_op_node_with_second_input(graph, Transpose, permutation.inv) - # preserve output node name as it is used as output name in legacy IE API - transpose.name = in_node.name - in_node.name += "/prev" - - op.in_port(0).get_connection().insert_node(transpose) - else: - continue - - rt_info_key = PreserveRuntimeInfo.add_old_api_map_order_into_rt_info(op) - op.rt_info.info[rt_info_key].old_api_transpose_result(order) diff --git a/tools/mo/openvino/tools/mo/middle/RNNSequenceNormalizeToIE.py b/tools/mo/openvino/tools/mo/middle/RNNSequenceNormalizeToIE.py deleted file mode 100644 index c35fb3f39cd973..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/RNNSequenceNormalizeToIE.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_delete, mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.unsqueeze import Unsqueeze -from openvino.tools.mo.utils.shape import node_to_get_shape_value_of_indices - - -class RNNSequenceNormalize(MiddleReplacementPattern): - """ - This class normalize RNNSequence layers to IE-compatible from of weights, inputs and outputs. - - In this pass next will be done: - 1. Weights repack (squeeze all useless shapes in all blobs and concatenate W and R together, also add - bin param and all similar staff ) - 1. UNSqueeze num directions (in states and ) - 2. Initial states squeeze - 4. Renumbering inputs - 5. 
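# A small pure-Python sketch of the permutation orders that PreserveRuntimeInfo (above)
# records in old_api_map_order when auto_disable_nhwc_to_nchw is set: Parameters move axis 1
# to the end (old-API NCHW -> new-API NHWC), Results move the last axis back to position 1.
# Rank 4 is just an example.
rank = 4

param_order = list(range(rank))
param_order.remove(1)
param_order.append(1)                  # -> [0, 2, 3, 1]

result_order = list(range(rank - 1))
result_order.insert(1, rank - 1)       # -> [0, 3, 1, 2]

assert param_order == [0, 2, 3, 1] and result_order == [0, 3, 1, 2]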
Ports checks - - After this normalization this layer will have next format of inputs: - 0: X input data, shape [batch_size, seq_len, input_size] - 1: WR weights blob, shape [M * hidden_size, hidden_size + input_size] - 2: B biases blob, shape [M * hidden_size] - 3: (optional) sequence_length, shape [batch_size] - 4: initial hidden state, shape [batch_size, hidden_size] - 5: (only for LSTM) initial cell state, shape [batch_size, hidden_size] - 6: (optional for LSTM) Peepholes weights, shape [(M - 1) * hidden_size] - - """ - force_shape_inference = True - - def run_after(self): - from openvino.tools.mo.middle.DecomposeBidirectionalRNNSequence import DecomposeBidirectionalRNNSequence - return [DecomposeBidirectionalRNNSequence] - - def pattern(self): - return dict( - nodes=[ - ('rnn_layer', dict(kind='op', type='RNNSequence')), - ('input', dict(kind='data')), - ('W', dict(kind='data')), - ('R', dict(kind='data')), - ('B', dict(kind='data')), - ], - edges=[ - ('input', 'rnn_layer', {'in': 0}), - ('W', 'rnn_layer', {'in': 1}), - ('R', 'rnn_layer', {'in': 2}), - ('B', 'rnn_layer', {'in': 3}), - ], - ) - - def replace_pattern(self, graph: Graph, match: dict): - self.repack_weights(graph, match) - if match['rnn_layer'].has_num_directions: - self.unsqueeze_num_directions(graph, match) - self.squeeze_initial_states(graph, match) - self.reordering_inputs(graph, match) - # some additional checks for ports number and similar stuff - - def repack_weights(self, graph: Graph, match: dict): - # Concat W, R in IE- format - # Delete useless num_dir dimensions and n_cells dimensions in W, R, B (peepholes?) - lstm = match['rnn_layer'] - W, R, B = match['W'].value.copy(), match['R'].value.copy(), match['B'].value.copy() - - graph.remove_edge(match['W'].id, lstm.id) - graph.remove_edge(match['R'].id, lstm.id) - graph.remove_edge(match['B'].id, lstm.id) - - # Sum component of B that correspond to W and R - if lstm.op == 'GRU' and lstm.linear_before_reset: - B_shape = mo_array(B.shape) - B_shape[3] = 4 - B_shape[2] = 1 - B_tmp = np.zeros(shape=B_shape, dtype=np.float32) - B_tmp[:, :, :, 0, :] = B[:, :, 0, 0, :] + B[:, :, 1, 0, :] - B_tmp[:, :, :, 1, :] = B[:, :, 0, 1, :] + B[:, :, 1, 1, :] - B_tmp[:, :, :, 2, :] = B[:, :, 0, 2, :][:, :, np.newaxis, :] - B_tmp[:, :, :, 3, :] = B[:, :, 1, 2, :][:, :, np.newaxis, :] - B = B_tmp - else: - B = np.sum(B, axis=2, keepdims=True) - - # Concatenate W, R to IE-compatible format - assert len(W.shape) == 5 - assert len(R.shape) == 5 - WR = np.concatenate([W, R], axis=4) - - # Squeeze useless dimensions - assert WR.shape[0] == 1 # num_dir == 1 - assert WR.shape[1] == 1 # num_cells == 1 - assert B.shape[0] == 1 - assert B.shape[1] == 1 - WR = WR.squeeze(axis=(0, 1)) - B = B.squeeze(axis=(0, 1)) - - # Flatten all output (0, 1) and input dimensions (2, 3) - final_shape_WR = [WR.shape[0] * WR.shape[1], -1] - assert final_shape_WR[0] == lstm.hidden_size * lstm.multiplier - WR = WR.reshape(final_shape_WR) - - final_shape_B = final_shape_WR - if lstm.op == 'GRU' and lstm.linear_before_reset: - final_shape_B[0] = lstm.hidden_size * 4 - B = B.reshape(final_shape_B) - - # Squeeze fake dimension in B - B = B.squeeze(axis=-1) - - for blob, port, name in [(WR, 1, 'weights'), (B, 2, 'biases')]: - Op.create_and_connect_input_data_node( - graph, - lstm, - {'value': blob, 'shape': int64_array(blob.shape)}, - {'in': port, 'bin': name, 'permutation': None} - ) - - @staticmethod - def unsqueeze_num_directions(graph: Graph, match: dict): - """ Assuming considered LSTM/GRU/RNN node should has 
num_directions in output shape and add Unsqueeze - to match it. - """ - - rnn_layer = match['rnn_layer'] - rnn_layer_name = rnn_layer.soft_get('name', rnn_layer.id) - # num_directions is at 1st position in output shape, and in 0st position in hidden and cell states - # please refer to docs in this transform - - direction_dim = [1, 0, 0] # index of dimension with direction index - for i in rnn_layer.out_nodes(): - old_data_node = rnn_layer.out_node(i) - old_shape = old_data_node.shape.copy() - new_shape = shape_delete(old_shape, direction_dim[i]) - - data = Op._create_data_node(graph, name=rnn_layer.name + '/Out/{}/'.format(i), attrs={'shape': new_shape}) - graph.remove_edge(rnn_layer.id, old_data_node.id) - graph.add_edge(rnn_layer.id, data.id, key=0, out=i) - - unsqueeze = Unsqueeze(graph, dict()) - - unsqueeze_dim_data = Const(graph, {'name': rnn_layer.name + '/UnsqueezeNumDirections/{}/Dim'.format(i), - 'value': int64_array([direction_dim[i]])}).create_node_with_data() - - unsqueeze.create_node_with_data([data, unsqueeze_dim_data], - dict(name=rnn_layer_name + '/UnsqueezeNumDirections/{}'.format(i)), - data_nodes=[old_data_node]) - @staticmethod - def squeeze_initial_states(graph: Graph, match: dict): - """ - Squeeze input initial states of recurrent node to 2-D shape. - """ - hidden_init_port = 5 - cell_init_port = 6 - - rnn_layer = match['rnn_layer'] - # Add input ports to rnn_layer - rnn_layer.add_sequence_of_ports(type='in', rng=range(7)) - rnn_layer_name = rnn_layer.soft_get('name', rnn_layer.id) - - assert hidden_init_port in rnn_layer.in_nodes() - hidden_size = rnn_layer.hidden_size - shape = Shape(graph, dict(name=rnn_layer_name + '/ShapeOf')).create_node() - rnn_layer.in_port(0).get_source().connect(shape.in_port(0)) - - reshape_h = create_op_node_with_second_input(graph, Reshape, second_input_value=int64_array([-1, hidden_size]), - op_attrs={'name': rnn_layer_name + '/HiddenStateResize', - 'override_output_shape': True}) - rnn_layer.in_port(hidden_init_port).get_connection().insert_node(reshape_h) - - if rnn_layer.op == 'LSTM': - assert cell_init_port in rnn_layer.in_nodes() - reshape_c = create_op_node_with_second_input(graph, Reshape, - second_input_value=int64_array([-1, hidden_size]), - op_attrs={'name': rnn_layer_name + '/CellStateResize', - 'override_output_shape': True}) - rnn_layer.in_port(cell_init_port).get_connection().insert_node(reshape_c) - - @staticmethod - def reordering_inputs(graph: Graph, match: dict): - """ - Reorder (renumbering) inputs to described format. We need to renumber initial states ports. - """ - rnn_layer = match['rnn_layer'] - assert 5 in rnn_layer.in_nodes() - hidden_state_edge = graph.get_edge_data(rnn_layer.in_node(5).id, rnn_layer.id) - hidden_state_edge[0]['in'] = 4 - - if rnn_layer.op == 'LSTM': - assert 6 in rnn_layer.in_nodes() - cell_state_edge = graph.get_edge_data(rnn_layer.in_node(6).id, rnn_layer.id) - cell_state_edge[0]['in'] = 5 - - @staticmethod - def ports_checks(graph: Graph, match: dict): - """ - Check that all mandatory ports is present. 
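# A numpy sketch of the weight packing done in RNNSequenceNormalize.repack_weights above:
# W and R are concatenated along the input axis, the single num_dir/n_cells dimensions are
# squeezed out, and the result is flattened to the 2-D [M * hidden_size, hidden_size +
# input_size] blob. The shapes are illustrative assumptions.
import numpy as np

M, hidden_size, input_size = 4, 8, 16          # e.g. LSTM with 4 gates
W = np.random.rand(1, 1, M, hidden_size, input_size).astype(np.float32)
R = np.random.rand(1, 1, M, hidden_size, hidden_size).astype(np.float32)

WR = np.concatenate([W, R], axis=4).squeeze(axis=(0, 1))
WR = WR.reshape(M * hidden_size, -1)
assert WR.shape == (M * hidden_size, hidden_size + input_size)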
- """ - rnn_layer = match['rnn_layer'] - mandatory_ports = [0, 1, 2, 4] - - if rnn_layer.op == 'LSTM': - mandatory_ports.append(5) - - assert set(rnn_layer.in_nodes().keys()) >= set(mandatory_ports) diff --git a/tools/mo/openvino/tools/mo/middle/ReluQuantizeFuse.py b/tools/mo/openvino/tools/mo/middle/ReluQuantizeFuse.py deleted file mode 100644 index a9dbf849ff8ab4..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ReluQuantizeFuse.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from typing import Dict - -import numpy as np - -from openvino.tools.mo.middle.BinarizeWeightsM1P1 import BinarizeWeightsM1P1 -from openvino.tools.mo.middle.MulFakeQuantizeFuse import resolve_shared_inputs -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class ReluFakeQuantizeMark(MiddleReplacementPattern): - """ - This pass marks Relu operations that can be fused to FakeQuantize op with `removable_before_quantize` flag. - - 1. We count the number of Relu outputs that are Quantize and can absorb Relu (`quantized_to_fuse_count` attribute). - 2. Relu is fusible if all its outputs can absorb it. - - """ - enabled = True - - def run_after(self): - return [BinarizeWeightsM1P1] - - def run_before(self): - from openvino.tools.mo.middle.SharedWeightsDuplication import SharedWeightsDuplication - return [SharedWeightsDuplication] - - def pattern(self): - return dict( - nodes=[ - ('relu', dict(op='ReLU')), - ('relu_d', dict()), - ('quantize', dict(op='FakeQuantize')), - ], - edges=[ - ('relu', 'relu_d'), - ('relu_d', 'quantize', {'in': 0}), - ] - ) - - def replace_pattern(self, graph: Graph, match: Dict[str, Node]): - relu = match['relu'] - quantize = match['quantize'] - - if not relu.has_valid('quantized_to_fuse_count'): - relu['quantized_to_fuse_count'] = 0 - - if quantize.in_node(1).id == quantize.in_node(2).id: - # Provisional limitation that related to binary quantization - assert quantize.has_valid('levels') and quantize.levels == 2 - - threshold = quantize.in_port(1).data.get_value() - if threshold is None: - log.debug('ReluQuantizeFuse: cannot fuse because FakeQuantize op has dynamic input on the 1st port. ' - 'levels=`{}`'.format(quantize.levels)) - return - - relu['quantized_to_fuse_count'] += 1 - - else: - assert quantize.has_valid('levels') and quantize.levels != 2 - min_value = quantize.in_port(1).data.get_value() - if min_value is None: - log.debug('ReluQuantizeFuse: cannot fuse because FakeQuantize op has dynamic input on the 1st port, ' - 'levels=`{}`'.format(quantize.levels)) - return - if np.all(min_value >= 0): - relu['quantized_to_fuse_count'] += 1 - - relu['removable_before_quantize'] = relu['quantized_to_fuse_count'] == len(relu.out_port(0).get_destinations()) - - -class ClampQuantizeMark(MiddleReplacementPattern): - """ - This pass marks Clamp operations that can be fused to FakeQuantize op with `removable_before_quantize` flag. - - 1. We count the number of Clamp outputs that are FakeQuantize and can absorb Clamp (`quantized_to_fuse_count` attribute) - 2. Clamp is fusible if all its outputs can absorb it. 
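# A numpy sketch of the check used by ReluFakeQuantizeMark above: when every element of
# FakeQuantize's input_low (port 1) is non-negative, the quantization range already discards
# negative values, so a preceding ReLU changes nothing and can be removed. The fake_quantize
# helper and the concrete limits are illustrative assumptions.
import numpy as np

def fake_quantize(x, in_lo, in_hi, out_lo, out_hi, levels=256):
    q = np.round((np.clip(x, in_lo, in_hi) - in_lo) / (in_hi - in_lo) * (levels - 1))
    return q / (levels - 1) * (out_hi - out_lo) + out_lo

x = np.linspace(-2.0, 2.0, 9)
in_lo, in_hi = 0.0, 1.5                        # input_low >= 0 -> ReLU is fusible
assert np.allclose(fake_quantize(np.maximum(x, 0.0), in_lo, in_hi, 0.0, 1.5),
                   fake_quantize(x, in_lo, in_hi, 0.0, 1.5))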
- - """ - enabled = True - - def run_after(self): - return [BinarizeWeightsM1P1] - - def run_before(self): - from openvino.tools.mo.middle.SharedWeightsDuplication import SharedWeightsDuplication - return [SharedWeightsDuplication] - - def pattern(self): - return dict( - nodes=[ - ('clamp', dict(op='Clamp')), - ('clamp_d', dict()), - ('quantize', dict(op='FakeQuantize')), - ], - edges=[ - ('clamp', 'clamp_d'), - ('clamp_d', 'quantize', {'in': 0}), - ] - ) - - def replace_pattern(self, graph: Graph, match: Dict[str, Node]): - clamp = match['clamp'] - quantize = match['quantize'] - clamp_min = clamp.in_port(1).data.get_value() - clamp_max = clamp.in_port(2).data.get_value() - if clamp_min is None or clamp_max is None: - log.debug('ReluQuantizeFuse: cannot fuse because Clamp op has dynamic input on the 1st or 2nd port') - return - - if not clamp.has_valid('quantized_to_fuse_count'): - clamp['quantized_to_fuse_count'] = 0 - - if quantize.in_node(1).id == quantize.in_node(2).id: - # Binary case is not tested so we won't fuse Clamp - assert quantize.has_valid('levels') and quantize.levels == 2 - clamp['removable_before_quantize'] = False - else: - assert quantize.has_valid('levels') and quantize.levels != 2 - min_value = quantize.in_port(1).data.get_value() - if min_value is None: - log.debug('ReluQuantizeFuse: cannot fuse because FakeQuantize op has dynamic input on the 1st port, ' - 'levels=`{}`'.format(quantize.levels)) - return - max_value = quantize.in_port(2).data.get_value() - if max_value is None: - log.debug('ReluQuantizeFuse: cannot fuse because FakeQuantize op has dynamic input on the 2nd port, ' - 'levels=`{}`'.format(quantize.levels)) - return - if np.all(min_value >= clamp_min) and np.all(max_value <= clamp_max): - clamp['quantized_to_fuse_count'] += 1 - - clamp['removable_before_quantize'] = clamp['quantized_to_fuse_count'] == len(clamp.out_port(0).get_destinations()) - - -class ReluQuantizeFuse(MiddleReplacementPattern): - """ Fuses ReLU --> FakeQuantize sequence if possible - - Relu --> FakeQuantize fusion is possible if: - 1. Relu is consumed to 0-th port of FakeQuantize - 2. FakeQuantize ports 1 and 2 defines such input range that 0 is not included - """ - enabled = True - - def run_after(self): - return [ReluFakeQuantizeMark] - - def run_before(self): - from openvino.tools.mo.middle.SharedWeightsDuplication import SharedWeightsDuplication - return [SharedWeightsDuplication] - - def pattern(self): - return dict( - nodes=[ - ('relu', dict(removable_before_quantize=True)), - ('relu_d', dict()), - ('quantize', dict(op='FakeQuantize')), - ], - edges=[ - ('relu', 'relu_d'), - ('relu_d', 'quantize', {'in': 0}), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - quantize = match['quantize'] - - if quantize.levels == 2: - # extra logic due to special 1 & 2 port input meaning in binary case - it is threshold separating two quants - threshold = quantize.in_port(1).data.get_value() - - # Direct modifications to quantize 1-st port input are performed. - # So the data node at this input shouldn't have more than 1 consumer maximum 2 consumers to the same - # quantize op (consumed by 1st and 2nd ports). 
So we duplicate FakeQuantize in_port 1 data if needed - resolve_shared_inputs(node=quantize, port_ids_to_duplicate=[1]) - - # As we restricted to binarization case only, so we need to detect from - # which side of 0 FakeQuantize threshold resides: - # if the threshold > 0, it remains the same; - # if the threshold == 0, it also remains the same; - # if the threshold < 0, it should be modified to -infinity that means that all inputs map to output_high - modification_mask = threshold < 0 - threshold[modification_mask] = float('-inf') - - # Reconnect ReLU as it no longer needed for current FakeQuantize - in_relu_connection = quantize.in_port(0).get_source().node.in_port(0).get_connection() - quantize.in_port(0).disconnect() - in_relu_connection.add_destination(quantize.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/middle/RemoveDuplicationMemory.py b/tools/mo/openvino/tools/mo/middle/RemoveDuplicationMemory.py deleted file mode 100644 index f62a5928b907f0..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/RemoveDuplicationMemory.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.crop import Crop - - -class RemoveMemoryDuplicationPattern(MiddleReplacementPattern): - """ - Remove Splice nodes with context that is included in context of another Splice with the same input - """ - enabled = True - - def run_before(self): - return [MergeNeighborSplicePattern] - - @staticmethod - def pattern(): - return dict( - nodes=[('op', dict(op='Splice'))], - edges=[]) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - mem = match['op'] - mem_shape = mem.in_port(0).data.get_shape() - mem_parent = mem.in_port(0).get_source() - context = mem['context'] - - for child_port in mem_parent.get_destinations(): - child = child_port.node - # check if we find Splice containing context 'context' - if child['op'] == 'Splice' and child.id != mem.id and set(child['context']).issubset(set(context)): - left_cont_out = child['context'][0] - left_cont = context[0] - - for child_of_child in child.out_port(0).get_destinations(): - out_transfer = child_of_child.node - out_transfer_port = child_of_child - if out_transfer['op'] == 'Crop': - # modify existing Crop to get right data from larger Splice - out_transfer['offset'] = out_transfer['offset'] + (left_cont_out - left_cont) * mem_shape[-1] - else: - # insert Crop if we have not one - child_of_child.disconnect() - crop_node = Crop(graph, {'name': graph.unique_id(prefix='Splice_crop_'), - 'offset': (left_cont_out - left_cont) * mem_shape[-1], - 'dim': mo_array([len(child['context']) * mem_shape[-1]]), - 'axis': mo_array([-1])}).create_node() - child.out_port(0).connect(crop_node.in_port(0)) - crop_node.out_port(0).connect(child_of_child) - crop_node.out_port(0).data.set_shape(child.out_port(0).data.get_shape()) - - out_transfer_port = crop_node.in_port(0) - - # move edge to child from old Splice to larger - out_transfer_port.disconnect() - mem.out_port(0).connect(out_transfer_port) - - graph.remove_node(child.id) - - -class MergeNeighborSplicePattern(MiddleReplacementPattern): - """ - Merge Splices with neighbor contexts, for example: [-5, 0] and [0, 3] to context [-5, 3] - """ - enabled = True - - def run_after(self): - return [RemoveMemoryDuplicationPattern] - - 
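# The offset arithmetic used by RemoveMemoryDuplicationPattern above when a Splice with a
# smaller context is served from a larger Splice over the same input: the Crop offset is the
# shift between the two left context boundaries, scaled by the feature size. The context
# lists and feature size below are illustrative.
feat_dim = 40
larger_context = [-5, -4, -3, -2, -1, 0]        # context of the Splice that is kept
smaller_context = [-2, -1, 0]                   # context of the Splice being removed
crop_offset = (smaller_context[0] - larger_context[0]) * feat_dim   # 3 * 40 = 120
crop_dim = len(smaller_context) * feat_dim                          # 3 * 40 = 120
assert (crop_offset, crop_dim) == (120, 120)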
@staticmethod - def pattern(): - return dict( - nodes=[('op', dict(op='Splice'))], - edges=[]) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - mem = match['op'] - mem_shape = mem.in_port(0).data.get_shape() - mem_parent = mem.in_port(0).get_source() - context = mem['context'] - - for child_port in mem_parent.get_destinations(): - child = child_port.node - if child['op'] == 'Splice' and child.id != mem.id and \ - (child['context'][0] == context[-1] or child['context'][0] == context[-1]): - - new_context = list(context) - new_context.extend(list(child['context'])) - new_context = list(set(new_context)) - new_context.sort() - if child['context'][0] == context[-1]: - new_node = mem - rem_node = child - else: - new_node = child - rem_node = mem - - # reset edges from rem_node to new_node - for out_port_rem in rem_node.out_port(0).get_destinations(): - out_transfer = out_port_rem.node - out_transfer_shape = out_port_rem.data.get_shape().copy() - - out_port_rem.disconnect() - - if out_transfer['op'] == 'Crop': - # modify existing Crop to get right data from larger Splice - out_transfer['offset'] = out_transfer['offset'] + (len(new_context) - len(rem_node.context)) * mem_shape[-1] - out_port_rem.connect(new_node.out_port(0)) - else: - # insert Crop if we have not one - crop_node = Crop(graph, {'name': graph.unique_id(prefix='Splice_crop_'), - 'offset': (len(new_context) - len(rem_node.context)) * mem_shape[-1], - 'dim': mo_array([len(rem_node['context']) * mem_shape[-1]]), - 'axis': mo_array([-1])}).create_node() - new_node.out_port(0).connect(crop_node.in_port(0)) - crop_node.out_port(0).connect(out_port_rem) - crop_node.out_port(0).data.set_shape(out_transfer_shape) - - for out_port_rem in new_node.out_port(0).get_destinations(): - out_transfer = out_port_rem.node - out_transfer_shape = out_port_rem.data.get_shape().copy() - - if out_transfer['op'] != 'Crop': - # insert Crop if we have not one - crop_node = Crop(graph, {'name': graph.unique_id(prefix='Splice_crop_'), - 'offset': mo_array([0]), - 'dim': mo_array([len(new_node['context']) * mem_shape[-1]]), - 'axis': mo_array([-1])}).create_node() - new_node.out_port(0).connect(crop_node.in_port(0)) - out_port_rem.disconnect() - crop_node.out_port(0).connect(out_port_rem) - crop_node.out_port(0).data.set_shape(out_transfer_shape) - - new_shape = new_node.out_port(0).data.get_shape() - new_shape[1] += rem_node.out_port(0).data.get_shape()[1] - rem_node.in_port(0).data.get_shape()[1] - new_node.out_port(0).data.set_shape(new_shape) - new_node.context = new_context - - graph.remove_node(rem_node.id) diff --git a/tools/mo/openvino/tools/mo/middle/RemoveIdentity.py b/tools/mo/openvino/tools/mo/middle/RemoveIdentity.py deleted file mode 100644 index ad887f0a96d06f..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/RemoveIdentity.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.eliminate import remove_op_node_with_data_node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class RemoveIdentity(MiddleReplacementPattern): - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.InputCut import MiddleInputCut - return [MiddleInputCut] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def pattern(self): - return dict( - nodes=[('op', dict(kind='op', 
identity=True))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - remove_op_node_with_data_node(graph, match['op']) - - -class RemoveDropout(MiddleReplacementPattern): - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.InputCut import MiddleInputCut - return [MiddleInputCut] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def pattern(self): - return dict( - nodes=[('op', dict(op='Dropout'))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - remove_op_node_with_data_node(graph, match['op']) - - -class RemoveNodesWithZeroPhase(MiddleReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.InputCut import MiddleInputCut - return [MiddleInputCut] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def pattern(self): - return dict( - nodes=[('op', dict(kind='op', phase=0))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - remove_op_node_with_data_node(graph, match['op']) diff --git a/tools/mo/openvino/tools/mo/middle/RemoveRedundantReshapeAfterCropAndResize.py b/tools/mo/openvino/tools/mo/middle/RemoveRedundantReshapeAfterCropAndResize.py deleted file mode 100644 index 022c91e4f3e121..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/RemoveRedundantReshapeAfterCropAndResize.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.middle.FuseReshapesSequence import FuseReshapesSequence -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class RemoveRedundantReshapeAfterCropAndResize(MiddleReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import MiddleFinish - return [MiddleFinish] - - def run_before(self): - return [FuseReshapesSequence] - - def pattern(self): - return dict( - nodes=[ - ('crop_and_resize', dict(kind='op', op='CropAndResize')), - ('crop_and_resize_data', dict(kind='data')), - ('reshape_1', dict(kind='op', op='Reshape')), - ('reshape_1_data', dict(kind='data')), - ('reshape_2', dict(kind='op', op='Reshape')), - ], - edges=[ - ('crop_and_resize', 'crop_and_resize_data'), - ('crop_and_resize_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'reshape_2'), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - car_node = match['crop_and_resize'] - reshape_2_node = match['reshape_2'] - - shape_1 = match['crop_and_resize_data'].shape - shape_2 = match['reshape_2'].out_node().shape - if not np.all(shape_1 == shape_2): - log.debug('Cannot remove reshape operations after CropAndResize due to different shapes: {} vs {}'.format( - shape_1, shape_2 - )) - return - - car_node.out_port(0).disconnect() - consumer_port_node = reshape_2_node.out_port(0).get_connection().get_destination() - consumer_port_node.disconnect() - car_node.out_port(0).connect(consumer_port_node) diff --git a/tools/mo/openvino/tools/mo/middle/RemoveRedundantReshapes.py b/tools/mo/openvino/tools/mo/middle/RemoveRedundantReshapes.py deleted file mode 100644 index 70b96c73e7ca64..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/RemoveRedundantReshapes.py +++ /dev/null @@ -1,42 +0,0 @@ -# 
Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.middle.FuseReshapesSequence import FuseReshapesSequence -from openvino.tools.mo.middle.pass_separator import PostMiddleStart -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class RemoveRedundantReshapes(MiddleReplacementPattern): - """ - Finds Reshape layers that does nothing and removes them. - """ - enabled = True - force_clean_up = True - run_not_recursively = True # non-unified data nodes view in TI body (no Const ops, bare data node) - - def run_after(self): - return [FuseReshapesSequence] - - def run_before(self): - return [PostMiddleStart] - - def find_and_replace_pattern(self, graph: Graph): - for reshape_node in graph.get_op_nodes(type='Reshape'): - in_ports = [port for port in reshape_node.in_ports().values() if not port.disconnected()] - assert len(in_ports) == 2, "`Reshape` node must have 2 inputs" - previous_dim_op = reshape_node.in_port(1).get_source().node.op - if previous_dim_op != 'Const': - continue - dim = reshape_node.in_port(1).get_connection().data.get_value() - - in_shape = reshape_node.in_port(0).data.get_shape() - - if np.array_equal(dim, in_shape) and len(reshape_node.out_nodes()): - log.debug("Useless reshape with dim {} was deleted: {}".format(str(dim), reshape_node.name)) - reshape_node.out_port(0).get_connection().set_source(reshape_node.in_port(0).get_source()) - graph.remove_nodes_from([reshape_node.out_node(0).id, reshape_node.id]) diff --git a/tools/mo/openvino/tools/mo/middle/RemoveUselessConcatSplit.py b/tools/mo/openvino/tools/mo/middle/RemoveUselessConcatSplit.py deleted file mode 100644 index 2fd5668b57a7a0..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/RemoveUselessConcatSplit.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class RemoveUselessConcatSplitPattern(MiddleReplacementPattern): - r""" - Remove useless construction with concat and split like follows: - / / | \ \ - br1 br2 .. br(n-1)br(n) - \ \ | / / - concat - | - split - / / | \ \ - br1 br2 .. 
br(n-1)br(n) - - """ - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.ReplaceSpliceNodePattern import ReplaceSpliceNodePattern - return [ReplaceSpliceNodePattern] - - @staticmethod - def pattern(): - return dict( - nodes=[('concat', dict(op='Concat')), - ('data', dict(kind='data')), - ('split', dict(op='Split'))], - edges=[('concat', 'data'), - ('data', 'split')]) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - concat_node = match['concat'] - split_node = match['split'] - - # don't apply pass if concat have another outputs except split - if len(concat_node.out_port(0).get_destinations()) != 1: - return - - inputs = list(concat_node.in_ports().values()) - outputs = list(split_node.out_ports().values()) - - if len(inputs) != len(outputs): - return - - for i in range(len(inputs)): - if not all(inputs[i].data.get_shape() == outputs[i].data.get_shape()): - return - - for i in range(len(inputs)): - outputs[i].get_connection().set_source(inputs[i].get_source()) - inputs[i].disconnect() diff --git a/tools/mo/openvino/tools/mo/middle/RemoveUselessCrops.py b/tools/mo/openvino/tools/mo/middle/RemoveUselessCrops.py deleted file mode 100644 index b4a1e2bb82ad7b..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/RemoveUselessCrops.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class RemoveUselessCropsPattern(MiddleReplacementPattern): - r""" - Remove useless construction with crops and concat like follows: - in_node - / / | \ \ - crop crop .. crop crop - \ \ | / / - out_node - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.RemoveDuplicationMemory import MergeNeighborSplicePattern - return [MergeNeighborSplicePattern] - - @staticmethod - def pattern(): - return dict( - nodes=[('crop', dict(op='Crop')), - ('data', dict(kind='data')), - ('concat', dict(op='Concat'))], - edges=[('crop', 'data'), - ('data', 'concat', {'in': 0})]) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - crop_node = match['crop'] - crop_node_parent_port = crop_node.in_port(0).get_source() - concat_node = match['concat'] - - if len(crop_node.out_port(0).get_destinations()) != 1: - return - - outs = crop_node_parent_port.get_destinations() - offsets_dims = list([]) - crop_list = list([]) - axis = crop_node['axis'] - for in_port in outs: - out = in_port.node - if out['op'] == 'Crop' and out['axis'] == axis and \ - len(out.out_port(0).get_destinations()) == 1 and \ - out.out_port(0).get_destination().node == concat_node: - # crop type 1 - if 'dim' in out: - offsets_dims.append((out['offset'], out['dim'])) - # crop type 3 - elif 'crop_begin' in out and 'crop_end' in out: - offsets_dims.append((out['crop_begin'], out['crop_end']-out['crop_begin'])) - # crop type 2 with const dim - elif not out.in_port(1).disconnected() and out.in_port(1).data.get_value() is not None: - offsets_dims.append((out['offset'], out.in_port(1).data.get_value())) - # crop type 2 with non-const dim or strange type of crop - else: - return - crop_list.append(out) - - offsets_dims.sort(key=lambda off_dim: off_dim[0]) - size = 0 - for off_d in offsets_dims: - if size != off_d[0]: - return - size = size + off_d[1] - - if size != crop_node_parent_port.data.get_shape()[axis]: - return - - remove_concat = True - free_port = None - for inp in 
concat_node.in_ports(): - if not concat_node.in_port(inp).disconnected(): - in_node = concat_node.in_port(inp).get_source().node - if in_node not in crop_list: - remove_concat = False - else: - in_node.out_port(0).disconnect() - free_port = inp - - if remove_concat: - concat_outs = concat_node.out_port(0).get_destinations() - for out in concat_outs: - out.disconnect() - crop_node_parent_port.connect(out) - else: - crop_node_parent_port.connect(concat_node.in_port(free_port)) diff --git a/tools/mo/openvino/tools/mo/middle/RemoveUselessPad.py b/tools/mo/openvino/tools/mo/middle/RemoveUselessPad.py deleted file mode 100644 index 06c62c3bfb7c37..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/RemoveUselessPad.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.eliminate import remove_op_node_with_data_node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class RemoveUselessPad(MiddleReplacementPattern): - """ - The Pad layer is removed if all padding values are equal to 0 (Constant values). - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='Pad'): - all_pads_zeros = True - for in_port_ind in range(1, 3): - input_node = node.in_port(in_port_ind).get_source().node - value = input_node.soft_get('value', None) - all_pads_zeros &= input_node.soft_get('type') == 'Const' and value is not None and np.all(value == 0) - - if all_pads_zeros: - remove_op_node_with_data_node(graph, node) diff --git a/tools/mo/openvino/tools/mo/middle/ReplaceMemoryOffsetWithSplice.py b/tools/mo/openvino/tools/mo/middle/ReplaceMemoryOffsetWithSplice.py deleted file mode 100644 index e71bd6eaa9075b..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ReplaceMemoryOffsetWithSplice.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.splice import Splice -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.assign import Assign -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.crop import Crop -from openvino.tools.mo.ops.read_value import ReadValue -from openvino.tools.mo.ops.result import Result -from openvino.tools.mo.utils.error import Error - - -class ReplaceMemoryOffsetNodePattern(MiddleReplacementPattern): - """ - Replace MemoryOffset with Splice - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.middle.RemoveDuplicationMemory import RemoveMemoryDuplicationPattern - return [RemoveMemoryDuplicationPattern] - - def run_after(self): - from openvino.tools.mo.middle.split_tdnn_memoryoffset import SplitTdnnMemoryOffset - return [SplitTdnnMemoryOffset] - - @staticmethod - def pattern(): - return dict( - nodes=[('op', dict(op='MemoryOffset', has_default=False))], - edges=[]) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - pair_node = Node(graph, node.pair_name) - - if pair_node.has_default: - return - - if node.in_port(0).get_source() is not None: - 
input_node_out_port = node.in_port(0).get_source() - op_output_id = node.out_port(0).get_destination().node.id - out_node_in_ports = pair_node.out_port(0).get_destinations() - else: - input_node_out_port = pair_node.in_port(0).get_source() - op_output_id = pair_node.out_port(0).get_destination().node.id - out_node_in_ports = node.out_port(0).get_destinations() - - in_shape = input_node_out_port.data.get_shape().copy() - - node_id = node.id - node_name = node.name - node_t = node.t - - splice = Splice(graph, {'name': node_name, - 'id': node_id, - 'context': int64_array(range(node_t, 1)) - if node_t < 0 else int64_array(range(0, node_t+1))}).create_node() - splice.in_port(0).connect(input_node_out_port) - - # offset of Crop will be 0 (first element) if node_t < 0 and in_shape[1]*node_t (last element) if node_t > 0 - crop = Crop(graph, {'name': 'Splice_Crop', - 'axis': int64_array([1]), - 'offset': int64_array([max(0, in_shape[1] * node_t)]), - 'dim': int64_array([in_shape[1]])}).create_node() - - splice.out_port(0).connect(crop.in_port(0)) - splice.out_port(0).data.set_shape(int64_array([in_shape[0], (abs(node_t) + 1) * in_shape[1]])) - - outs = input_node_out_port.get_destinations() - for in_port in outs: - out_ = in_port.node - if out_.op == 'Concat' and out_ == out_node_in_ports[0].node: - crop_input = Crop(graph, {'name': 'Splice_Crop', - 'axis': int64_array([1]), - 'offset': int64_array([-min(0, in_shape[1] * node_t)]), - 'dim': int64_array([in_shape[1]])}).create_node() - splice.out_port(0).connect(crop_input.in_port(0)) - - in_port.disconnect() - crop_input.out_port(0).connect(in_port) - crop_input.out_port(0).data.set_shape(in_shape) - - for dest_port in out_node_in_ports: - dest_port.connect(crop.out_port(0)) - - graph.remove_node(op_output_id) - graph.remove_node(node.id) - graph.remove_node(pair_node.id) - - -class ReplaceMemoryOffsetWithMemoryNodePattern(MiddleReplacementPattern): - """ - Replace MemoryOffset with Memory if IfDefined used with it to avoid cycles - """ - enabled = True - force_shape_inference = True - - def run_before(self): - from openvino.tools.mo.middle.RemoveDuplicationMemory import RemoveMemoryDuplicationPattern - return [RemoveMemoryDuplicationPattern] - - @staticmethod - def pattern(): - return dict( - nodes=[('op', dict(op='MemoryOffset', has_default=True))], - edges=[]) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - pair_node = Node(graph, node.pair_name) - - if node.t >= 0: - raise Error('Does not support IfDefined with t > 0') - - if node.in_port(0).get_source() is not None: - input_port = node.in_port(0).get_source() - op_output_id = node.out_port(0).get_destination().node.id - out_port = pair_node.out_port(0) - node_name = node.name - pair_name = pair_node.name - else: - input_port = pair_node.in_port(0).get_source() - op_output_id = pair_node.out_port(0).get_destination().node.id - out_port = node.out_port(0) - node_name = pair_node.name - pair_name = node.name - - in_shape = input_port.data.get_shape() - node_t = abs(node.t) - - init_value_memory_out = Const(graph, {'name': 'init_value_' + pair_name, - 'value': np.zeros(int64_array([in_shape[0], in_shape[1]*node_t]), dtype=np.float32), - 'shape': int64_array([in_shape[0], in_shape[1]*node_t])}).create_node() - memory_out = ReadValue(graph, {'name': pair_name, - 'variable_id': node_name+pair_name, - 'variable_shape': None, - 'variable_type': None - }).create_node() - init_value_memory_out.out_port(0).connect(memory_out.in_port(0)) - - if node_t > 1: - 
crop_concat = Crop(graph, {'name': 'Memory_crop', 'dim': mo_array([in_shape[1]*(node_t-1)]), - 'offset': mo_array([in_shape[1]]), 'axis': mo_array([1])}).create_node() - memory_out.out_port(0).connect(crop_concat.in_port(0)) - concat = Concat(graph, {'name': 'Memory_concat'}).create_node() - concat.add_sequence_of_ports('in', range(2)) - crop_concat.out_port(0).connect(concat.in_port(0)) - concat.in_port(1).connect(input_port) - - memory_in = Assign(graph, {'name': node_name, 'variable_id': node_name + pair_name}).create_node() - concat.out_port(0).connect(memory_in.in_port(0)) - out = Result(graph, {'name': 'Memory_output'}).create_node() - memory_in.out_port(0).connect(out.in_port(0)) - - crop_out = Crop(graph, {'name': 'Memory_crop_out', 'dim': mo_array([in_shape[1]]), - 'offset': mo_array([0]), 'axis': mo_array([1])}).create_node() - memory_out.out_port(0).connect(crop_out.in_port(0)) - out_port.get_connection().set_source(crop_out.out_port(0)) - else: - memory_in = Assign(graph, {'name': node_name, 'variable_id': node_name + pair_name}).create_node() - memory_in.in_port(0).connect(input_port) - out = Result(graph, {'name': 'Memory_output'}).create_node() - memory_in.out_port(0).connect(out.in_port(0)) - out_port.get_connection().set_source(memory_out.out_port(0)) - - graph.remove_node(op_output_id) - graph.remove_node(node.id) - graph.remove_node(pair_node.id) diff --git a/tools/mo/openvino/tools/mo/middle/ReplacePNorm.py b/tools/mo/openvino/tools/mo/middle/ReplacePNorm.py deleted file mode 100644 index a61254c32a338e..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ReplacePNorm.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.ReduceOps import ReduceSum -from openvino.tools.mo.ops.elementwise import Pow -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.reshape import Reshape - - -class ReplacePNormNodePattern(MiddleReplacementPattern): - """ - PNorm operation should be replaced by operations: Power(P) -> Reshape(n,c*g->n,g,c)-> ReduceSum(axis=1)-> Power(1/P) - """ - enabled = True - - @staticmethod - def pattern(): - return dict( - nodes=[('op', dict(op='pnorm'))], - edges=[]) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - shape = node.in_port(0).data.get_shape().copy() - - assert shape[1] % node.group == 0 - - power_node = create_op_node_with_second_input(graph, Pow, node.p, {'name': node.id + '_power'}) - - reshape_node = create_op_node_with_second_input(graph, Reshape, - int64_array([shape[0], shape[1] / node.group, node.group]), - {'name': node.id + '_reshape'}) - reshape_node.in_port(0).connect(power_node.out_port(0)) - - reducesum_node = create_op_node_with_second_input(graph, ReduceSum, - int64_array([2]), - {'name': node.id + '_sum', 'keep_dims': False}) - reducesum_node.in_port(0).connect(reshape_node.out_port(0)) - - invpower_node = create_op_node_with_second_input(graph, Pow, 1.0 / node.p, {'name': node.id + '_invpower'}) - - invpower_node.in_port(0).connect(reducesum_node.out_port(0)) - - node.in_port(0).get_connection().set_destination(power_node.in_port(0)) - node.out_port(0).get_connection().set_source(invpower_node.out_port(0)) diff --git 
a/tools/mo/openvino/tools/mo/middle/ReplaceSpliceNodePattern.py b/tools/mo/openvino/tools/mo/middle/ReplaceSpliceNodePattern.py deleted file mode 100644 index ec34aa53b99b51..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ReplaceSpliceNodePattern.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.kaldi.replace_lstm_node_pattern import unique_id -from openvino.tools.mo.ops.split import VariadicSplit -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.assign import Assign -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.crop import Crop -from openvino.tools.mo.ops.read_value import ReadValue -from openvino.tools.mo.ops.result import Result - - -class ReplaceSpliceNodePattern(MiddleReplacementPattern): - r""" - This pass decomposes Splice layer to the sequence Slice Concat and Memory layers - For example: - Let's suppose we have next graph: - - Input (N, H) -> Slice -> Next_Layer (N, k*H) - - Where (N, k*H) is is real input of subsequent topology. - Splice is used for accumulation next (k-1)/2 and previous (k-1)/2 input data - - So this pass will convert this graph to the next one: - - Input [N, H] __ - / / - Concat [N, k*H] - / \ - Memory [N, k*H] -> Slice [N, (k-1)*H] Memory [N, k*H] - - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.RemoveDuplicationMemory import MergeNeighborSplicePattern, RemoveMemoryDuplicationPattern - return [MergeNeighborSplicePattern, - RemoveMemoryDuplicationPattern] - - @staticmethod - def pattern(): - return dict( - nodes=[('op', dict(op='Splice'))], - edges=[]) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - node = match['op'] - in_shape = node.in_port(0).data.get_shape().copy() - memory_element = in_shape[1] - node.const_dim - memory_size = memory_element * len(node.context) - - memory_pair_id = unique_id('id') - # Memory(in) - input_memory = ReadValue(graph, {'name': 'prev_splice_memory', - 'variable_id': memory_pair_id, - 'variable_shape': None, - 'variable_type': None - }).create_node() - - # Memory(in) \ - # Crop - # Input(temp) / - crop = Crop(graph, {'name': 'Splice_Crop', - 'axis': int64_array([1]), - 'offset': int64_array([memory_element]), - 'dim': int64_array([memory_size - memory_element])}).create_node() - crop.in_port(0).connect(input_memory.out_port(0)) - - # Crop \ - # Concat - # Input / - concat_node = Concat(graph, {'name': 'Splice_Concat', - 'in_ports_count': 2, - 'axis': 1}).create_node() - concat_node.in_port(0).connect(crop.out_port(0)) - - # Concat -> Memory(out) - mem_out = Assign(graph, {'name': 'out_splice_memory', 'variable_id': memory_pair_id}).create_node() - mem_out.in_port(0).connect(concat_node.out_port(0)) - Result(graph).create_node().in_port(0).connect(mem_out.out_port(0)) - - if node.const_dim != 0: - memory_element_constdim = node.const_dim - memory_size_constdim = memory_element_constdim * len(node.context) - - split = create_op_with_const_inputs( - graph, VariadicSplit, {1: int64_array(1), 2: int64_array([memory_element, memory_element_constdim])}, - {'name': node.id + '_split_const', 'out_ports_count': 2}) - - 
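The stateful decomposition described above is easiest to see on plain arrays: keep a buffer of the last k frames, drop the oldest H columns and append the newest frame on every step (ReadValue/Crop/Concat/Assign in the graph). A minimal NumPy sketch of that update rule; the names here are illustrative, not taken from the deleted pass:

import numpy as np

def splice_step(memory, frame):
    # memory: [N, k*H] accumulated context, frame: [N, H] current input.
    # Crop drops the oldest H columns, Concat appends the new frame,
    # Assign writes the result back to the state variable.
    n, h = frame.shape
    return np.concatenate([memory[:, h:], frame], axis=1)

memory = np.zeros((1, 3 * 2), dtype=np.float32)   # k = 3 context frames, H = 2
for t in range(3):
    memory = splice_step(memory, np.full((1, 2), t, dtype=np.float32))
# After three steps the buffer holds frames 0, 1, 2 from left to right.
assert memory.tolist() == [[0.0, 0.0, 1.0, 1.0, 2.0, 2.0]]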
split.out_port(0).connect(concat_node.in_port(1)) - - # create separate splice construction for const_dim - memory_pair_id = unique_id('memory_for_const_dim') - init_value_input_memory_const_dim = Const(graph, {'name': 'init_value_const_dim_in_memory', - 'value': np.zeros(int64_array([in_shape[0], - memory_size_constdim]), dtype=np.float32), - 'shape': int64_array([in_shape[0], - memory_size_constdim])}).create_node() - input_memory_const_dim = ReadValue(graph, {'name': 'const_dim_in_memory', - 'variable_id': memory_pair_id, - 'variable_shape': None, - 'variable_type': None - }).create_node() - init_value_input_memory_const_dim.out_port(0).connect(input_memory_const_dim.in_port(0)) - - crop_const_dim = Crop(graph, {'name': 'const_dim_crop', - 'axis': int64_array([1]), - 'offset': int64_array([memory_element_constdim]), - 'dim': int64_array( - [memory_size_constdim - memory_element_constdim])}).create_node() - crop_const_dim.in_port(0).connect(input_memory_const_dim.out_port(0)) - - concat_node_const_dim = Concat(graph, {'name': 'const_dim_concat', - 'in_ports_count': 2, - 'axis': 1}).create_node() - concat_node_const_dim.in_port(0).connect(crop_const_dim.out_port(0)) - - mem_out_const_dim = Assign(graph, {'name': 'const_dim_out_memory', - 'variable_id': memory_pair_id}).create_node() - mem_out_const_dim.in_port(0).connect(concat_node_const_dim.out_port(0)) - Result(graph).create_node().in_port(0).connect(mem_out_const_dim.out_port(0)) - - # connect splice to Split as begin and Concat as the end - split.out_port(1).connect(concat_node_const_dim.in_port(1)) - crop_first = Crop(graph, {'name': 'const_dim_crop_first', - 'axis': int64_array([1]), - 'offset': int64_array([0]), - 'dim': int64_array([memory_element_constdim])}).create_node() - crop_first.in_port(0).connect(concat_node_const_dim.out_port(0)) - - concat_const = Concat(graph, {'name': node.id + '_concat_const', 'axis': 1, - 'in_ports_count': 2}).create_node() - concat_const.in_port(1).connect(crop_first.out_port(0)) - concat_const.in_port(0).connect(concat_node.out_port(0)) - - init_value_input_memory = Const(graph, {'name': 'init_value_' + node.name, - 'value': np.zeros(int64_array([in_shape[0], memory_size]), dtype=np.float32), - 'shape': int64_array([in_shape[0], memory_size])}).create_node() - init_value_input_memory.out_port(0).connect(input_memory.in_port(0)) - node.in_port(0).get_connection().set_destination(split.in_port(0)) - node.out_port(0).get_connection().set_source(concat_const.out_port(0)) - else: - init_value_input_memory = Const(graph, {'name': 'init_value_' + node.name, - 'value': np.zeros(int64_array([in_shape[0], memory_size]), dtype=np.float32), - 'shape': int64_array([in_shape[0], memory_size])}).create_node() - init_value_input_memory.out_port(0).connect(input_memory.in_port(0)) - node.in_port(0).get_connection().set_destination(concat_node.in_port(1)) - node.out_port(0).get_connection().set_source(concat_node.out_port(0)) - - # to avoid re-inference of shape and touching in next replacements - graph.remove_node(node.id) diff --git a/tools/mo/openvino/tools/mo/middle/ReverseTransposeNormalization.py b/tools/mo/openvino/tools/mo/middle/ReverseTransposeNormalization.py deleted file mode 100644 index fe1a9ff40a8ba0..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ReverseTransposeNormalization.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph, Node -from 
openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const - - -class ReverseTransposeNormalization(MiddleReplacementPattern): - enabled = True - force_shape_inference = True - - def pattern(self): - return dict( - nodes=[('transpose', dict(type='Transpose', reverse_order=True))], - edges=[], - ) - - def replace_pattern(self, graph: Graph, match: [str, Node]): - node = match['transpose'] - assert len(node.in_nodes()) == 1 - order = np.arange(len(node.in_port(0).data.get_shape()))[::-1] - const = Const(graph, {'value': order, 'name': node.soft_get('name', node.id) + '/Order'}).create_node() - node.add_input_port(1, skip_if_exist=True) - const.out_port(0).connect(node.in_port(1)) - node['reverse_order'] = False diff --git a/tools/mo/openvino/tools/mo/middle/ReverseV2ToReverseSequence.py b/tools/mo/openvino/tools/mo/middle/ReverseV2ToReverseSequence.py deleted file mode 100644 index 18b0e9ff0609e3..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/ReverseV2ToReverseSequence.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.reverse_sequence import ReverseSequence -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.broadcast import Broadcast -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.ops.unsqueeze import Unsqueeze -from openvino.tools.mo.utils.shape import node_to_get_shape_value_of_indices - - -class ReverseToReverseSequence(MiddleReplacementPattern): - """ - Transformation converts Reverse to ReverseSequence operation. - Parameters for ReverseSequence calculates in the following way: - * seq_axis - set axis value from Reverse operation - * batch_axis - set 0 if seq_axis is not 0 otherwise set 1 - * seq_lengths - take from shape shape[seq_axis] value and broadcast it to vector with shape[batch_axis] length - If input is 1D tensor then we add one more dimension to set different seq_axis and batch_axis. - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.PartialInfer import PartialInfer - return [PartialInfer] - - def run_before(self): - from openvino.tools.mo.middle.reverse_tensor_iterator import ReverseTensorIteratorLSTM - return [ReverseTensorIteratorLSTM] - - def find_and_replace_pattern(self, graph: Graph): - reverse_nodes = graph.get_op_nodes(op='Reverse') - for reverse in reverse_nodes: - reverse_name = reverse.soft_get('name', reverse.id) - - assert reverse.in_port(1).disconnected() - assert reverse.has_valid('axis') - - in_shape_rank = len(reverse.in_port(0).data.get_shape()) - # 1. Add new dimension as batch for rank = 1 to have batch != seq_axis - if in_shape_rank == 1: - unsq_node = create_op_node_with_second_input(graph, Unsqueeze, int64_array([0]), - {'name': reverse_name+"/Unsqueeze"}) - reverse.in_port(0).get_source().connect(unsq_node.in_port(0)) - new_in = unsq_node.out_port(0) - batch_axis = 0 - seq_axis = 1 - else: - new_in = reverse.in_port(0).get_source() - seq_axis = reverse['axis'] - batch_axis = 0 if seq_axis != 0 else 1 - - # 2. 
For ReverseSequence 1-port input is seq_lengths => create this input node as - # shape[seq_axis] broadcasted to shape[batch_axis] - # in ---> ShapeOf ----> Gather(seq_axis) ----> Broadcast-----> - # | | - # | -------> Gather(batch_axis)----------| - shape_node = Shape(graph, {'name': reverse_name + "/Shape"}).create_node() - new_in.connect(shape_node.in_port(0)) - seq_axis_node = node_to_get_shape_value_of_indices(shape_node, [seq_axis]) - batch_node = node_to_get_shape_value_of_indices(shape_node, [batch_axis]) - broadcast_node = Broadcast(graph, {'name': reverse_name + "/Broadcast"}).create_node() - broadcast_node.in_port(0).connect(seq_axis_node.out_port(0)) - broadcast_node.in_port(1).connect(batch_node.out_port(0)) - - # 3. Create new ReverseSequence node and reconnect all inputs/outputs to it - rename_node(reverse, reverse_name + '/to_delete') - reverse_sequence = ReverseSequence(graph, {'name': reverse_name, 'seq_axis': seq_axis, - 'batch_axis': batch_axis}).create_node() - reverse_sequence.in_port(0).connect(new_in) - reverse_sequence.in_port(1).connect(broadcast_node.out_port(0)) - - # 4. remove added dimension for rank = 1 - if in_shape_rank == 1: - rename_node(reverse_sequence, reverse_name + '/ReverseSequence') - squeeze_node = create_op_node_with_second_input(graph, Squeeze, int64_array([0]), - {'name': reverse_name}) - squeeze_node.in_port(0).connect(reverse_sequence.out_port(0)) - reverse.out_port(0).get_connection().set_source(squeeze_node.out_port(0)) - else: - reverse.out_port(0).get_connection().set_source(reverse_sequence.out_port(0)) - - # 5. Delete old Reverse node - graph.remove_nodes_from([reverse.id for reverse in reverse_nodes]) diff --git a/tools/mo/openvino/tools/mo/middle/SSliceComplex.py b/tools/mo/openvino/tools/mo/middle/SSliceComplex.py deleted file mode 100644 index d7c5abd5cce882..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/SSliceComplex.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from typing import Dict - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import add_constant_to_negative_values, create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.transpose import Transpose - - -class SSliceComplex(MiddleReplacementPattern): - """ - Some TF models contain the sub-graph - SomeOp - | - -------------------------- - | | - StridedSlice StridedSlice - | | - ------------------------ - Complex - | - | other inputs - | | ... | - ------------------- - SomeOp1 - - Here SomeOp is some node with the real output and with the shape [N_0, ..., N_{k - 1}, 2, N_{k +1}, ..., N_{r - 1}], - and StridedSlice nodes have output shapes [N_0, ..., N_{k - 1}, N_{k +1}, ..., N_{r - 1}]. - - But MO and OpenVINO do not support complex tensors. Hence, we need to replace this sub-graph with. - 1. If k == r - 1, then the replacement should be the subgraph - - SomeOp other inputs - | | ... | - ------------------- - SomeOp1 - - 2. In the other case, that is if 0 <= k and k < r - 1 the replacement should be the subgraph - - SomeOp - | - Transpose -- input_order - | - | - | other inputs - | | ... | - ------------------- - SomeOp1 - - where the input_order is a Constant, and the value of input_order is [0, ..., k - 1, k + 1, ..., r - 1, k]. 
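That input_order simply moves the axis which held the real/imaginary pair to the end of the tensor. A small NumPy sanity check of the order described above, with k the axis index and r the rank as in the text:

import numpy as np

def complex_axis_to_last(k, r):
    # input_order = [0, ..., k - 1, k + 1, ..., r - 1, k] from the description above
    return np.array([*range(0, k), *range(k + 1, r), k], dtype=np.int64)

x = np.zeros((2, 2, 4, 5))            # rank r = 4; axis k = 1 holds the real/imag pair
order = complex_axis_to_last(1, x.ndim)
assert order.tolist() == [0, 2, 3, 1]
assert np.transpose(x, order).shape == (2, 4, 5, 2)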
- """ - enabled = True - - def pattern(self): - return dict( - nodes=[ - ('strided_slice_real', dict(kind='op', op='StridedSlice')), - ('strided_slice_real_data', dict(kind='data')), - ('strided_slice_imag', dict(kind='op', op='StridedSlice')), - ('strided_slice_imag_data', dict(kind='data')), - ('complex', dict(op='Complex')), - ], - edges=[ - ('strided_slice_real', 'strided_slice_real_data'), - ('strided_slice_imag', 'strided_slice_imag_data'), - ('strided_slice_real_data', 'complex', {'in': 0}), - ('strided_slice_imag_data', 'complex', {'in': 1}), - ]) - - def replace_pattern(self, graph: Graph, match: Dict[str, Node]): - strided_slice_real = match['strided_slice_real'] - strided_slice_imag = match['strided_slice_imag'] - - real_input = strided_slice_real.in_port(0).get_source().node - imag_input = strided_slice_imag.in_port(0).get_source().node - if real_input.id != imag_input.id: - log.debug('The pattern does not correspond to operation for complex tensor. Different inputs.') - return - - real_slices = np.array(strided_slice_real.slices) - imag_slices = np.array(strided_slice_imag.slices) - - zeros_in_real_input_slices = np.argwhere(real_slices==0).flatten() - ones_in_imag_input_slices = np.argwhere(imag_slices==1).flatten() - - if len(zeros_in_real_input_slices) != 1 or len(ones_in_imag_input_slices) != 1: - return - - slice_dim_for_real_part = zeros_in_real_input_slices[0] - slice_dim_for_imag_part = ones_in_imag_input_slices[0] - if slice_dim_for_real_part != slice_dim_for_imag_part: - return - - emulated_complex_tensor_shape = strided_slice_real.in_port(0).data.get_shape() - if emulated_complex_tensor_shape is None: - return - - emulated_complex_tensor_rank = len(emulated_complex_tensor_shape) - complex_node = match['complex'] - - for dst in complex_node.out_port(0).get_connection().get_destinations(): - after_complex_node = dst.node - # TODO: now it does not support adjustment of `axis` inputs for other operations such Gather, Concat, etc. - # It does not traverse the full path affected by complex numbers for adjusting the corresponding operations. - # It can affect other models with complex numbers for which we can generate incorrect IRs or offline transformation fails. - if after_complex_node.type == 'Roll': - add_constant_to_negative_values(after_complex_node, 2, int64_array(emulated_complex_tensor_rank)) - - input_slices_have_ellipsis = len(np.argwhere(real_slices == Ellipsis).flatten()) != 0 - - # If output of SomeOp is sliced on the last dimension on the last dimension (like described in 1 case), skipping Complex op is enough. - # Otherwise, (like described in 2 case) Transpose insertion is needed to align data arrangement. 
- if slice_dim_for_real_part == emulated_complex_tensor_rank - 1 or input_slices_have_ellipsis: - complex_node.out_port(0).get_connection().set_source(strided_slice_real.in_port(0).get_source()) - else: - complex_node_name = complex_node.soft_get('name', complex_node.id) - perm = int64_array([*range(0, slice_dim_for_real_part), - *range(slice_dim_for_real_part + 1, emulated_complex_tensor_rank), - slice_dim_for_real_part]) - transpose = create_op_with_const_inputs(graph, Transpose, {1: perm}, - {'name': complex_node_name + '/cmplx'}) - complex_node.out_port(0).get_connection().set_source(transpose.out_port(0)) - strided_slice_real.in_port(0).get_source().connect(transpose.in_port(0)) - rename_nodes([(complex_node, complex_node_name + '/to_be_removed'), (transpose, complex_node_name)]) diff --git a/tools/mo/openvino/tools/mo/middle/SharedWeightsDuplication.py b/tools/mo/openvino/tools/mo/middle/SharedWeightsDuplication.py deleted file mode 100644 index aa29c4cdd02a13..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/SharedWeightsDuplication.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.op import Op - - -class SharedWeightsDuplication(MiddleReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.CheckForCycle import CheckForCycle - return [CheckForCycle] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import PreMiddleStart - return [PreMiddleStart] - - def find_and_replace_pattern(self, graph: Graph): - """ - This function finds all const data nodes that have more that one consumer and then duplicate them - """ - data_nodes = [Node(graph, id) for id in graph.nodes() if Node(graph, id).soft_get('kind') == 'data'] - for node in data_nodes: - # Check that node has const values and more than one consumer - if len(node.in_nodes()) and node.in_node().soft_get('type') == 'Const' and len(node.out_nodes()) > 1 and \ - node.value is not None: - # Here we delete all edges between base node and it's consumers (except first), and then duplicate this - # node to connect with other consumers - for v, d in node.get_outputs(): - out_node = Node(graph, v) - e_attrs = d - graph.remove_edge(node.id, out_node.id) - data = Op.create_input_data_node(graph, "Copy_{}".format(node.id), mo_array(node.value), - graph.node[node.id]) - - graph.add_edges_from([(data.id, out_node.id, e_attrs)]) - diff --git a/tools/mo/openvino/tools/mo/middle/SliceConverter.py b/tools/mo/openvino/tools/mo/middle/SliceConverter.py deleted file mode 100644 index 0b679cab332139..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/SliceConverter.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.graph.port import Port -from 
openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.clamp import Clamp -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.strided_slice import StridedSlice - - -def create_ss_interval_border(graph: Graph, slice_border_port: Port, shape: np.ndarray, axes: np.ndarray, node_name: str): - """ - This function creates "begin"/"end" parameters for the StridedSlice based on Slice's "starts"/"ends" - - :param graph: graph to operate on. - :param slice_border_port: node output port that provides "starts"/"ends" values for the Slice. - :param shape: input shape of the Slice - :param axes: axes that "starts" and "ends" apply to - :param node_name: Slice node name - :return: Concat node that forms "begin"/"end" values for the StridedSlice - """ - # the value for 'starts' or 'ends' might be maximum/minimum possible value of int64. This - # value must be converted to maximum/minimum of int32 because such big values do not fit into the int32 which is - # supported by the StridedSlice layer - clamp = create_op_with_const_inputs( - graph, Clamp, port_value_dict={1: np.iinfo(np.int32).min, 2: np.iinfo(np.int32).max}, - op_attrs=dict(name=node_name + '/Clamp')) - clamp.in_port(0).connect(slice_border_port) - # we have to convert "starts"/"ends" values from the network to one data type with constant values that are created - # here to prevent type errors in Concat node - cast = Cast(graph, dict(name=node_name + '/CastToI64', dst_type=np.int64)).create_node() - cast.in_port(0).connect(clamp.out_port(0)) - concat = Concat(graph, dict(name=node_name + '/Concat', axis=0)).create_node() - for value_idx, port_idx in enumerate(axes): - concat.add_input_port(port_idx) - # "axes" may not be sorted, so we need to split "starts"/"ends" values and connect each value to the correct - # Concat input port - value = create_op_with_const_inputs( - graph, Gather, port_value_dict={1: int64_array([value_idx]), 2: int64_array(0)}, - op_attrs={'name': node_name + '/Gather'}) - cast.out_port(0).connect(value.in_port(0)) - value.out_port(0).connect(concat.in_port(port_idx)) - for port_idx in range(len(shape)): - if not concat.is_in_port_connected(port_idx): - concat.add_input_port(port_idx) - # This border value would be ignored in StridedSlice because of the begin_mask\end_mask - const = Const(graph, dict(name=node_name + '/Const', value=int64_array([0]))).create_node() - const.out_port(0).connect(concat.in_port(port_idx)) - - return concat - - -class ConvertSlice(MiddleReplacementPattern): - """ - This class converts a Slice operation to StridedSlice in reshape-able way by parsing the 'starts' and 'ends' - parameters based on the 'axes' parameter - """ - - enabled = True - force_clean_up = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='Slice'): - node_name = node.soft_get('name', node.id) - - input_shape = node.in_port(0).data.get_shape() - if node.is_in_port_connected(3): - axes = node.in_port(3).data.get_value().copy() - assert axes is not None, 'The input with axes is not constant for node {}'.format(node_name) - for i, val in enumerate(axes): - axes[i] = get_canonical_axis_index(input_shape, val) - else: - axes = int64_array(range(len(input_shape))) - - ss_begin = create_ss_interval_border(graph, node.in_port(1).get_source(), input_shape, axes, node_name) - ss_end = create_ss_interval_border(graph, node.in_port(2).get_source(), input_shape, axes, node_name) - 
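In plain terms, the helper above scatters the per-axis 'starts'/'ends' of a Slice into full-rank begin/end vectors, clamping int64 sentinel values to the int32 range and leaving zero placeholders on axes the Slice does not touch. A rough NumPy equivalent of that value handling, as a standalone sketch rather than the graph-building helper itself:

import numpy as np

def scatter_slice_borders(values, axes, rank):
    # values: the Slice 'starts' (or 'ends') entries; axes: the axes they apply to.
    # Returns a full-rank border vector plus the mask marking which entries are
    # real borders (1) and which are placeholders the StridedSlice ignores (0).
    # (In the deleted pass the mask was built separately, next to the borders.)
    int32 = np.iinfo(np.int32)
    border = np.zeros(rank, dtype=np.int64)
    mask = np.zeros(rank, dtype=np.int64)
    for value, axis in zip(values, axes):
        border[axis] = np.clip(value, int32.min, int32.max)  # int64 sentinels -> int32 range
        mask[axis] = 1
    return border, mask

begin, begin_mask = scatter_slice_borders([1, np.iinfo(np.int64).min], axes=[2, 3], rank=4)
assert begin.tolist() == [0, 0, 1, int(np.iinfo(np.int32).min)]
assert begin_mask.tolist() == [0, 0, 1, 1]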
node.in_port(1).disconnect() - node.in_port(2).disconnect() - rename_nodes([(ss_begin, node_name + '/Begin'), (ss_end, node_name + '/End')]) - - if node.is_in_port_connected(4): - steps = node.in_port(4).data.get_value() - assert steps is not None, 'The input with steps is not constant for node {}'.format(node_name) - else: - steps = np.ones([axes.size], dtype=np.int64) - - ss_begin_mask = np.zeros(len(input_shape), dtype=np.int64) - ss_end_mask = np.zeros(len(input_shape), dtype=np.int64) - ss_step = np.ones(len(input_shape), dtype=np.int64) - - for i, axis in enumerate(axes): - ss_begin_mask[axis] = 1 - ss_end_mask[axis] = 1 - ss_step[axis] = steps[i] - - ss_strides = Const(graph, dict(name=node_name + '/Strides', value=ss_step)).create_node() - - ss = StridedSlice(graph, dict(name='ss', new_axis_mask=np.zeros(len(input_shape), dtype=np.int64), - shrink_axis_mask=np.zeros(len(input_shape), dtype=np.int64), - ellipsis_mask=np.zeros(len(input_shape), dtype=np.int64), - begin_mask=ss_begin_mask, - end_mask=ss_end_mask)).create_node() - - node.in_port(0).get_connection().set_destination(ss.in_port(0)) - ss.in_port(1).connect(ss_begin.out_port(0)) - ss.in_port(2).connect(ss_end.out_port(0)) - ss.in_port(3).connect(ss_strides.out_port(0)) - node.out_port(0).get_connection().set_source(ss.out_port(0)) - - rename_nodes([(node, node_name + '/ShouldBeDeleted'), (ss, node_name)]) diff --git a/tools/mo/openvino/tools/mo/middle/SliceLikeToStridedSlice.py b/tools/mo/openvino/tools/mo/middle/SliceLikeToStridedSlice.py deleted file mode 100644 index 7712e10f750b15..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/SliceLikeToStridedSlice.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -from typing import Dict - -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice -from openvino.tools.mo.utils.shape import get_shape_values_by_range_idxs, new_shape_node_from_shape_nodes, \ - get_shape_and_rank_nodes_by_port - - -class SliceLikeToStridedSlice(MiddleReplacementPattern): - """ - Replace mxnet slice_like operation with StridedSlice in reshapable way. - The begin parameter for StridedSlice is always a zero vector. - The end parameter depends on the slice_like inputs and axes. - - 1. If slice_like inputs has the same ranks, we can use second input shape (shape_like) as the end parameter for - StridedSlice. Axes parameter will form end_mask, that allows to use slice only on the desired axes. - Example: - input_shape = [1, 64, 128, 256], shape_like = [1, 2, 3, 4], axes = [2, 3]. - In that case end = shape_like = [1, 2, 3, 4], but end_mask = [0, 0, 1, 1], so output_shape = [1, 64, 3, 4] - - 2. Axes parameter has the last dimension of the first input shape (in that case shape_like >= input_shape). - Here we can use only a part of shape_like as the end parameter. - Example: - input_shape = [1, 64, 128, 256], shape_like = [1, 2, 3, 4, 5], axes = [2, 3]. - end = shape_like[:4] = [1, 2, 3, 4], end_mask = [0, 0, 1, 1], output_shape = [1, 64, 3, 4] - - 3. Usual case, where we form end parameter by concatenate parts of shape_like and input_shape. - Examples: - input_shape = [1, 64, 128, 256, 512], shape_like = [1, 2, 3, 4], axes = [2, 3]. 
- end = shape_like[:4] + input_shape[4:] = [1, 2, 3, 4, 512], - end_mask = [0, 0, 1, 1, 0], output_shape = [1, 64, 3, 4, 512] - - input_shape = [1, 64, 128, 256], shape_like = [1, 2, 3, 4, 5], axes = [0, 2]. - end = shape_like[:3] + input_shape[3:] = [1, 2, 3, 256], - end_mask = [1, 0, 1, 0], output_shape = [1, 64, 3, 256] - """ - - enabled = True - graph_condition = [lambda graph: graph.graph['fw'] == 'mxnet'] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('op', dict(kind='op', op='slice_like')) - ], - edges=[] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: Dict[str, Node]): - node = match['op'] - name = node.soft_get('name', node.id) - input_shape = node.in_port(0).data.get_shape() - second_input_shape = node.in_port(1).data.get_shape() - - begin_mask = np.zeros(len(input_shape), dtype=np.int64) - end_mask = np.zeros(len(input_shape), dtype=np.int64) - - for i in node.axes: - end_mask[i] = np.int64(1) - - new_axis_mask = np.zeros(len(input_shape), dtype=np.int64) - shrink_axis_mask = np.zeros(len(input_shape), dtype=np.int64) - ellipsis_mask = np.zeros(len(input_shape), dtype=np.int64) - - ss = create_op_with_const_inputs(graph, StridedSlice, - port_value_dict={1: np.zeros(len(input_shape), dtype=np.int64)}, - op_attrs={'name': 'StridedSlice', 'begin_mask': begin_mask, - 'end_mask': end_mask, 'new_axis_mask': new_axis_mask, - 'shrink_axis_mask': shrink_axis_mask, - 'ellipsis_mask': ellipsis_mask}) - if input_shape.size == second_input_shape.size: - end = Shape(graph, dict(name=name + '/End')).create_node() - end.in_port(0).connect(node.in_port(1).get_source()) - ss.in_port(2).connect(end.out_port(0)) - else: - shape_like, rank_like = get_shape_and_rank_nodes_by_port(node.in_port(1).get_source()) - end_first_part = get_shape_values_by_range_idxs(shape_like, rank_like, 0, node.axes[-1], include_end=True) - if input_shape.size - 1 == node.axes[-1]: - ss.in_port(2).connect(end_first_part.out_port(0)) - else: - shape, rank = get_shape_and_rank_nodes_by_port(node.in_port(0).get_source()) - end_second_part = get_shape_values_by_range_idxs(shape, rank, node.axes[-1], -1, include_begin=False, - include_end=True) - end = new_shape_node_from_shape_nodes([end_first_part, end_second_part]) - ss.in_port(2).connect(end.out_port(0)) - - node.in_port(0).get_connection().set_destination(ss.in_port(0)) - node.in_port(1).disconnect() - node.out_port(0).get_connection().set_source(ss.out_port(0)) - - rename_nodes([(node, name + '/ShouldBeDeleted'), (ss, name)]) diff --git a/tools/mo/openvino/tools/mo/middle/SplitConcatPairToInterpolate.py b/tools/mo/openvino/tools/mo/middle/SplitConcatPairToInterpolate.py deleted file mode 100644 index 477fdccbcba335..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/SplitConcatPairToInterpolate.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from typing import Optional - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.activation_ops import Floor -from openvino.tools.mo.ops.elementwise import Mul -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const 
-from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice - - -def get_concat_after_split(split: Node) -> Optional[Node]: - """ - This function gets consumers of the 'split' node, checks that the following conditions are fulfilled: - 1) 'split' node has only one consumer; - 2) for any output port of 'split', number of corresponding input ports of the consumer is the same; - 3) for any output port 'i' of the 'split', corresponding input ports of the consumer are - [i * m, ..., i * m + (m - 1)], where 'm' is the same for all 'i'; - 4) the consumer operation is 'Concat'; - 5) 'split' is a unique producer for this 'Concat'; - and, if all these conditions are fulfilled, returns the above mentioned 'Concat' node. Otherwise, if some of these - conditions is false, this functions returns None. - :param split: Split node - :return: Concat node, if all conditions are fulfilled, or None otherwise - """ - # If number of output nodes of 'split' is not equal to 1, then the transformation is not applicable. - split_outputs = [d.node for _, p in split.out_ports().items() for d in p.get_connection().get_destinations()] - names_of_split_outputs = set([n.name for n in split_outputs]) - if len(names_of_split_outputs) != 1: - return - - groups_of_inputs = [[d.idx for d in p.get_connection().get_destinations()] for _, p in split.out_ports().items()] - sizes_of_groups = set([len(g) for g in groups_of_inputs]) - # If numbers of consumer ports are various for various output ports of 'split', then the transformation - # is not applicable. - if len(sizes_of_groups) != 1: - return - # The transformation is applicable iff output port 0 of 'split' goes to ports [0, ..., m-1] of next node, - # output port 1 of 'split' goes to ports [m, ..., m + (m-1)] of next node, ..., output port i of 'split' - # goes to ports [i * m, ..., i * m + (m - 1)], and so on. - flatten_groups = [i for g in groups_of_inputs for i in g] - if flatten_groups != list(range(0, len(flatten_groups))): - return - - dest = split.out_port(0).get_destinations()[0].node - # The transformation is applicable, only if next node is Concat. - if dest.soft_get('type') != 'Concat': - return - - # The transformation is applicable, only if Split is a unique producer for Concat. 
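The port-grouping condition spelled out above boils down to a check that the flattened destination indices form the sequence 0..n-1 with a single group size. A standalone sketch of that check; the groups are assumed to be ordered by Split output port, as in the list comprehension above:

def split_feeds_concat_in_order(groups_of_inputs):
    # groups_of_inputs[i] holds the Concat input indices fed by Split output port i.
    # Port i must feed inputs [i*m, ..., i*m + (m - 1)] for one common group size m.
    group_sizes = {len(group) for group in groups_of_inputs}
    if len(group_sizes) != 1:
        return False
    flattened = [idx for group in groups_of_inputs for idx in group]
    return flattened == list(range(len(flattened)))

assert split_feeds_concat_in_order([[0, 1], [2, 3], [4, 5]])   # group size m = 2
assert not split_feeds_concat_in_order([[0, 2], [1, 3]])       # interleaved -> not applicable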
- dest_inputs = [p.get_source().node for p in dest.in_ports().values() if not p.disconnected()] - names_of_concat_inputs = set([n.soft_get('name', n.id) for n in dest_inputs]) - expected_number_of_unique_inputs = 1 if dest.has_valid('axis') else 2 - if len(names_of_concat_inputs) != expected_number_of_unique_inputs: - return - - return dest - - -def get_interpolate_pattern(split: Node) -> dict: - split_shape = split.in_port(0).data.get_shape() - if len(split_shape) not in {4, 5}: - return {} - concat = get_concat_after_split(split) - if concat is None: - return {} - return {'split': split, 'concat': concat} - - -def get_split_scale(split: Node) -> int: - split_dests = [d.node for _, p in split.out_ports().items() for d in p.get_connection().get_destinations()] - num_of_split_dests = len(split_dests) - num_of_split_out_ports = len(split.out_ports()) - fractional_part = num_of_split_dests / num_of_split_out_ports - num_of_split_dests // num_of_split_out_ports - assert fractional_part == 0, "Number of output ports of Split must be multiple of number of inputs of Concat" - return len(split_dests) // len(split.out_ports()) - - -def replace_interpolate_pattern(graph: Graph, match: dict): - split = match['split'] - scale = float32_array([get_split_scale(split)]) - axis = int(split.in_port(1).get_connection().get_source().node.value) - split_node_name = split.name - axis_node = Const(graph, {'name': split_node_name + '/axis', 'value': int64_array([axis])}).create_node() - - shape_node = Shape(graph, dict(name=split_node_name + '/Shape')).create_node() - scales_node = Const(graph, dict(name=split_node_name + '/scales', value=scale)).create_node() - mul_node = Mul(graph, dict(name=split_node_name + '/Mul')).create_node() - scales_node.out_port(0).connect(mul_node.in_port(1)) - - strided_slice_node = create_op_with_const_inputs(graph, - StridedSlice, - {1: int64_array([axis]), 2: int64_array([axis + 1])}, - { - 'name': split_node_name + '/StridedSlice', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0]) - }) - shape_node.out_port(0).connect(strided_slice_node.in_port(0)) - - cast_shape_to_float = Cast(graph, {'dst_type': np.float32}).create_node() - - strided_slice_node.out_port(0).connect(cast_shape_to_float.in_port(0)) - cast_shape_to_float.out_port(0).connect(mul_node.in_port(0)) - - interp_node = Interpolate(graph, - dict(name=split_node_name + '/Interpolate', - mode='nearest', - antialias=0, pads_begin=int64_array([0]), pads_end=int64_array([0]), - coordinate_transformation_mode='half_pixel', nearest_mode='round_prefer_floor', - cube_coeff=-0.75, version='opset4', shape_calculation_mode='scales', - in_ports_count=4, maybe_part_of_sequence=True)).create_node() - - floor_node = Floor(graph, {'name': split_node_name + '/Floor'}).create_node() - cast_mul_result_to_int = Cast(graph, {'dst_type': np.int64}).create_node() - - mul_node.out_port(0).connect(floor_node.in_port(0)) - floor_node.out_port(0).connect(cast_mul_result_to_int.in_port(0)) - - cast_mul_result_to_int.out_port(0).connect(interp_node.in_port(1)) - scales_node.out_port(0).connect(interp_node.in_port(2)) - axis_node.out_port(0).connect(interp_node.in_port(3)) - - match['concat'].out_port(0).get_connection().set_source(interp_node.out_port(0)) - - split_connection = split.in_port(0).get_connection() - split_connection.set_destination(interp_node.in_port(0)) - 
split_connection.get_source().connect(shape_node.in_port(0)) - - -class SplitConcatPairToInterpolate(MiddleReplacementPattern): - """ - This transformation looks for Interpolation layer implemented using simple operations, i.e. Split and Concat, - and replaces found pattern with a sequence of Shape, StridedSlice, Const, Mul, Interpolate. - - Found pattern: - nodes=[ - ('split', dict(kind='op', op='Split')), - ('concat', dict(kind='op', op='Concat')), - ], - edges=[ - ('split', 'concat'), - ] - - Here we assume that - 1) 'split' is in NDHWC layout and is a 5D-tensor; - 2) split dimensions for 'split' belongs to {1, 2, 3}; - 3) all outputs of 'split' go to only inputs of 'concat'; - 4) 'concat' takes inputs only from 'split'; - 5) split_dim of 'split' is equal to axis of 'concat'. - - Found pattern will be replaced with - nodes=[ - ('shape', dict(kind='op', op='Shape')), - ('strided_slice', dict(kind='op', op='StridedSlice')), - ('scales', dict(kind='op', op='Const')), - ('scaled_shape', dict(kind='op', op='Mul')), - ('interp', dict(kind='op', op='Interpolate')) - ], - edges=[ - ('shape', 'strided_slice', {'in': 0}), - ('strided_slice', 'scaled_shape', {'in': 0}), - ('scales', 'scaled_shape', {'in': 1}), - ('scaled_shape', 'interp', {'in': 1}), - ] - - Here scaling factor in Interpolate is equal to a quotient of dividing number of input ports of 'concat' - by number of output ports of 'split'. - """ - enabled = True - force_clean_up = True - - def run_before(self): - from openvino.tools.mo.middle.InterpolateSequenceToInterpolate import InterpolateSequenceToInterpolate - return [InterpolateSequenceToInterpolate] - - def find_and_replace_pattern(self, graph: Graph): - log.debug('Enabled replacement of a pair of Split and Concat with Interpolate.') - splits = graph.get_op_nodes(op='Split') - patterns = [] - - for split_node in splits: - interpolate_pattern = get_interpolate_pattern(split_node) - if interpolate_pattern: - patterns.append(interpolate_pattern) - - for pattern in patterns: - replace_interpolate_pattern(graph, pattern) diff --git a/tools/mo/openvino/tools/mo/middle/StridedSliceNormalizer.py b/tools/mo/openvino/tools/mo/middle/StridedSliceNormalizer.py deleted file mode 100644 index 6853294eca0646..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/StridedSliceNormalizer.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.split import VariadicSplit -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension, dynamic_dimension_value, \ - is_dynamic_slice -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import PermuteAttrs -from openvino.tools.mo.ops.strided_slice import StridedSlice -from openvino.tools.mo.utils.error import Error - - -class StridedSliceNormalizer(MiddleReplacementPattern): - r""" - StridedSlice is not normal if it cannot be permuted by ApplyPermutations. This normalizer - inserts blank colons ':' in slice expression so that it can be correctly permuted - from NHWC to NCHW layout. 
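A standalone NumPy sketch of the equivalence that the SplitConcatPairToInterpolate replacement above relies on, assuming the typical case where Split produces one-element slices along the axis; the tensor shape and scale value are illustrative:

import numpy as np

x = np.arange(2 * 3 * 4, dtype=np.float32).reshape(2, 3, 4)
axis, scale = 2, 2                                  # upsample the last axis by 2

chunks = np.split(x, x.shape[axis], axis=axis)      # what Split produces
# every Split output feeds 'scale' consecutive Concat inputs
concat = np.concatenate([c for c in chunks for _ in range(scale)], axis=axis)

# the Split/Concat pair behaves as nearest-neighbor upsampling by 'scale'
assert np.array_equal(concat, np.repeat(x, scale, axis=axis))
# scale recovered by the transformation: Concat inputs / Split outputs
assert (len(chunks) * scale) // len(chunks) == scale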
It changes masks and inserts blank begin, end and strides values. - In order to successfully handle StridedSlice in ShapeOf subgraphs - changes must be done by inserting nodes not just by overwriting constants. - - StridedSlice is not normal in 2 cases: - 1. rank of a slice expression is less than rank of input tensor - 2. there is an ellipsis - - 1st case example - BEFORE: - | - begin - value=[0, 0] - | - - AFTER: - | - begin Const - value=[0, 0] value=[0, 0] - \ / - \ / - Concat - value=[0, 0, 0, 0] - | - - Input of a shape [16, 100, 100, 3] in NHWC layout, output = input[:, 0:50]. - StridedSlice will be extended to input[:, 0:50, :, :]. - After permutation to NCHW output = input[:, :, 0:50, :]. - Example for 'begin' input transformation is shown above on the picture. - 'end' and 'strides' inputs will be transformed the same way. - - 2nd case example - BEFORE: - | - begin - value=[1, 50] - | - - AFTER: - | - begin - value=[1, 1, 1] - | - VariadicSplit - / \ - / \ - / Const \ - \ val=[0, 0] / - \ | / - \ | / - Concat - value=[1, 0, 0, 1, 1] - | - - Input of a shape [16, 10, 100, 100, 3] in NDHWC layout, output = input[1:4, ..., 1:51, 1:3], - output_shape = [3, 10, 100, 50, 2]. In order to perform correct layout permutation - ellipsis must be replaced with colons: input[1:4, ..., 1:51, 1:3] => input[1:4, :, :, 1:51, 1:3]. - After layour permutation input[1:4, 1:3, :, : 1:5]. - - In the places of colons blank begin, end and strides values should be inserted. - In order to do that we split input and insert blank zeros to the middle. - Example for 'begin' input transformation is shown above on the picture. - 'end' and 'strides' inputs will be transformed the same way. - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.middle.LayoutChangeForConstantShapePaths import LayoutChangeForConstantShapePaths - return [LayoutChangeForConstantShapePaths] - - def run_after(self): - from openvino.tools.mo.middle.SliceConverter import ConvertSlice - return [ConvertSlice] - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(type='StridedSlice'): - StridedSliceNormalizer.normalize_strided_slice(graph, node) - PermuteAttrs.create_permute_attrs(node, - attrs=[('begin_mask', 'input:0'), # but indeed depends from slice_rank - ('end_mask', 'input:0'), - ('new_axis_mask', 'input:0'), - ('shrink_axis_mask', 'input:0'), - ('ellipsis_mask', 'input:0')]) - - # StridedSliceNormalizer inserted nodes that changed original begin, end, and strides data nodes - # Until now it was not possible to set correct permutations - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:1', 'slice', 'dim_size') - PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:2', 'slice', 'dim_size') - if node.is_in_port_connected(3): - PermuteInputs().set_input_permutation(node.in_node(3), node, 'input:3', 'slice', 'dim_size') - - # If there are new_axis_mask or shrink_axis_mask then StridedSlice should be performed in the - # original layout, same as for Squeeze, Unsqueeze, Reshape, Gather - if np.count_nonzero(node['new_axis_mask']) > 0 or np.count_nonzero(node['shrink_axis_mask']) > 0: - node['reinterp_shape'] = True - node['nchw_layout'] = True - - @staticmethod - def normalize_strided_slice(graph: Graph, node: Node): - input_shape = node.in_port(0).data.get_shape() - input_rank = len(input_shape) - begin = node.in_port(1).data.get_value() - if begin is not None: - slice_rank = len(begin) - else: - slice_rank = input_rank + 
np.count_nonzero(node.new_axis_mask) - np.count_nonzero(node.shrink_axis_mask) - - StridedSlice.align_mask_with_slice_rank(node, slice_rank) # if StridedSlice is created after partial_infer - StridedSliceNormalizer.normalize_slices_attr(node) - - num_insertions = input_rank - slice_rank + np.count_nonzero(node.new_axis_mask) - assert num_insertions >= 0, 'slice_rank - num_new_axis must <= input rank. Got instead: ' \ - 'input_rank = {}, slice_rank = {}, num_new_axis = {}'. \ - format(input_rank, slice_rank, np.count_nonzero(node.new_axis_mask)) - - if np.any(node.ellipsis_mask): - assert np.count_nonzero(node.ellipsis_mask) == 1, 'only one ellipsis_mask nonzero value is allowed' - ellipsis_start = np.nonzero(node.ellipsis_mask)[0][0] - # since we don't expect values in begin and end: take the whole range along ellipsis_start - node.begin_mask[ellipsis_start] = 0 - node.end_mask[ellipsis_start] = 0 - node.ellipsis_mask[ellipsis_start] = 0 - insertion_start_idx = ellipsis_start + 1 - - StridedSliceNormalizer.unroll_ellipsis_for_inputs(graph, node, ellipsis_start, num_insertions) - elif num_insertions > 0: - insertion_start_idx = slice_rank # insert blank values to mask ends - StridedSliceNormalizer.extend_inputs(node, num_insertions) - - if num_insertions > 0: - # insert blank values for ellipsis unrolling and extending - for mask_name in StridedSlice.get_mask_names(): - node[mask_name] = np.insert(node[mask_name], insertion_start_idx, [0] * num_insertions).astype(int) # pylint: disable=possibly-used-before-assignment - - @staticmethod - def unroll_ellipsis_for_inputs(graph: Graph, node: Node, ellipsis_start: int, num_insertions: int): - node_name = node.soft_get('name', node.id) - - for i, input_name in [(1, 'begin'), (2, 'end'), (3, 'strides')]: - if i == 3 and not node.is_in_port_connected(3): - continue # no need to extend strides if they are not connected - - blank_values_arr = np.zeros(num_insertions) if input_name != 'strides' else np.ones(num_insertions) - blank_values_node = Const(graph, {'name': node_name + '/const_to_unroll_{}_ellipsis'.format(input_name), - 'value': int64_array(blank_values_arr)}).create_node() - - concat_in_ports_count = 3 if ellipsis_start != 0 else 2 - concat = Concat(graph, {'axis': 0, 'name': node_name + '/concat_{}'.format(input_name), - 'in_ports_count': concat_in_ports_count}).create_node() - - if ellipsis_start != 0: - split = create_op_with_const_inputs(graph, VariadicSplit, {1: int64_array(0), - 2: int64_array([ellipsis_start, -1])}, - {'name': node_name + '/split_for_{}_ellipsis'.format(input_name), - 'out_ports_count': 2}) - node.in_port(i).get_connection().set_destination(split.in_port(0)) - - concat.in_port(0).connect(split.out_port(0)) - concat.in_port(1).connect(blank_values_node.out_port(0)) - concat.in_port(2).connect(split.out_port(1)) - else: - concat.in_port(0).connect(blank_values_node.out_port(0)) - node.in_port(i).get_connection().set_destination(concat.in_port(1)) - - concat.out_port(0).get_connection().set_destination(node.in_port(i)) - - @staticmethod - def extend_inputs(node: Node, num_insertions: int): - graph = node.graph - node_name = node.soft_get('name', node.id) - - for i, input_name in [(1, 'begin'), (2, 'end'), (3, 'strides')]: - if i == 3 and not node.is_in_port_connected(3): - continue # no need to extend strides if they are not connected - - blank_values_arr = np.zeros(num_insertions) if input_name != 'strides' else np.ones(num_insertions) - blank_values_node = Const(graph, {'name': node_name + 
'/extend_{}_const'.format(input_name), - 'value': int64_array(blank_values_arr)}).create_node() - - if node.in_port(i).get_source().node.soft_get('type') == 'Concat': - # concat already exists - concat = node.in_port(i).get_source().node - # because output data node shape will be changed - # while shapes will be reinferred no need to check consistency - concat['override_output_shape'] = True - - last_in_port = max(concat.in_ports().keys()) - assert not concat.in_port(last_in_port).disconnected(), 'The last in_port of Concat node {} ' \ - 'should be connected'. \ - format(concat.soft_get('name', node.id)) - - concat.add_input_port(last_in_port + 1) - concat.in_port(last_in_port + 1).connect(blank_values_node.out_port(0)) - else: - # have to create concat - concat = Concat(graph, {'axis': 0, 'name': node_name + '/concat_{}'.format(input_name), - 'in_ports_count': 2}).create_node() - node.in_port(i).get_connection().set_destination(concat.in_port(0)) - concat.in_port(1).connect(blank_values_node.out_port(0)) - concat.out_port(0).get_connection().set_destination(node.in_port(i)) - - @staticmethod - def normalize_slices_attr(node: Node): - # removes negative starts, ends and magic numbers from 'slice' attr which is used by ConvertGroupedStridedSlice - slice_rank = len(node['slices']) - data_shape = node.in_port(0).data.get_shape() - - node_name = node.soft_get('name', node.id) - if node.is_in_port_connected(3): - strides = node.in_port(3).data.get_value() - if strides is None: - raise Error('StridedSlice operation for node {} supports only constant strides input'.format(node_name)) - else: - strides = np.ones(len(node['slices']), dtype=np.int32) - - num_ellipsis_inserts = len(data_shape) - slice_rank + np.count_nonzero(node.new_axis_mask) + 1 - res_slices = [] - - in_idx = 0 - for i, s in enumerate(node['slices']): - if node.new_axis_mask[i]: - res_slices.append(slice(0, 1, 1)) - elif node.shrink_axis_mask[i]: - res_slices.append(slice(s, s + 1, strides[i])) # need strides if shrink index is negative - elif node.ellipsis_mask[i]: - for idx in range(num_ellipsis_inserts): - res_slices.append(slice(0, data_shape[in_idx], 1)) - in_idx += 1 - else: - res_slices.append(s) - - if not (node.new_axis_mask[i] or node.ellipsis_mask[i]): - if res_slices[-1] != dynamic_dimension_value and data_shape[in_idx] is not dynamic_dimension and \ - res_slices[-1] is not None and not is_dynamic_slice(res_slices[-1]): - res_slices[-1] = slice(*res_slices[-1].indices(data_shape[in_idx])) # convert negative begins/ends - in_idx += 1 - node.slices = mo_array(res_slices) diff --git a/tools/mo/openvino/tools/mo/middle/SwapAxesMiddleReplacer.py b/tools/mo/openvino/tools/mo/middle/SwapAxesMiddleReplacer.py deleted file mode 100644 index c44520197e16ce..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/SwapAxesMiddleReplacer.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const - - -class SwapAxisMiddleReplacer(MiddleReplacementPattern): - enabled = True - - def pattern(self): - return dict( - nodes=[('op', dict(kind='op', op='SwapAxis'))], - edges=[], - ) - - def replace_pattern(self, graph: Graph, match: [str, Node]): - swapaxis = match['op'] - assert len(swapaxis.in_ports()) == 1 - assert swapaxis.has_and_set('order') - 
order = swapaxis.order - - swapaxis.add_input_port(1) - const = Const(graph, {'value': order, 'name': swapaxis.soft_get('name', swapaxis.id) + '/Order'}).create_node() - const.out_port(0).connect(swapaxis.in_port(1)) - - Transpose.update_node_stat(swapaxis, {'need_shape_inference': True}) - - del swapaxis['order'] diff --git a/tools/mo/openvino/tools/mo/middle/TF_lstm_cell_to_generic.py b/tools/mo/openvino/tools/mo/middle/TF_lstm_cell_to_generic.py deleted file mode 100644 index 574c49438e78f1..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/TF_lstm_cell_to_generic.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class TensorFlowLSTMtoGeneric(MiddleReplacementPattern): - """ - Resolves all differences in TensorFlow LSTMCell and OpenVINO LSTMCell: - - weights transposing - - shift_const value addition to biases - - extra inputs deletion - """ - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.permute_tensor_iterator import TransposeTensorIteratorLSTM - return [TransposeTensorIteratorLSTM] - - - def pattern(self): - return dict( - nodes=[('lstm', dict(op='LSTMCell', tf=True))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - weights_node = match['lstm'].in_node(3) - biases_node = match['lstm'].in_node(4) - node = match['lstm'] - shift_const = node.shift_const - - # make sure that the node is the only consumer or weights and biases - # to let us modify them without hassle - assert len(weights_node.out_nodes()) == 1 - assert len(biases_node.out_nodes()) == 1 - - # Assign temporary shape for them for easier manipulation - # TF stores weights in IO order - input_size = node.in_node(0).shape[1] - hidden_size = node.in_node(1).shape[1] - weights = weights_node.value - biases = biases_node.value - assert weights.shape[0] == input_size + hidden_size, \ - "weights.shape={} input_size={} hidden_size={}".format(weights.shape, input_size, hidden_size) - assert weights.shape[1] == biases.shape[0] == 4 * hidden_size, \ - "weights.shape={} biases.shape={} hidden_size={}".format(weights.shape, biases.shape, hidden_size) - - weights = weights.reshape([ - weights.shape[0], - 4, # gates - hidden_size - ]) - - biases = biases.reshape([ - 4, # gates - hidden_size - ]) - - # Reorder gates icfo --> fico for both weights and biases - gate_reorder = [2, 0, 1, 3] - weights = np.take(weights, gate_reorder, axis=1) - biases = np.take(biases, gate_reorder, axis=0) - - # shift_const.value should be added to the first 1/4th part of the biases (f-gate: 0) - # Note: in case of moving this code up before gate reordering, the addition - # should be applied at different place - biases[0] += shift_const - - # Return to the original shapes - weights = weights.reshape([weights.shape[0], -1]) - biases = biases.flatten() - - # TF stores weights in IO, but OV requires it in OI: transpose - weights = weights.transpose() - - weights_node.value = weights - weights_node.shape = int64_array(weights.shape) - biases_node.value = biases - biases_node.shape = int64_array(biases.shape) - - # Cut all extra inputs off - for i in range(len(node.inputs), len(node.inputs) + 
len(node.extra_inputs)): - node.graph.remove_edge(node.in_node(i).id, node.id) diff --git a/tools/mo/openvino/tools/mo/middle/TensorIteratorBackEdge.py b/tools/mo/openvino/tools/mo/middle/TensorIteratorBackEdge.py deleted file mode 100644 index 4e7867011b5b2b..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/TensorIteratorBackEdge.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.middle.TensorIteratorCondition import DynamicDecoderConditionMatcher -from openvino.tools.mo.ops.TensorIterator_ops import TensorIteratorBackEdge, TensorIteratorOutput -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class BackEdgesMatching(MiddleReplacementPattern): - """ - This pattern are needed for matching back edges in while loops in TF graphs. - Back edge is a chain of nodes in while loop that iterate one variable in graph over loop steps. It consist of - nodes: - Exit (optional) - ^ - | - Enter () -> Merge -> Switch -> Identity -> SOME OPERATIONS -> NextIteration -> - ^ | - | | - ------------------------------------------------------------------ - The structure of pattern without Data nodes between ops (every node is named as op attribute of this node): - Data-- - | - NextIteration -> Merge-- - | - ->Switch (out=1) -> Identity - | - TensorIteratorCondition-- - """ - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - # since the pattern of this transformation contains TensorIteratorCondition, - # condition matchers must be applied first - from openvino.tools.mo.middle.TensorIteratorCondition import DynamicDecoderConditionMatcher, LoopConditionMatcher, \ - SimpleConditionMatcher - return [DynamicDecoderConditionMatcher, SimpleConditionMatcher, LoopConditionMatcher] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - return [TensorIteratorMerge] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('Enter_1_data', dict(kind='data')), - - ('Merge_1', dict(kind='op', op='Merge')), - ('Merge_1_data', dict(kind='data')), - - ('Switch_1', dict(kind='op', op='Switch')), - ('Switch_1_data', dict(kind='data')), - - ('Identity_1', dict(kind='op', op='Identity')), - ('Identity_1_data', dict(kind='data')), - - ('NextIteration', dict(kind='op', op='NextIteration')), - ('NextIteration_data', dict(kind='data')), - - ('condition', dict(kind='op', op='TensorIteratorCondition')), - ('condition_cond_data', dict(kind='data')), - ], - edges=[ - ('Enter_1_data', 'Merge_1'), - ('Merge_1', 'Merge_1_data'), - - ('Merge_1_data', 'Switch_1'), - ('Switch_1', 'Switch_1_data', {'out': 1}), - ('Switch_1_data', 'Identity_1'), - ('Identity_1', 'Identity_1_data'), - - ('NextIteration', 'NextIteration_data'), - ('NextIteration_data', 'Merge_1'), - - ('condition', 'condition_cond_data'), - ('condition_cond_data', 'Switch_1'), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - log.debug('================== BackEdgeFind ===============') - - nodes_for_remove = [] - from_body_data = match['NextIteration'].in_node() - - # If Exit path is exist -> create TensorIteratorOutput for this - if 0 in match['Switch_1'].out_nodes(): - Exit = match['Switch_1'].out_node(0).out_node(0) # Switch -> Switch_data -> Exit - assert Exit.has_valid('op') and Exit.op == 'Exit' - output_data = Exit.out_node(0) - - 
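A standalone NumPy sketch of the LSTM weight/bias rewrite performed by TensorFlowLSTMtoGeneric above, using made-up sizes and random data; it walks through the icfo-to-fico gate reordering, the shift_const addition to the forget-gate bias, and the IO-to-OI transpose:

import numpy as np

input_size, hidden_size, shift_const = 6, 4, 1.0    # illustrative sizes
weights = np.random.rand(input_size + hidden_size, 4 * hidden_size).astype(np.float32)
biases = np.random.rand(4 * hidden_size).astype(np.float32)

w = weights.reshape(input_size + hidden_size, 4, hidden_size)
b = biases.reshape(4, hidden_size)

gate_reorder = [2, 0, 1, 3]                         # icfo -> fico
w = np.take(w, gate_reorder, axis=1)
b = np.take(b, gate_reorder, axis=0)
b[0] += shift_const                                 # forget gate is now block 0

w = w.reshape(input_size + hidden_size, -1).transpose()   # TF IO layout -> OV OI
b = b.flatten()
assert w.shape == (4 * hidden_size, input_size + hidden_size)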
nodes_for_remove.append(match['Switch_1'].out_node(0).id) - nodes_for_remove.append(Exit.id) - - # Creating TensorIteratorOutput without partition - output = TensorIteratorOutput(graph, dict(external_port_id=None, - internal_layer_id=None, \ - name=Exit.name + '/TensorIteratorOutput_' - )) - output.create_node_with_data(inputs=[from_body_data, match['condition_cond_data']], - data_nodes=[output_data]) - - assert match['NextIteration_data'].id != match['Enter_1_data'].id - backedge = TensorIteratorBackEdge(graph, dict(name=match['Identity_1'].name + '/TensorIteratorBackEdge_')) - backedge.create_node_with_data(inputs=[match['Enter_1_data'], from_body_data, match['condition_cond_data']], - data_nodes=[match['Identity_1_data']]) - - # Delete useless nodes - safe_nodes = ['Identity_1_data', 'condition', 'condition_cond_data', 'Enter_1_data'] - for node in match.keys(): - if node not in safe_nodes: - nodes_for_remove.append(match[node].id) - graph.remove_nodes_from(nodes_for_remove) diff --git a/tools/mo/openvino/tools/mo/middle/TensorIteratorCondition.py b/tools/mo/openvino/tools/mo/middle/TensorIteratorCondition.py deleted file mode 100644 index fcdbe88401c612..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/TensorIteratorCondition.py +++ /dev/null @@ -1,547 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.middle.pattern_match import apply_pattern -from openvino.tools.mo.middle.TensorIterator_utils import delete_selects_from -from openvino.tools.mo.ops.TensorIterator_ops import TensorIteratorCondition, TensorIteratorBackEdge -from openvino.tools.mo.ops.identity import Identity -from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -def make_nodes_1D(nodes: list): - """ - Reshape every node from nodes from 0D to 1D (nodes should have shape attribute). - """ - for node in nodes: - assert node.shape is None or len(node.shape) == 0 - node.shape = int64_array([1]) - if node.value is not None: - node.value = np.reshape(node.value, node.shape) - - -def looking_for_op_in_list(nodes: list, op: str): - for node in nodes: - if node.has_valid('op') and node.op == op: - return node - - return None - - -class LoopConditionMatcher(MiddleReplacementPattern): - """ - This pattern match condition for TensorIterator in while loops in TF. - The structure of pattern without Data nodes between ops. 
Every node is named as op attribute of this node - (data nodes is marked by (data)): - Const---- - | - v - Const -> Enter -> Merge ---------------------> Switch -> Identity -> Add -> NextIteration - | ^ - ---> Less ----| | - ^ | | - Maximum -> Minimum -> Enter-| | | - ^ v | -Shape -> StridedSlice -> Enter -| LogicalAnd --> LoopCond (data) - v ^ | - ---> Less ----| | - | v - Const -> Enter -> Merge ---------------------> Switch -> Identity -> Add -> NextIteration - ^ - | - Const---- - """ - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - return [] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - return [TensorIteratorMerge] - - @staticmethod - def pattern(variation): - log.debug('+++++++++++++++ ConditionMatching ++++++++++++++++') - nodes = [ - ('Enter_1_less', dict(kind='op', op='Enter')), - ('Strided_slice', dict(kind='op', op='StridedSlice')), - ('Strided_slice_data', dict(kind='data')), - ('Enter_1_less_data', dict(kind='data')), - - ('Less_1', dict(kind='op', op='Less')), - ('Merge_1', dict(kind='op', op='Merge')), - ('Merge_1_data', dict(kind='data')), - ('Less_1_data', dict(kind='data')), - - ('Less_2', dict(kind='op', op='Less')), - ('Merge_2', dict(kind='op', op='Merge')), - ('Merge_2_data', dict(kind='data')), - ('Less_2_data', dict(kind='data')), - - ('and', dict(kind='op', op='LogicalAnd')), - ('and_data', dict(kind='data')), - ('loop_cond', dict(kind='op', op='LoopCond')), - ('loop_cond_data', dict(kind='data')), - - ('init_1', dict(kind='op', op='Const')), - ('init_1_data', dict(kind='data')), - ('Enter_1', dict(kind='op', op='Enter')), - ('Enter_1_data', dict(kind='data')), - - ('init_2', dict(kind='op', op='Const')), - ('init_2_data', dict(kind='data')), - ('Enter_2', dict(kind='op', op='Enter')), - ('Enter_2_data', dict(kind='data')), - - ('Switch_1', dict(kind='op', op='Switch')), - ('Switch_1_data', dict(kind='data')), - ('Identity_1', dict(kind='op', op='Identity')), - ('Identity_1_data', dict(kind='data')), - ('add_1', dict(kind='op', op='Add')), - ('add_1_y', dict(kind='op', op='Const')), - ('add_1_y_data', dict(kind='data')), - ('add_1_data', dict(kind='data')), - ('NextIteration_1', dict(kind='op', op='NextIteration')), - - ('Switch_2', dict(kind='op', op='Switch')), - ('Switch_2_data', dict(kind='data')), - ('Identity_2', dict(kind='op', op='Identity')), - ('Identity_2_data', dict(kind='data')), - ('add_2', dict(kind='op', op='Add')), - ('add_2_y', dict(kind='op', op='Const')), - ('add_2_y_data', dict(kind='data')), - ('add_2_data', dict(kind='data')), - ('NextIteration_2', dict(kind='op', op='NextIteration')), - - ] - edges = [ - ('Strided_slice', 'Strided_slice_data'), - ('Strided_slice_data', 'Enter_1_less'), - ('Enter_1_less', 'Enter_1_less_data'), - ('Enter_1_less_data', 'Less_1'), - ('Less_1', 'Less_1_data'), - ('Less_1_data', 'and'), - - ('and', 'and_data'), - ('and_data', 'loop_cond'), - ('loop_cond', 'loop_cond_data'), - ('loop_cond_data', 'Switch_1'), - ('loop_cond_data', 'Switch_2'), - - ('init_1', 'init_1_data'), - ('init_1_data', 'Enter_1'), - ('Enter_1', 'Enter_1_data'), - ('Enter_1_data', 'Merge_1'), - ('Merge_1', 'Merge_1_data'), - ('Merge_1_data', 'Less_1'), - - ('Merge_1_data', 'Switch_1'), - ('Switch_1', 'Switch_1_data'), - ('Switch_1_data', 'Identity_1'), - ('Identity_1', 'Identity_1_data'), - ('Identity_1_data', 'add_1'), - ('add_1_y', 'add_1_y_data'), - ('add_1_y_data', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 
'NextIteration_1'), - - ('Merge_2_data', 'Switch_2'), - ('Switch_2', 'Switch_2_data'), - ('Switch_2_data', 'Identity_2'), - ('Identity_2', 'Identity_2_data'), - ('Identity_2_data', 'add_2'), - ('add_2_y', 'add_2_y_data'), - ('add_2_y_data', 'add_2'), - ('add_2', 'add_2_data'), - ('add_2_data', 'NextIteration_2'), - - ('init_2', 'init_2_data'), - ('init_2_data', 'Enter_2'), - ('Enter_2', 'Enter_2_data'), - ('Enter_2_data', 'Merge_2'), - - ('Merge_2', 'Merge_2_data'), - ('Merge_2_data', 'Less_2'), - ('Less_2', 'Less_2_data'), - ('Less_2_data', 'and'), - ] - if variation == 1: - nodes.extend([ - ('Enter_2_less', dict(kind='op', op='Enter')), - ('Enter_2_less_data', dict(kind='data')), - ('minimum_data', dict(kind='data')) - ]) - edges.extend([ - ('minimum_data', 'Enter_2_less'), - ('Enter_2_less', 'Enter_2_less_data'), - ('Enter_2_less_data', 'Less_2'), - ]) - elif variation == 2: - edges.append(('Enter_1_less_data', 'Less_2')) - else: - raise Exception('Wrong pattern variation') - return dict(nodes=nodes, edges=edges) - - @staticmethod - def looking_for_iteration_counter(graph: Graph, match: dict): - types = ['TensorIteratorInput', 'TensorIteratorOutput'] - candidates = [match['Identity_1_data'], match['Identity_2_data']] - results = [] - for candidate in candidates: - for node in candidate.out_nodes(): - if node['op'] in types: - results.append(candidate) - break - assert len(results) == 1 - return results[0] - - @staticmethod - def check_dynamic_seq_len(graph: Graph, match: dict): - """ - Cycle is dynamic if at least one of the boundaries isn't constant OR this boundaries is different from tensor - shape. - """ - dynamic_seq_len = match['Enter_1_less_data'].value is None - if 'Enter_2_less_data' in match: - dynamic_seq_len = dynamic_seq_len or match['Enter_2_less_data'].value is None or \ - not np.array_equal(match['Enter_1_less_data'].value, match['Enter_2_less_data'].value) - - return dynamic_seq_len - - def find_and_replace_pattern(self, graph: Graph): - apply_pattern(graph, **self.pattern(1), action=self.replace_pattern) # pylint: disable=no-member - apply_pattern(graph, **self.pattern(2), action=self.replace_pattern) # pylint: disable=no-member - - def replace_pattern(self, graph: Graph, match: dict): - log.debug('================== ConditionFind ===============') - # init_1 - init_1 = match['init_1_data'].value - assert init_1 is not None - init_1 = int(init_1) - - # init_2 - init_2 = match['init_2_data'].value - assert init_2 is not None - init_2 = int(init_2) - - # step_1 - assert match['add_1_y_data'].value is not None - step_1 = int(match['add_1_y_data'].value) - - # step_2 - assert match['add_2_y_data'].value is not None - step_2 = int(match['add_2_y_data'].value) - - dynamic_seq_len = self.check_dynamic_seq_len(graph, match) - - # Create condition node and delete all useless nodes from condition pattern - loop_condition = match['loop_cond_data'] - iterator_data = self.looking_for_iteration_counter(graph, match) - - condition_attrs = dict(time=dict(init=init_2, step=step_2), iter=dict(init=init_1, step=step_1), - name=match['loop_cond'].name + '/TensorIteratorCondition_') - condition = TensorIteratorCondition(graph, attrs=condition_attrs) - if 'minimum_data' in match: - condition_inp = [match['Strided_slice_data'], match['minimum_data']] - else: - condition_inp = [match['Strided_slice_data']] - condition_data = condition.create_node_with_data(inputs=condition_inp, - data_nodes=[loop_condition, iterator_data]) - - safe_nodes = ['loop_cond_data', 'Identity_1_data', 
'Identity_2_data', 'Strided_slice', 'Strided_slice_data', - 'minimum', 'minimum_data'] - - identity_ops = [n.op for n in iterator_data.out_nodes()] - if 'GreaterEqual' in identity_ops: - greater_equal_id = [n.id for n in iterator_data.out_nodes() if n.op == 'GreaterEqual'][0] - - if dynamic_seq_len: - # Add BackEdge for time iterator node - backedge = TensorIteratorBackEdge(graph, dict(name='/TimeIterator/TensorIteratorBackEdge_')) - backedge_data = backedge.create_node_with_data(inputs=[match['init_2_data'], match['add_2_data'], - condition_data[0]], ) - - graph.remove_edge(match['add_2'].in_node(0).id, match['add_2'].id) - graph.add_edge(backedge_data.id, match['add_2'].id, **{'in': 0}) - - graph.remove_edge(iterator_data.id, greater_equal_id) - graph.add_edge(backedge_data.id, greater_equal_id, **{'in': 0}) - - # nodes for time iterator - safe_nodes += ['init_2_data', 'init_2', 'Identity_2_data', 'add_2_data', 'add_2', 'add_2_y', - 'add_2_y_data'] - - # Manually reshape all iterator nodes (for time) from 0D to 1D - iterator_data_nodes = [backedge_data, match['add_2_data'], match['add_2_y_data'], match['add_2_y'], - match['init_2_data'], match['init_2']] - make_nodes_1D(iterator_data_nodes) - else: - # Delete Selects from this cycle to make it not dynamic: - greater_equal_idxs = [n.id for n in iterator_data.out_nodes() if n.op == 'GreaterEqual'] - delete_selects_from(graph, greater_equal_idxs) - - # Delete useless nodes - nodes_for_remove = [] - for node in match.keys(): - if node not in safe_nodes: - nodes_for_remove.append(match[node].id) - graph.remove_nodes_from(nodes_for_remove) - - -class SimpleConditionMatcher(MiddleReplacementPattern): - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - return [LoopConditionMatcher] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - return [TensorIteratorMerge] - - @staticmethod - def pattern(): - log.debug('+++++++++++++++ SimpleConditionMatching ++++++++++++++++') - return dict( - nodes=[ - ('Enter_1_less', dict(kind='op', op='Enter')), - ('Strided_slice', dict(kind='op', op='StridedSlice')), - ('Strided_slice_data', dict(kind='data')), - ('Enter_1_less_data', dict(kind='data')), - - ('Less_1', dict(kind='op', op='Less')), - ('Merge_1', dict(kind='op', op='Merge')), - ('Merge_1_data', dict(kind='data')), - ('Less_1_data', dict(kind='data')), - - ('loop_cond', dict(kind='op', op='LoopCond')), - ('loop_cond_data', dict(kind='data')), - - ('init_1', dict(kind='op', op='Const')), - ('init_1_data', dict(kind='data')), - ('Enter_1', dict(kind='op', op='Enter')), - ('Enter_1_data', dict(kind='data')), - - ('Switch_1', dict(kind='op', op='Switch')), - ('Switch_1_data', dict(kind='data')), - ('Identity_1', dict(kind='op', op='Identity')), - ('Identity_1_data', dict(kind='data')), - ('add_1', dict(kind='op', op='Add')), - ('add_1_y', dict(kind='op', op='Const')), - ('add_1_y_data', dict(kind='data')), - ('add_1_data', dict(kind='data')), - ('NextIteration_1', dict(kind='op', op='NextIteration')), - ], - edges=[ - ('Strided_slice', 'Strided_slice_data'), - ('Strided_slice_data', 'Enter_1_less'), - ('Enter_1_less', 'Enter_1_less_data'), - ('Enter_1_less_data', 'Less_1'), - ('Less_1', 'Less_1_data'), - ('Less_1_data', 'loop_cond'), - - ('loop_cond', 'loop_cond_data'), - ('loop_cond_data', 'Switch_1'), - - ('init_1', 'init_1_data'), - ('init_1_data', 'Enter_1'), - ('Enter_1', 'Enter_1_data'), - ('Enter_1_data', 'Merge_1'), - ('Merge_1', 
'Merge_1_data'), - ('Merge_1_data', 'Less_1'), - - ('Merge_1_data', 'Switch_1'), - ('Switch_1', 'Switch_1_data'), - ('Switch_1_data', 'Identity_1'), - ('Identity_1', 'Identity_1_data'), - ('Identity_1_data', 'add_1'), - ('add_1_y', 'add_1_y_data'), - ('add_1_y_data', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'NextIteration_1'), - - ], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - log.debug('================== SimpleConditionFind ===============') - # init_1 - init_1 = match['init_1_data'].value - assert init_1 is not None - init_1 = int(init_1) - - # step_1 - assert match['add_1_y_data'].value is not None - step_1 = int(match['add_1_y_data'].value) - - match['loop_cond_data'].value = None - - # compute destination (or consumer) ports for time node - identity_node_name = match['Identity_1'].soft_get('name', match['Identity_1'].id) - time_dsts = match['Identity_1'].out_port(0).get_destinations() - - # Create condition node and delete all useless nodes from condition pattern - condition_attrs = dict(iter=dict(init=init_1, step=step_1), - name=match['loop_cond'].name + '/TensorIteratorCondition_') - condition = TensorIteratorCondition(graph, attrs=condition_attrs) - condition.create_node_with_data(inputs=[match['Strided_slice_data']], - data_nodes=[match['loop_cond_data'], match['Identity_1_data']]) - - safe_nodes = ['loop_cond_data', 'Identity_1_data', 'Strided_slice', 'Strided_slice_data'] - - # check if time node has other consumers different from increment node, - # input slicing and output concatenation nodes - other_time_consumers = False - for time_consumer in time_dsts: - if time_consumer.node.soft_get('op') not in ['TensorIteratorInput', 'TensorIteratorOutput'] and \ - time_consumer.node.id != match['add_1'].id: - other_time_consumers = True - break - if other_time_consumers: - # save time related nodes since they have other consumers different from - # input slicing and output concatenation nodes - safe_nodes += ['init_1', 'init_1_data', 'Enter_1', 'Enter_1_data', 'Merge_1', 'Merge_1_data', - 'Switch_1', 'Switch_1_data', 'add_1', 'add_1_y', 'add_1_y_data', 'add_1_data', - 'NextIteration_1'] - switch_node = match['Switch_1'] - new_identity_node = Identity(graph, dict(name=identity_node_name)).create_node() - switch_node.out_port(1).connect(new_identity_node.in_port(0)) - - # make the graph consistent to avoid multiple producers by the same input port - graph.remove_nodes_from([match['Identity_1'].id]) - rename_nodes([(new_identity_node, identity_node_name)]) - - for time_consumer in time_dsts: - if time_consumer.node.soft_get('op') not in ['TensorIteratorInput', 'TensorIteratorOutput']: - time_consumer.get_connection().set_source(new_identity_node.out_port(0)) - - # Delete useless nodes - nodes_for_remove = [] - for node in match.keys(): - if node not in safe_nodes: - nodes_for_remove.append(match[node].id) - graph.remove_nodes_from(nodes_for_remove) - - -class DynamicDecoderConditionMatcher(MiddleReplacementPattern): - """ - This pattern match condition for dynamic decoder and create TensorIteratorCondition node instead of it. 
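A plain-Python sketch of the loop-counter semantics that the condition matchers above recover from the TF while-loop sub-graph; the init, step and bound values are illustrative:

def tensor_iterator_trip_count(init, step, max_iter):
    # Counter initialized from a Const ('init_*'), incremented by another
    # Const ('add_*_y'), and compared against a bound taken from the input
    # shape via Shape -> StridedSlice ('Strided_slice_data').
    i, iterations = init, 0
    while i < max_iter:          # Less (-> LogicalAnd) -> LoopCond
        i += step                # Add -> NextIteration back edge
        iterations += 1
    return iterations

# e.g. iterating over the time axis of a [100, 16, 64] input: 100 iterations
assert tensor_iterator_trip_count(init=0, step=1, max_iter=100) == 100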
- """ - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - return [SimpleConditionMatcher] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - return [TensorIteratorMerge] - - @staticmethod - def pattern(): - log.debug('+++++++++++++++ DynamicDecoderConditionMatching ++++++++++++++++') - return dict( - nodes=[ - ('loop_cond', dict(kind='op', op='LoopCond')), - ('loop_cond_data', dict(kind='data')), - - ('logical_not', dict(kind='op', op='LogicalNot')), - ('logical_not_data', dict(kind='data')), - - ('all', dict(kind='op', op='ReduceAnd')), - ('all_data', dict(kind='data')), - - ('Merge_16', dict(kind='op', op='Merge')), - ('merge_16_data', dict(kind='data')), - - ('NextIteration_16', dict(kind='op', op='NextIteration')), - ('nextIteration_data', dict(kind='data')), - - ('Switch', dict(kind='op', op='Switch')), - ('switch_data', dict(kind='data')), - - ('Identity', dict(kind='op', op='Identity')), - ('identity_data', dict(kind='data')), - - ('add', dict(kind='op', op='Add')), - ('add_data', dict(kind='data')), - - ('Less_enter', dict(kind='op', op='Enter')), - ('Less_enter_data', dict(kind='data')), - - ('And', dict(kind='op', op='LogicalAnd')), - ('And_data', dict(kind='data')), - - ('Less', dict(kind='op', op='Less')), - ('Less_data', dict(kind='data')), - - ('TensorIteratorOutput', dict(kind='op', op='TensorIteratorOutput')), - ('TensorIteratorOutput_1', dict(kind='op', op='TensorIteratorOutput')), - ], - edges=[ - ('NextIteration_16', 'nextIteration_data'), - ('nextIteration_data', 'Merge_16'), - ('Merge_16', 'merge_16_data'), - ('merge_16_data', 'all'), - ('all', 'all_data'), - ('all_data', 'logical_not'), - ('logical_not', 'logical_not_data'), - - ('Less_enter', 'Less_enter_data'), - ('Less_enter_data', 'Less'), - - ('Less', 'Less_data'), - ('Less_data', 'And'), - - ('logical_not_data', 'And'), - ('And', 'And_data'), - ('And_data', 'loop_cond'), - - ('loop_cond', 'loop_cond_data'), - - ('loop_cond_data', 'Switch'), - - ('Switch', 'switch_data'), - - ('switch_data', 'Identity'), - - ('Identity', 'identity_data'), - - ('identity_data', 'add'), - ('add', 'add_data'), - - ('identity_data', 'TensorIteratorOutput'), - ('identity_data', 'TensorIteratorOutput_1'), - ], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - """ - Create condition node and delete all useless nodes (like Switch/Merge/Identity) from condition pattern - """ - log.debug('================== DynamicDecoderConditionFind ==================') - # Create and connect condition node for dynamic decoder in TF - loop_condiiton = match['loop_cond_data'] - iterator_data = match['identity_data'] - - condition_attrs = dict(name=match['loop_cond'].name + '/TensorIteratorCondition_') - condition = TensorIteratorCondition(graph, attrs=condition_attrs) - condition.create_node_with_data(inputs=[match['Less_enter'].in_node()], - data_nodes=[loop_condiiton, iterator_data]) - - # Delete useless nodes - safe_nodes = ['loop_cond_data', 'identity_data', 'TensorIteratorOutput', 'TensorIteratorOutput_1'] - nodes_for_remove = [] - for node in match.keys(): - if node not in safe_nodes: - nodes_for_remove.append(match[node].id) - graph.remove_nodes_from(nodes_for_remove) diff --git a/tools/mo/openvino/tools/mo/middle/TensorIteratorConditionChecker.py b/tools/mo/openvino/tools/mo/middle/TensorIteratorConditionChecker.py deleted file mode 100644 index 69e629beed4885..00000000000000 --- 
a/tools/mo/openvino/tools/mo/middle/TensorIteratorConditionChecker.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import compatible_dims -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class ConditionChecks(MiddleReplacementPattern): - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - from openvino.tools.mo.middle.TensorIteratorBackEdge import BackEdgesMatching - return [BackEdgesMatching] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - return [TensorIteratorMerge] - - @staticmethod - def pattern(): - log.debug('+++++++++++++++ ConditionCheckerMatching ++++++++++++++++') - return dict( - nodes=[ - ('condition', dict(kind='op', op='TensorIteratorCondition')), - ('Strided_slice', dict(kind='op', op='StridedSlice')), - ('Strided_slice_data', dict(kind='data')), - ('shape', dict(kind='op', op='ShapeOf')), - ('shape_data', dict(kind='data')), - - ('minimum', dict(kind='op', op='Minimum')), - ('minimum_data', dict(kind='data')), - ('Maximum', dict(kind='op', op='Maximum')), - ('Maximum_data', dict(kind='data')), - ], - edges=[ - ('shape', 'shape_data'), - ('shape_data', 'Strided_slice'), - ('Strided_slice', 'Strided_slice_data'), - ('Strided_slice_data', 'condition'), - ('Strided_slice_data', 'minimum'), - - ('Maximum', 'Maximum_data'), - ('Maximum_data', 'minimum'), - ('minimum', 'minimum_data'), - ('minimum_data', 'condition'), - ], - ) - - @staticmethod - def replace_pattern(graph, match: dict): - # Check for SS params - # Sanity check that we iterate over axis of some tensor - ss = match['Strided_slice'] - params = ss.in_nodes() - assert np.all(params[1].in_node().value == 0) - assert np.all(params[2].in_node().value == 1) - assert np.all(params[3].in_node().value == 1) - - # Check for comparing SS and seq_length source (it should be one tensor) - # SIMPLE CHECK - assert match['Strided_slice_data'].value is not None - if match['minimum_data'].value is None: - log.warning('TF loop doesn\'t have a constant upper bound produced by node {}, or ModelOptimizer ' - 'cannot detect a constant in this case. 
Loops with a dynamic number of iterations are not ' - 'supported, so in the resulting IR, generated TensorIterator will have ' - 'a maximum number of iterations determined by input tensor size: {}' - ''.format(match['minimum_data'].soft_get('name'), match['Strided_slice_data'].value) - ) - else: - assert compatible_dims(match['Strided_slice_data'].value, match['minimum_data'].value), \ - 'Values do not match: {} and {}'.format(match['Strided_slice_data'].value, match['minimum_data'].value) - - # Check that bound for Condition and Inputs/Outputs sizes match - condition_time = match['condition'].out_node(0) - inputs_and_outputs = condition_time.out_nodes() - type_list = ['TensorIteratorInput'] - - for ta in inputs_and_outputs: - if ta.has_valid('kind') and ta['kind'] == 'op' and ta['op'] in type_list: - assert ta.in_node(0).id == ss.id - - log.debug('+++++++++++++++ Condition Check was successful ++++++++++++++++') diff --git a/tools/mo/openvino/tools/mo/middle/TensorIteratorInput.py b/tools/mo/openvino/tools/mo/middle/TensorIteratorInput.py deleted file mode 100644 index 1b558bf2b608d2..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/TensorIteratorInput.py +++ /dev/null @@ -1,420 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.middle.AddIsCyclicAttribute import AddIsCyclicAttribute -from openvino.tools.mo.ops.TensorIterator_ops import TensorIteratorInput -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class SmartInputMatcher(MiddleReplacementPattern): - """ - This pattern match partitioned inputs for TensorIterator in dynamic_rnn loops in TF. - The structure of pattern without Data nodes between ops. 
Every node is named as op attribute of this node - (data nodes is marked by (data)): - TensorArray - | | - v v Condition (data) - Flow(data) Handle(data)-------------- | - | | | | - v v v v - Value (data) -> StridedSlice () -> Range(0;1) -> TensorArrayScatter -> Enter -> TensorArrayRead - | ^ - |__________________________________________________| - """ - - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - return [AddIsCyclicAttribute] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - return [TensorIteratorMerge] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('TensorArray', dict(kind='op', op='TensorArrayV3')), - ('TensorArray_handle', dict(kind='data')), - ('TensorArray_flow', dict(kind='data')), - ('Enter', dict(kind='op', op='Enter')), - ('Enter_data', dict(kind='data')), - - ('stack', dict(kind='op', op='Const')), - ('stack_data', dict(kind='data')), - ('stack_1', dict(kind='op', op='Const')), - ('stack_1_data', dict(kind='data')), - ('stack_2', dict(kind='op', op='Const')), - ('stack_2_data', dict(kind='data')), - - ('start', dict(kind='op', op='Const')), - ('start_data', dict(kind='data')), - - ('delta', dict(kind='op', op='Const')), - ('delta_data', dict(kind='data')), - - ('StridedSlice', dict(kind='op', op='StridedSlice')), - ('StridedSlice_data', dict(kind='data')), - ('range', dict(kind='op', op='Range')), - ('range_data', dict(kind='data')), - - ('TensorArrayScatter', dict(kind='op', op='TensorArrayScatterV3')), - ('TensorArrayScatter_data', dict(kind='data')), - ('Enter_1', dict(kind='op', op='Enter')), - ('Enter_1_data', dict(kind='data')), - - ('TensorArrayRead', dict(kind='op', op='TensorArrayReadV3')), - ('TensorArrayRead_data', dict(kind='data')), - - ('Condition_data', dict(kind='data')), - ], - edges=[ - ('TensorArray', 'TensorArray_handle'), - ('TensorArray', 'TensorArray_flow'), - ('TensorArray_handle', 'Enter'), - ('Enter', 'Enter_data'), - - ('stack', 'stack_data'), - ('stack_1', 'stack_1_data'), - ('stack_2', 'stack_2_data'), - ('stack_data', 'StridedSlice', {'in': 1}), - ('stack_1_data', 'StridedSlice', {'in': 2}), - ('stack_2_data', 'StridedSlice', {'in': 3}), - - ('StridedSlice', 'StridedSlice_data'), - ('StridedSlice_data', 'range', {'in': 1}), - ('start', 'start_data'), - ('delta', 'delta_data'), - - ('start_data', 'range', {'in': 0}), - ('delta_data', 'range', {'in': 2}), - ('range', 'range_data'), - ('range_data', 'TensorArrayScatter'), - - ('TensorArray_handle', 'TensorArrayScatter'), - ('TensorArray_flow', 'TensorArrayScatter'), - ('TensorArrayScatter', 'TensorArrayScatter_data'), - ('TensorArrayScatter_data', 'Enter_1'), - ('Enter_1', 'Enter_1_data'), - - ('Enter_data', 'TensorArrayRead'), - ('Enter_1_data', 'TensorArrayRead'), - ('Condition_data', 'TensorArrayRead'), - ('TensorArrayRead', 'TensorArrayRead_data'), - ], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - log.debug('================== SmartInputFind ===============') - - assert match['Enter_data'].value is not None - assert match['stack_data']['value'][0] == 0 and match['stack_1_data']['value'][0] == 1 and \ - match['stack_2_data']['value'][0] == 1 - assert match['start_data']['value'] == 0 and match['delta_data']['value'] == 1 - - ta_size_data = match['TensorArray'].in_node() - ta_size = ta_size_data.in_node() - value = match['TensorArrayScatter'].in_node(2) - - start, end = None, None - if 0 in ta_size.in_nodes(): - shape = 
match['StridedSlice'].in_node(0).in_node(0) - # Case when value for Strided slice is Const, not Shape - if shape['kind'] == 'op' and shape['op'] == 'Const': - start = 0 - end = shape.value[0] - log.warning("Your network cannot be reshaped since shapes of placeholders are constants. " - "Please, provide non-constant shapes. ") - - # Create input node with params - # axis == 0 because in TensorArray we ALWAYS iterate over 0 axis, other params will be fill later (with - # condition) - input_node = TensorIteratorInput(graph, dict(axis=0, start=start, stride=None, part_size=None, - external_port_id=str(match['Enter_data'].value), - internal_layer_id=match['TensorArrayRead_data'].id, - name=match['TensorArrayRead'].name + '/TensorIteratorInput_' - )) - input_node.create_node_with_data(inputs=[ta_size_data, value, match['Condition_data']], - data_nodes=[match['TensorArrayRead_data']]) - # Delete useless nodes - safe_nodes = ['TensorArrayRead_data', 'Condition', 'Condition_data'] - - nodes_for_remove = [] - for node in match.keys(): - if node not in safe_nodes: - nodes_for_remove.append(match[node].id) - graph.remove_nodes_from(nodes_for_remove) - - -class SimpleInputMatcher(MiddleReplacementPattern): - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - from openvino.tools.mo.middle.DeleteNotExecutable import DeleteNotExecutable - return [DeleteNotExecutable] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - return [TensorIteratorMerge] - - """ - This pattern match simple inputs (without partitions) in while loops in TF (this inputs are set by Enter nodes). - """ - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('Enter', dict(kind='op', op='Enter')), - ], - edges=[ - ], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - log.debug('================== SimpletInputFind ===============') - - input_node = TensorIteratorInput(graph, dict(external_port_id=None, - internal_layer_id=None, - name=match['Enter'].name + '/TensorIteratorInput_' - )) - input_node.create_node_with_data(inputs=[match['Enter'].in_node()], data_nodes=[match['Enter'].out_node()]) - - # Delete useless nodes - graph.remove_nodes_from([match['Enter'].id]) - - -class BackEdgeSimpleInputMatcher(MiddleReplacementPattern): - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - return [SimpleInputMatcher] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - return [TensorIteratorMerge] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('BackEdge', dict(kind='op', op='TensorIteratorBackEdge')), - ], - edges=[ - ], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - log.debug('================== SimpleBackEdgeInputFind ===============') - - assert len(match['BackEdge'].in_nodes()) == 3 - condition = match['BackEdge'].in_node(2) - init_input = match['BackEdge'].in_node(0) - cycle_input = match['BackEdge'].in_node(1) - - # We need to create new TensorItertorInput node only if this node doesn't exist already. 
- if (len(init_input.in_nodes()) == 0 or \ - (len(init_input.in_nodes()) == 1 and init_input.has_valid('value') and - init_input.in_node(0).soft_get('op') != 'TensorIteratorInput')): - - input_node = TensorIteratorInput(graph, dict(external_port_id=None, - internal_layer_id=None, - name=match['BackEdge'].name + '/TensorIteratorInput_' - )) - - # In case if data node has Constant producer - if len(init_input.in_nodes()) == 1: - graph.remove_edge(init_input.in_node(0).id, init_input.id) - - input_data_node = input_node.create_node_with_data(inputs=[init_input]) - input_data_node.shape = int64_array(init_input.shape) - graph.remove_edges_from([(init_input.id, match['BackEdge'].id)]) - graph.add_edges_from([(input_data_node.id, match['BackEdge'].id, {'in': 0, 'out': 0})]) - - -class SmartMatcherInputSlicingWithGather(MiddleReplacementPattern): - r""" - The transformation matches a sub-graph where input tensor is consequently sliced along some axis - for each time step (or index) inside TensorFlow 1.x while_loop operation. - In the original graph StridedSlice with non-constant begin and end attributes performs this slicing. - NonConstBeginStridedSliceReplacement, a front transformation, replaces this StridedSlice with Gather operation - after which the following sub-graph is obtained (Note: no data node is displayed): - - NextIteration <------- Add <--- Time Step - | /\ - \/ | - InitTime ----> Enter --> Merge ---> Switch ---> Identity ------ - | /\ | - \/ | | - MaxTime ---> Less ---> LoopCond Unsqueeze (axis=0) - | | - \/ \/ - Input ---> Enter ----> Merge ---> Switch ---> Identity ---> Gather ---> Squeeze --> Ops (Need Slice at i-th time) - /\ | /\ /\ - | \/ |----Axis----| - -------------------- NextIteration - - Some part of the sub-graph above is replaced with TensorIteratorInput and the following graph is obtained - after the transformation: - - NextIteration <------- Add <--- Time Step - | /\ - \/ | - InitTime ----> Enter --> Merge ---> Switch ---> Identity ------| - | /\ | - \/ | | - MaxTime ---> Less ---> LoopCond | - | | - | |----------------------------------------- - \/ \/ - Input --> TensorIteratorInput(InitTime, TimeStep, Axis) ---> Ops (Need Slice at i-th time) - - Details about TensorIterator (inputs, outputs, and attributes) will be finally used by TensorIteratorMerge - transformation during construction of TensorIterator operation. 
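A standalone NumPy sketch of the per-iteration slicing that the matched Gather/Squeeze pair performs and that TensorIteratorInput then models through its axis, start and stride attributes; the input shape is illustrative:

import numpy as np

inp = np.random.rand(5, 3, 8).astype(np.float32)    # e.g. [time, batch, features]
axis, init_time, time_step = 0, 0, 1                # values taken from the InitIndex,
                                                    # IndexDelta and Axis constants

for i in range(init_time, inp.shape[axis], time_step):
    gathered = np.take(inp, [i], axis=axis)         # Gather with the unsqueezed index
    sliced = np.squeeze(gathered, axis=axis)        # Squeeze back to a single step
    assert np.array_equal(sliced, inp[i])           # the slice consumed at step i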
- """ - - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - return [AddIsCyclicAttribute] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorBackEdge import BackEdgesMatching - from openvino.tools.mo.middle.TensorIteratorCondition import LoopConditionMatcher - return [BackEdgesMatching, LoopConditionMatcher] - - @staticmethod - def pattern(): - return dict( - nodes=[ - # LoopCond node and related Condition node - ('EnterMaxIndex', dict(kind='op', op='Enter')), - ('EnterMaxIndexData', dict(kind='data')), - ('Less', dict(kind='op', op='Less')), - ('LessData', dict(kind='data')), - ('LoopCond', dict(kind='op', op='LoopCond')), - ('LoopCondData', dict(kind='data')), - - # a list of Input specific nodes - ('EnterInput', dict(kind='op', op='Enter')), - ('EnterInputData', dict(kind='data')), - ('MergeInput', dict(kind='op', op='Merge')), - ('MergeInputData', dict(kind='data')), - ('SwitchInput', dict(kind='op', op='Switch')), - ('SwitchInputData', dict(kind='data')), - ('IdentityInput', dict(kind='op', op='Identity')), - ('IdentityInputData', dict(kind='data')), - ('NextIterationInput', dict(kind='op', op='NextIteration')), - - # a list of Index specific nodes - ('InitIndex', dict(kind='op', op='Const')), - ('InitIndexData', dict(kind='data')), - ('EnterIndex', dict(kind='op', op='Enter')), - ('EnterIndexData', dict(kind='data')), - ('MergeIndex', dict(kind='op', op='Merge')), - ('MergeIndexData', dict(kind='data')), - ('SwitchIndex', dict(kind='op', op='Switch')), - ('SwitchIndexData', dict(kind='data')), - ('IdentityIndex', dict(kind='op', op='Identity')), - ('IdentityIndexData', dict(kind='data')), - ('UnsqueezeIndex', dict(kind='op', op='Unsqueeze')), - ('UnsqueezeIndexData', dict(kind='data')), - ('AddIndex', dict(kind='op', op='Add')), - ('AddIndexData', dict(kind='data')), - ('NextIterationIndex', dict(kind='op', op='NextIteration')), - ('IndexDelta', dict(kind='op', op='Const')), - ('IndexDeltaData', dict(kind='data')), - - # a list of nodes responsible for slicing - ('Axis', dict(kind='op', op='Const')), - ('AxisData', dict(kind='data')), - ('Gather', dict(kind='op', op='Gather')), - ('GatherData', dict(kind='data')), - ('SqueezeSlice', dict(kind='op', op='Squeeze')), - ('SqueezeSliceData', dict(kind='data')), - ], - edges=[ - ('EnterMaxIndex', 'EnterMaxIndexData'), - ('EnterMaxIndexData', 'Less', {'in': 1}), - ('Less', 'LessData'), - ('LessData', 'LoopCond'), - ('LoopCond', 'LoopCondData'), - ('LoopCondData', 'SwitchInput', {'in': 1}), - - ('EnterInput', 'EnterInputData'), - ('EnterInputData', 'MergeInput', {'in': 0}), - ('MergeInput', 'MergeInputData'), - ('MergeInputData', 'SwitchInput', {'in': 0}), - ('SwitchInput', 'SwitchInputData', {'out': 1}), - ('SwitchInputData', 'IdentityInput'), - ('IdentityInput', 'IdentityInputData'), - ('IdentityInputData', 'Gather', {'in': 0}), - ('IdentityInputData', 'NextIterationInput'), - - ('InitIndex', 'InitIndexData'), - ('InitIndexData', 'EnterIndex'), - ('EnterIndex', 'EnterIndexData'), - ('EnterIndexData', 'MergeIndex', {'in': 0}), - ('MergeIndex', 'MergeIndexData'), - ('MergeIndexData', 'SwitchIndex', {'in': 0}), - ('MergeIndexData', 'Less', {'in': 0}), - ('LoopCondData', 'SwitchIndex', {'in': 1}), - ('SwitchIndex', 'SwitchIndexData', {'out': 1}), - ('SwitchIndexData', 'IdentityIndex'), - ('IdentityIndex', 'IdentityIndexData'), - ('IdentityIndexData', 'AddIndex', {'in': 0}), - ('AddIndex', 'AddIndexData'), - ('AddIndexData', 'NextIterationIndex'), - ('IndexDelta', 
'IndexDeltaData'), - ('IndexDeltaData', 'AddIndex', {'in': 1}), - - ('IdentityIndexData', 'UnsqueezeIndex'), - ('UnsqueezeIndex', 'UnsqueezeIndexData'), - ('UnsqueezeIndexData', 'Gather', {'in': 1}), - ('Axis', 'AxisData'), - ('AxisData', 'Gather', {'in': 2}), - ('Gather', 'GatherData'), - ('GatherData', 'SqueezeSlice'), - ('SqueezeSlice', 'SqueezeSliceData'), - ], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - # retrieve attribute values for TensorIteratorInput node - init_time = match['InitIndex'].value.item(0) - time_step = match['IndexDelta'].value.item(0) - axis = match['Axis'].value.item(0) - - # retrieve input and output nodes for TensorIteratorInput node - initial_input_node = match['EnterInput'] - current_index_node = match['IdentityIndex'] - size_node = match['EnterMaxIndex'] - resulted_slice_node = match['SqueezeSlice'] - resulted_slice_node_name = resulted_slice_node.soft_get('name', resulted_slice_node.id) - - # create TensorIteratorInput node that reflects slicing of input for each time step along axis - ti_input_node = TensorIteratorInput(graph, dict(axis=axis, start=init_time, stride=time_step, - name=resulted_slice_node_name + '/TensorIteratorInput') - ).create_node() - size_node.in_port(0).get_connection().add_destination(ti_input_node.in_port(0)) - initial_input_node.in_port(0).get_connection().set_destination(ti_input_node.in_port(1)) - current_index_node.out_port(0).connect(ti_input_node.in_port(2)) - resulted_slice_node.out_port(0).get_connection().set_source(ti_input_node.out_port(0)) - - # delete no longer needed nodes responsible for slicing of input in the original graph - node_names_for_remove = ['EnterInput', 'MergeInput', 'SwitchInput', - 'IdentityInput', 'NextIterationInput', 'SqueezeSlice', 'UnsqueezeIndex', 'Gather'] - graph.remove_nodes_from([match[node_name].id for node_name in node_names_for_remove]) diff --git a/tools/mo/openvino/tools/mo/middle/TensorIteratorLSTMToLSTMSequence.py b/tools/mo/openvino/tools/mo/middle/TensorIteratorLSTMToLSTMSequence.py deleted file mode 100644 index bb01b0a1214c56..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/TensorIteratorLSTMToLSTMSequence.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.ONNXRNNSequenceNormalize import ONNXRNNSequenceNormalize -from openvino.tools.mo.middle.TF_lstm_cell_to_generic import TensorFlowLSTMtoGeneric -from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.pattern_match import find_isomorphisms -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.utils.error import Error - - -class TensorIteratorLSTM(MiddleReplacementPattern): - """ Detects TensorIterator with LSTMCell of supported form. - - Collect original operation names of supported LSTMCells in - the list LSTMCell.instances_supported_by_IE. It will be used at the second - round of the network translation. Mark all supported LSTMCell with flag - supported_by_IE to have a chance to detect all not-supported instances - in a separate pass. 
- """ - - enabled = False - - def run_after(self): - return [TensorIteratorMerge, ONNXRNNSequenceNormalize, TensorFlowLSTMtoGeneric] - - def pattern(self): - return dict( - nodes=[ - ('ti', dict(kind='op', op='TensorIterator')), - ], - edges=[ - ] - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - nodes = [ - ('input_unsqueezed'), - ('squeeze', dict(op='Reshape')), - ('input_squeezed'), - ('input_hidden'), - ('input_cell'), - ('weights'), - ('biases'), - - ('lstm', dict(op='LSTMCell')), - - ('output_hidden'), - ('output_cell'), - ('unsqueeze', dict(op='Reshape')), - ('output_unsqueezed'), - ] - edges = [ - ('input_unsqueezed', 'squeeze'), - ('squeeze', 'input_squeezed'), - - ('input_squeezed', 'lstm', {'in': 0}), - ('input_hidden', 'lstm', {'in': 1}), - ('input_cell', 'lstm', {'in': 2}), - ('weights', 'lstm', {'in': 3}), - ('biases', 'lstm', {'in': 4}), - - ('lstm', 'output_hidden', {'out': 0}), - ('lstm', 'output_cell', {'out': 1}), - - ('output_hidden', 'unsqueeze'), - ('unsqueeze', 'output_unsqueezed'), - ] - ti = match['ti'] - isomorphisms = find_isomorphisms(ti.body, nodes, edges) - if len(list(isomorphisms)) != 1: - raise Error('Unsupported TensorIterator layer {} was found: either its body, ports or ' - 'edges are not supported by OpenVINO. ' - 'Only TensorIterator with LSTMCell in a body of strict form is supported. ' - 'Please modify the original network ' - 'to meet the requirements.'.format(ti.soft_get('name'))) - body_match = isomorphisms[0] - if body_match['input_hidden'].has_valid('value') or body_match['input_cell'].has_valid('value'): - raise Error('Unsupported TensorIterator layer {} was found: initial hidden and/or cell states ' - 'for LSTMCell are constants. This is not supported. ' - 'Only TensorIterator with LSTMCell in a body of strict form is supported. 
' - 'Please modify the original network ' - 'to meet the requirements.'.format(ti.soft_get('name'))) - # TODO Additional checks for port indices diff --git a/tools/mo/openvino/tools/mo/middle/TensorIteratorMerge.py b/tools/mo/openvino/tools/mo/middle/TensorIteratorMerge.py deleted file mode 100644 index 4a3294745c6fb1..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/TensorIteratorMerge.py +++ /dev/null @@ -1,368 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from collections import deque -from copy import deepcopy - -from openvino.tools.mo.ops.tensor_iterator import TensorIterator -from openvino.tools.mo.front.common.partial_infer.utils import shape_insert -from openvino.tools.mo.graph.graph import Node, Graph, add_opoutput -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.ops.unsqueeze import Unsqueeze -from openvino.tools.mo.utils.graph import sub_graph_between_nodes, invert_sub_graph_between_nodes - -stop_nodes = ['TensorIteratorInput', 'TensorIteratorOutput', 'TensorIteratorBackEdge', 'TensorIteratorCondition'] - - -def op_type(graph, node_name: str): - node = Node(graph, node_name) - if node.has_valid('kind') and node['kind'] == 'op': - return node['op'] - else: - return None - - -def update_inputs(graph, inputs: list, node_name: str): - node = Node(graph, node_name) - if node.has_valid('kind') and node['kind'] == 'op' and node['op'] == 'TensorIteratorInput': - if node_name not in inputs: - inputs.append(node_name) - - -def reverse_dfs(graph: Graph, node_name: str, stop_nodes: list, inputs: list, visited: set = None): - d = deque() - - if visited is None: - visited = set() - visited.add(node_name) - d.appendleft(node_name) - while len(d) != 0: - cur_node = d.popleft() - for in_node_name, _ in graph.in_edges(cur_node): - if in_node_name not in visited: - if op_type(graph, in_node_name) not in stop_nodes: - visited.add(in_node_name) - d.append(in_node_name) - else: - update_inputs(graph, inputs, in_node_name) - - -def dfs(graph: Graph, node_name: str, stop_nodes: list, visited: set = None): - d = deque() - - visited.add(node_name) - d.appendleft(node_name) - while len(d) != 0: - cur_node = d.popleft() - for _, out_node_name in graph.out_edges(cur_node): - if out_node_name not in visited: - if op_type(graph, out_node_name) not in stop_nodes: - visited.add(out_node_name) - d.append(out_node_name) - - -def get_body(graph, inputs, outputs): - if len(inputs) == 0: - nodes, extra_inputs = invert_sub_graph_between_nodes( - graph, - outputs, - inputs, - lambda node: node.soft_get('op') == 'TensorIteratorInput' - ) - else: - nodes, extra_inputs = sub_graph_between_nodes( - graph, - inputs, - outputs, - lambda node: node.soft_get('op') == 'TensorIteratorInput' - ) - nodes = list(set(nodes) - set(inputs) - set(outputs) - set(extra_inputs)) - return nodes, extra_inputs - - -class TensorIteratorMerge(MiddleReplacementPattern): - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - return [] - - def run_before(self): - return [] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('condition', dict(kind='op', op='TensorIteratorCondition')), - ], - edges=[], - ) - - @staticmethod - def replace_pattern(graph, match: dict): - # Here we will found all parts of TI: condition, inputs/outputs, back edges, body 
and create TensorIterator Op - # and make all checks needed for TensorIterator work - cond_data = match['condition'].out_node(0) if not match['condition'].out_port(0).disconnected() else None - time_data = match['condition'].out_node(1) if len(match['condition'].out_nodes()) >= 1 else None - name = match['condition'].name - - back_edges = [] - inputs = [] - outputs = [] - - if cond_data is not None: - for node in cond_data.out_nodes(): - if node['kind'] == 'op' and node['op'] == 'TensorIteratorBackEdge': - back_edges.append(node.id) - elif node['kind'] == 'op' and node['op'] == 'TensorIteratorInput': - inputs.append(node.id) - elif node['kind'] == 'op' and node['op'] == 'TensorIteratorOutput': - outputs.append(node.id) - - if time_data is not None: - for node in time_data.out_nodes(): - if node['kind'] == 'op' and node['op'] == 'TensorIteratorInput': - inputs.append(node.id) - elif node['kind'] == 'op' and node['op'] == 'TensorIteratorOutput': - outputs.append(node.id) - else: - # something goes wrong here - assert False - condition = match['condition'] - tensor_sequence_length = condition.in_node(0) - - nodes_to_remove = [n.id for n in (condition, cond_data, time_data, tensor_sequence_length) if n is not None] - graph.remove_nodes_from(nodes_to_remove) - - body_nodes, extra_inputs = get_body(graph, inputs, outputs) - - if cond_data is not None: - body_nodes = list(set(body_nodes) - set([cond_data])) - - inputs += extra_inputs - - assert all([node in graph.nodes() for node in body_nodes]) - - inputs = [Node(graph, node) for node in inputs] - outputs = [Node(graph, node) for node in outputs] - back_edges = [Node(graph, node) for node in back_edges] - - external_inputs = [ - { - 'external_data_id': node.in_node(1 if node.has_valid('axis') else 0), - 'internal_data_id': node.out_node(0), - 'axis': node.axis, - 'start': node.start, - 'end': node.end, - 'stride': node.stride, - 'part_size': node.part_size - } for node in inputs] - - external_outputs = [ - { - 'external_data_id': node.out_node(0), - 'internal_data_id': node.in_node(1 if node.has_valid('axis') else 0), - 'axis': node.axis, - 'start': node.start, - 'end': node.end, - 'stride': node.stride, - 'part_size': node.part_size - } for node in outputs] - - back_edges_data = [ - { - 'from_data_id': node.in_node(1), - 'to_data_id': node.out_node(0), - 'init_data_id': node.in_node(0), - } for node in back_edges - ] - - body = Graph(name='body') - body.graph = graph.graph - body.add_nodes_from([(node, graph.node[node]) for node in body_nodes]) - body.add_edges_from( - [(u, v, k, d) for u, v, k, d in graph.edges(data=True, keys=True) if u in body_nodes and v in body_nodes]) - - graph.remove_nodes_from( - body_nodes + [match['condition'].id] + [inp.id for inp in inputs] + [out.id for out in outputs]) - internal_id_count = 0 - real_back_edges = [] - for edge in back_edges_data: - assert edge['from_data_id'].id in body.nodes() - assert edge['to_data_id'].id in body.nodes() - assert edge['init_data_id'].id in body.nodes() - edge['from_data_id'] = Node(body, edge['from_data_id'].id) - edge['to_data_id'] = Node(body, edge['to_data_id'].id) - edge['init_data_id'] = Node(body, edge['init_data_id'].id) - add_opoutput(body, edge['from_data_id'].id, 0, False) - - # Assign/reuse ids for the back-edge start; it comes from from_data_id - assert len(edge['from_data_id'].in_nodes()) == 1 - # layer id - if not edge['from_data_id'].in_node().has_valid('internal_layer_id'): - edge['from_data_id'].in_node()['internal_layer_id'] = internal_id_count - 
internal_id_count += 1 - edge['from_layer'] = edge['from_data_id'].in_node()['internal_layer_id'] - - # port id - if 'internal_port_id' not in edge['from_data_id'].in_edge(): - edge['from_data_id'].in_edge()['internal_port_id'] = internal_id_count - internal_id_count += 1 - edge['from_port'] = edge['from_data_id'].in_edge()['internal_port_id'] - - # Look at all consumers for a data that ends a back-edge - # For each such consumer, there will be a separate back-edge (and input) - current_real_back_edges = [] - for _, consumer, key, edge_attrs in body.out_edges(edge['to_data_id'].id, data=True, keys=True): - - real_edge = {} - real_edge.update(edge) # all real back_edges have the same back-edge start - - consumer = Node(body, consumer) - - if real_edge['to_data_id'].in_node().has_valid('internal_layer_id'): - assert False - real_edge['to_data_id'].out_node()['internal_layer_id'] = \ - real_edge['to_data_id'].in_node().internal_layer_id - elif not consumer.has_valid('internal_layer_id'): - consumer['internal_layer_id'] = internal_id_count - internal_id_count += 1 - real_edge['to_layer'] = consumer['internal_layer_id'] - - assert 'internal_port_id' not in edge_attrs - assert len(real_edge['init_data_id'].out_edges()) == 1 - assert not 'internal_port_id' in real_edge['init_data_id'].out_edge() - edge_attrs['internal_port_id'] = internal_id_count - internal_id_count += 1 - real_edge['to_port'] = edge_attrs['internal_port_id'] - real_edge['consumer'] = consumer - real_edge['consumer_key'] = key - - real_edge['attrs'] = deepcopy(edge_attrs) - current_real_back_edges.append(real_edge) - - # connect initial data node with each consumer providing actual edge attributes - body.add_edges_from([ - ( - real_edge['init_data_id'].id, - real_edge['consumer'].id, - real_edge['consumer_key'], - real_edge['attrs']) - for real_edge in current_real_back_edges]) - - body.remove_nodes_from([edge['to_data_id'].id, edge['to_data_id'].in_node().id]) - real_back_edges += current_real_back_edges - - real_external_inputs = [] - - for ext_inp in external_inputs: - assert ext_inp['external_data_id'].id not in body.nodes() - assert ext_inp['internal_data_id'].id in body.nodes() - ext_inp['internal_data_id'] = Node(body, ext_inp['internal_data_id'].id) - - if ext_inp['axis'] is not None: - # Insert squeezing resize at input port that has partitioning - shape = ext_inp['internal_data_id'].shape.copy() - assert not ext_inp['internal_data_id'].has_valid('value') - new_input_data = Op._create_data_node(body, ext_inp['internal_data_id'].name + '/UnsqueezedInput', - dict(shape=shape_insert(shape, ext_inp['axis'], 1))) - - reshape_op = Squeeze(body, dict(name=ext_inp['internal_data_id'].name + '/InputSqueeze')) - reshape_dim_data = Const(body, {'name': ext_inp['internal_data_id'].name + '/ReshapeDim', - 'value': ext_inp['axis']}).create_node_with_data() - reshape_op.create_node_with_data([new_input_data, reshape_dim_data], - data_nodes=[ext_inp['internal_data_id']]) - ext_inp['internal_data_id'] = new_input_data - - ext_inp['internal_data_id']['is_input'] = True - assert len(ext_inp['internal_data_id'].in_nodes()) == 0 - ext_inp['external_port_id'] = internal_id_count - internal_id_count += 1 - for _, consumer, edge_attrs in body.out_edges(ext_inp['internal_data_id'].id, data=True): - real_ext_inp = {} - real_ext_inp.update(ext_inp) - consumer = Node(body, consumer) - if not consumer.has_valid('internal_layer_id'): - consumer['internal_layer_id'] = internal_id_count - internal_id_count += 1 - if not 'internal_port_id' in 
edge_attrs: - edge_attrs['internal_port_id'] = internal_id_count - internal_id_count += 1 - real_ext_inp['internal_layer_id'] = consumer['internal_layer_id'] - real_ext_inp['internal_port_id'] = edge_attrs['internal_port_id'] - real_external_inputs.append(real_ext_inp) - - for ext_out in external_outputs: - assert ext_out['external_data_id'].id not in body.nodes() - assert ext_out['internal_data_id'].id in body.nodes() - ext_out['internal_data_id'] = Node(body, ext_out['internal_data_id'].id) - - if ext_out['axis'] is not None: - # Insert unsqueezing resize at output port that has partitioning - reshape_op = Unsqueeze(body, dict(name=ext_out['internal_data_id'].name + '/OutputUnsqueeze')) - reshape_dim_data = Const(body, {'name': ext_out['internal_data_id'].name + '/ReshapeDim', - 'value': ext_out['axis']}).create_node_with_data() - ext_out['internal_data_id'] = reshape_op.create_node_with_data([ext_out['internal_data_id'], - reshape_dim_data]) - - # TODO: add here working with simple outputs - - if not any([out_node.soft_get('op', None) == 'Result' for out_node in ext_out['internal_data_id'].out_nodes()]): - add_opoutput(body, ext_out['internal_data_id'].id, 0, False) - - # assert len(ext_out['internal_data_id'].out_nodes()) == 0 - assert len(ext_out['internal_data_id'].in_nodes()) == 1 - if not 'internal_layer_id' in ext_out['internal_data_id'].in_node(): - ext_out['internal_data_id'].in_node()['internal_layer_id'] = internal_id_count - internal_id_count += 1 - if not 'internal_port_id' in ext_out['internal_data_id'].in_edge(): - ext_out['internal_data_id'].in_edge()['internal_port_id'] = internal_id_count - internal_id_count += 1 - ext_out['internal_layer_id'] = ext_out['internal_data_id'].in_node()['internal_layer_id'] - ext_out['internal_port_id'] = ext_out['internal_data_id'].in_edge()['internal_port_id'] - ext_out['external_port_id'] = internal_id_count - internal_id_count += 1 - - # create TensorIterator layer with pre-computed components - ti_op = TensorIterator(graph, { - 'name': name + '/TensorIterator', - 'body': body, - 'in_ports_count': len(external_inputs), - 'out_ports_count': len(external_outputs), - - 'input_port_map': [ - {field: external_input[field] for field in - ['external_port_id', 'internal_layer_id', 'internal_port_id', 'axis', 'stride', 'part_size', 'start', - 'end']} - for external_input in real_external_inputs], - - 'output_port_map': [ - {field: external_output[field] for field in - ['external_port_id', 'internal_layer_id', 'internal_port_id', 'axis', 'stride', 'part_size', 'start', - 'end']} - for external_output in external_outputs], - 'back_edges': [ - {field: edge[field] for field in ['from_layer', 'from_port', 'to_layer', 'to_port']} - for edge in real_back_edges], - }) - - ti_outs = ti_op.create_node_with_data( - inputs=[inp['external_data_id'] for inp in external_inputs], - edge_attrs=[{'external_port_id': inp['external_port_id']} for inp in external_inputs], - data_nodes=[out['external_data_id'] for out in external_outputs] - ) - - if not isinstance(ti_outs, list): - ti_outs = [ti_outs] - - for i, out in enumerate(ti_outs): - out.in_edge()['external_port_id'] = external_outputs[i]['external_port_id'] - - ti = ti_outs[0].in_node() - TensorIterator.cover_body_input_data_nodes_with_parameter_ops(ti) - TensorIterator.cover_body_constant_data_nodes_with_const_ops(ti) - TensorIterator.normalize_internal_ids(ti) diff --git a/tools/mo/openvino/tools/mo/middle/TensorIteratorOutput.py b/tools/mo/openvino/tools/mo/middle/TensorIteratorOutput.py deleted file mode 
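A rough sketch of the records that TensorIteratorMerge assembles above before handing them to the TensorIterator op; the dictionary keys come from the code, while the integer ids below are made up purely for illustration:

# Not part of the deleted file: example shapes of the port-map and back-edge entries.
example_input_port_map_entry = {
    'external_port_id': 3, 'internal_layer_id': 0, 'internal_port_id': 1,
    'axis': 0, 'stride': 1, 'part_size': 1, 'start': 0, 'end': -1,
}
example_output_port_map_entry = {
    'external_port_id': 4, 'internal_layer_id': 7, 'internal_port_id': 8,
    'axis': 0, 'stride': 1, 'part_size': 1, 'start': 0, 'end': -1,
}
example_back_edge = {'from_layer': 5, 'from_port': 6, 'to_layer': 2, 'to_port': 9}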
100644 index 7910a425068bf7..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/TensorIteratorOutput.py +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.ops.TensorIterator_ops import TensorIteratorOutput -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class SmartOutputMatcher(MiddleReplacementPattern): - """ - This pattern match partitioned outputs for TensorIterator in dynamic_rnn loops in TF. - The structure of pattern without Data nodes between ops. Every node is named as op attribute of this node - (data nodes is marked by (data)): - TensorArray - | | Condition(data) - Flow(data) Handle(data)--------------------------------------------------------------- | - | | | | | - v v v v v - Enter -> Merge -> Switch -> Exit -> TensorArraySize -> Range(0;1) -> TensorArrayGather - | | ^ - | | | - | --------------------------------------------- - | - --------> Identity -> TensorArrayWrite -> NextIteration - """ - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - from openvino.tools.mo.middle.TensorIteratorInput import SmartInputMatcher - return [SmartInputMatcher] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - return [TensorIteratorMerge] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('TensorArray', dict(kind='op', op='TensorArrayV3')), - ('TensorArray_data', dict(kind='data')), - ('TensorArray_flow_data', dict(kind='data')), - ('TensorArrayGather', dict(kind='op', op='TensorArrayGatherV3')), - ('TensorArrayGather_data', dict(kind='data')), - ('range', dict(kind='op', op='Range')), - ('range_data', dict(kind='data')), - ('size', dict(kind='op', op='TensorArraySizeV3')), - ('size_data', dict(kind='data')), - ('start', dict(kind='op', op='Const')), - ('start_data', dict(kind='data')), - ('delta', dict(kind='op', op='Const')), - ('delta_data', dict(kind='data')), - ('TensorArrayWrite', dict(kind='op', op='TensorArrayWriteV3')), - ('TensorArrayWrite_data', dict(kind='data')), - ('NextIteration', dict(kind='op', op='NextIteration')), - ('Condition_data', dict(kind='data')), - ('Identity_2_data', dict(kind='data')), - ('Identity_2', dict(kind='op', op='Identity')), - ('Switch_2', dict(kind='op', op='Switch')), - ('Switch_2_data', dict(kind='data')), - ('Switch_2_data_exit', dict(kind='data')), - ('Merge_2', dict(kind='op', op='Merge')), - ('Merge_2_data', dict(kind='data')), - ('Enter_2', dict(kind='op', op='Enter')), - ('Enter_2_data', dict(kind='data')), - ('WriteEnter', dict(kind='op', op='Enter')), - ('WriteEnter_data', dict(kind='data')), - ('Exit', dict(kind='op', op='Exit')), - ('Exit_data', dict(kind='data')), - ], - edges=[ - ('TensorArray', 'TensorArray_data'), - ('TensorArray', 'TensorArray_flow_data'), - ('TensorArray_flow_data', 'Enter_2'), - ('TensorArray_data', 'WriteEnter'), - ('TensorArray_data', 'TensorArrayGather'), - ('TensorArrayGather', 'TensorArrayGather_data'), - ('TensorArray_data', 'size'), - - ('size', 'size_data'), - ('start', 'start_data'), - ('delta', 'delta_data'), - - ('size_data', 'range', {'in': 1}), - ('start_data', 'range', {'in': 0}), - ('delta_data', 'range', {'in': 2}), - ('range', 'range_data'), - ('range_data', 'TensorArrayGather'), - - ('Enter_2', 'Enter_2_data'), - ('Enter_2_data', 'Merge_2'), - ('Merge_2', 'Merge_2_data'), - 
('Merge_2_data', 'Switch_2'), - ('Switch_2', 'Switch_2_data'), - ('Switch_2', 'Switch_2_data_exit'), - ('Switch_2_data', 'Identity_2'), - ('Identity_2', 'Identity_2_data'), - - ('Switch_2_data_exit', 'Exit'), - ('Exit', 'Exit_data'), - ('Exit_data', 'size'), - ('Exit_data', 'TensorArrayGather'), - - ('WriteEnter', 'WriteEnter_data'), - ('WriteEnter_data', 'TensorArrayWrite', {'in': 0}), - - ('Identity_2_data', 'TensorArrayWrite', {'in': 3}), - - ('TensorArrayWrite', 'TensorArrayWrite_data'), - ('TensorArrayWrite_data', 'NextIteration'), - ('Condition_data', 'Switch_2'), - ], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - log.debug('================== SmartOutputFind ===============') - - assert match['WriteEnter_data'].value is not None - assert match['start_data']['value'] == 0 and match['delta_data']['value'] == 1 - - ta_size = match['TensorArray'].in_node() - - index = match['TensorArrayWrite'].in_node(1) - value = match['TensorArrayWrite'].in_node(2) - - # axis == 0 because in TensorArray we ALWAYS iterate over 0 axis, other params will be fill later (with - # condition) - output = TensorIteratorOutput(graph, dict(axis=0, start=None, stride=None, part_size=None, - external_port_id=str(match['WriteEnter_data'].value), - internal_layer_id=value.id, - name=match['TensorArrayWrite'].name + '/TensorIteratorOutput_' - )) - output.create_node_with_data(inputs=[ta_size, value, index], - data_nodes=[match['TensorArrayGather_data']]) - - # Delete useless nodes - safe_nodes = ['TensorArrayGather_data', 'Condition_data'] - nodes_for_remove = [] - for node in match.keys(): - if node not in safe_nodes: - nodes_for_remove.append(match[node].id) - graph.remove_nodes_from(nodes_for_remove) - - -class SimpleOutputMatcher(MiddleReplacementPattern): - """ - This pattern match partitioned outputs for TensorIterator in dynamic_rnn loops in TF. - The structure of pattern without Data nodes between ops. 
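As an illustrative sketch (not from the deleted files; shapes are hypothetical), the per-iteration TensorArrayWrite results that TensorArrayGather reads back over range(0, size, 1) amount to stacking the written tensors along axis 0, which is what TensorIteratorOutput(axis=0) models in the SmartOutputMatcher replacement above:

import numpy as np

per_step = [np.full((2, 3), step, dtype=np.float32) for step in range(4)]  # hypothetical per-iteration writes
gathered = np.stack(per_step, axis=0)   # TensorArrayGather over indices 0..size-1
assert gathered.shape == (4, 2, 3)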
Every node is named as op attribute of this node - (data nodes is marked by (data)): - TensorArray - | | - Flow(data) Handle(data)------------------------------ - | | | - v v v - Enter -> Merge -> Switch -> Exit -> TensorArrayRead - | - | - | - | - --------> Identity -> TensorArrayWrite -> NextIteration - """ - enabled = True - graph_condition = [lambda graph: graph.graph['is_cyclic']] - - def run_after(self): - return [SmartOutputMatcher] - - def run_before(self): - from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge - from openvino.tools.mo.middle.TensorIteratorCondition import LoopConditionMatcher - return [TensorIteratorMerge, LoopConditionMatcher] - - @staticmethod - def pattern(): - return dict( - nodes=[ - ('TensorArray', dict(kind='op', op='TensorArrayV3')), - ('TensorArray_data', dict(kind='data')), - ('TensorArray_flow_data', dict(kind='data')), - - ('TensorArrayWrite', dict(kind='op', op='TensorArrayWriteV3')), - ('TensorArrayWrite_data', dict(kind='data')), - - ('NextIteration', dict(kind='op', op='NextIteration')), - ('NextIteration_data', dict(kind='data')), - - ('Condition_data', dict(kind='data')), - - ('Identity_2', dict(kind='op', op='Identity')), - ('Identity_2_data', dict(kind='data')), - - ('Switch_2', dict(kind='op', op='Switch')), - ('Switch_2_data', dict(kind='data')), - ('Switch_2_data_exit', dict(kind='data')), - - ('Merge_2', dict(kind='op', op='Merge')), - ('Merge_2_data', dict(kind='data')), - - ('Enter_2', dict(kind='op', op='Enter')), - ('Enter_2_data', dict(kind='data')), - - ('WriteEnter', dict(kind='op', op='Enter')), - ('WriteEnter_data', dict(kind='data')), - - ('Exit', dict(kind='op', op='Exit')), - ('Exit_data', dict(kind='data')), - # - ('TensorArrayRead', dict(op='TensorArrayReadV3')), - ('TensorArrayRead_data', dict(kind='data')), - ], - edges=[ - ('TensorArray', 'TensorArray_data'), - ('TensorArray', 'TensorArray_flow_data'), - ('TensorArray_flow_data', 'Enter_2'), - ('TensorArray_data', 'WriteEnter'), - - - ('Enter_2', 'Enter_2_data'), - ('Enter_2_data', 'Merge_2'), - ('Merge_2', 'Merge_2_data'), - ('Merge_2_data', 'Switch_2'), - ('Switch_2', 'Switch_2_data'), - ('Switch_2', 'Switch_2_data_exit'), - ('Switch_2_data', 'Identity_2'), - ('Identity_2', 'Identity_2_data'), - - ('Switch_2_data_exit', 'Exit'), - ('Exit', 'Exit_data'), - ('Exit_data', 'TensorArrayRead'), - - ('WriteEnter', 'WriteEnter_data'), - ('WriteEnter_data', 'TensorArrayWrite', {'in': 0}), - - ('Identity_2_data', 'TensorArrayWrite', {'in': 3}), - # - ('TensorArrayWrite', 'TensorArrayWrite_data'), - ('TensorArrayWrite_data', 'NextIteration'), - ('Condition_data', 'Switch_2'), - # - ('TensorArray_data', 'TensorArrayRead'), - ('TensorArrayRead', 'TensorArrayRead_data'), - ('NextIteration', 'NextIteration_data'), - ('NextIteration_data', 'Merge_2'), - ], - ) - - @staticmethod - def replace_pattern(graph: Graph, match: dict): - log.debug('================== SimpleOutputFind ===============') - assert match['WriteEnter_data'].value is not None - - index = match['TensorArrayWrite'].in_node(1) - value = match['TensorArrayWrite'].in_node(2) - - # axis == 0 because in TensorArray we ALWAYS iterate over 0 axis, other params will be fill later (with - # condition) - output = TensorIteratorOutput(graph, dict( - external_port_id=str(match['WriteEnter_data'].value), - internal_layer_id=value.id, - name=match['TensorArrayWrite'].name + '/TensorIteratorOutput_' - )) - output.create_node_with_data(inputs=[value, index], - data_nodes=[match['TensorArrayRead_data']]) - - # 
Delete useless nodes - safe_nodes = ['TensorArrayRead_data', 'Condition_data'] - nodes_for_remove = [] - for node in match.keys(): - if node not in safe_nodes: - nodes_for_remove.append(match[node].id) - graph.remove_nodes_from(nodes_for_remove) diff --git a/tools/mo/openvino/tools/mo/middle/TensorIterator_utils.py b/tools/mo/openvino/tools/mo/middle/TensorIterator_utils.py deleted file mode 100644 index 7083b8704f761c..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/TensorIterator_utils.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph, Node - - -def delete_selects_from(graph: Graph, node_idxs: list): - for node_id in node_idxs: - greater_equal = Node(graph, node_id) - for port in greater_equal.out_port(0).get_destinations(): - port_node = port.node - if port_node.soft_get('op') == 'Select': - - port_node.in_port(1).disconnect() - port_node.in_port(0).disconnect() - - # Reconnect select input to next op - next_op_input_port = port_node.out_port(0).get_destination() - select_input = port_node.in_port(2).get_source() - next_op_input_port.get_connection().set_source(select_input) - graph.remove_node(port_node.id) diff --git a/tools/mo/openvino/tools/mo/middle/UnsqueezeTileReshapeBlockToInterpolate.py b/tools/mo/openvino/tools/mo/middle/UnsqueezeTileReshapeBlockToInterpolate.py deleted file mode 100644 index 9496b8ac966ebc..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/UnsqueezeTileReshapeBlockToInterpolate.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Mul -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.utils.shape import node_to_get_shape_value_of_indices - - -class UnsqueezeTileReshapeBlockToInterpolate(MiddleReplacementPattern): - """ - The transformation looks for a sub-graph performing unsqueeze-ing input tensor by some "axis" and then tiling over - it fixed number of "times". This pattern can be represented with the Interpolate operation of mode "nearest" - performing interpolation over specific "axis" with fixed output dimension size equal to "times". - - Note, that the transformation expects that the output from Tile is reshaped back to the tensor with rank equal to - the input tensor rank. This constraints occurs because the pattern appears in the models where these patterns appear - one after another, performing unsqueeze-ing over different dimensions, effectively performing interpolation over - several dimensions. - - These sequences are merged in the 'optimizer/extensions/middle/InterpolateSequenceToInterpolate.py' transformation - into a single Interpolate operation. - - The transformation is applicable only when all following conditions are fulfilled: - - 1. 'Unsqueeze' must be performed with respect to only one axis. - 2. The length of the value of the second input of 'Tile' must be equal to the input rank of 'Unsqueeze' plus 1. - 3. 
All elements of the value of the second input of 'Tile' must be equal to 1, - except the value corresponding the interpolated axis. - 4. The input rank of 'Unsqueeze' and the output rank of 'Reshape' must be equal. - - Finally, because plugins support only Interpolate-4 with 4D or 5D tensor with interpolated data, - we need to check that the input rank of 'Unsqueeze' is equal to 4 or 5. - - Example. - - Let data = np.arange(0, 1 * 2 * 3 * 4).reshape((1, 2, 3, 4)).astype(np.float32), that is - data = mo_array([[[[ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11]], - [[12, 13, 14, 15], - [16, 17, 18, 19], - [20, 21, 22, 23]]]], dtype=np.float32) - After np.tile(np.expand_dims(data, 3), [1, 1, 1, 2, 1]).reshape((1, 2, 3 * 2, 4)) we get - array([[[[ 0, 1, 2, 3], - [ 0, 1, 2, 3], - [ 4, 5, 6, 7], - [ 4, 5, 6, 7], - [ 8, 9, 10, 11], - [ 8, 9, 10, 11]], - [[12, 13, 14, 15], - [12, 13, 14, 15], - [16, 17, 18, 19], - [16, 17, 18, 19], - [20, 21, 22, 23], - [20, 21, 22, 23]]]], dtype=np.float32) - This result is equal to nearest interpolation along with axis = 2 (the second argument of 'expand_dims') - and scale = 2 (the element from the second argument of 'tile' that is not equal to 1). - """ - enabled = True - force_shape_inference = True - - def run_before(self): - from openvino.tools.mo.middle.InterpolateSequenceToInterpolate import InterpolateSequenceToInterpolate - return [InterpolateSequenceToInterpolate] - - def pattern(self): - log.debug('Enabled replacement of a sequence of Unsqueeze, Tile, Reshape with Interpolate.') - return dict( - nodes=[ - ('unsqueeze', dict(kind='op', op='Unsqueeze')), - ('unsqueeze_data', dict(kind='data')), - ('tile', dict(kind='op', op='Tile')), - ('tile_data', dict(kind='data')), - ('reshape', dict(kind='op', op='Reshape')), - ], - edges=[ - ('unsqueeze', 'unsqueeze_data'), - ('unsqueeze_data', 'tile', {'in': 0}), - ('tile', 'tile_data'), - ('tile_data', 'reshape', {'in': 0}), - ] - ) - - @staticmethod - def is_applicable(match: dict) -> bool: - """ - This function checks whether this transformation is applicable. 
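The equivalence described in the class docstring above can be checked with a short NumPy snippet (a verification sketch, not part of the deleted file):

import numpy as np

data = np.arange(0, 1 * 2 * 3 * 4).reshape((1, 2, 3, 4)).astype(np.float32)
unsqueeze_tile_reshape = np.tile(np.expand_dims(data, 3), [1, 1, 1, 2, 1]).reshape((1, 2, 3 * 2, 4))
# Nearest interpolation with scale 2 along axis 2 repeats every slice along that axis.
nearest_like = np.repeat(data, 2, axis=2)
assert np.array_equal(unsqueeze_tile_reshape, nearest_like)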
- :param match: dictionary with nodes from the found pattern - :return: True, if the transformation is applicable - False, otherwise - """ - unsqueeze_node = match['unsqueeze'] - second_input_of_unsqueeze = unsqueeze_node.in_port(1).get_connection().get_source().node - if not second_input_of_unsqueeze.has_valid('value') or len(second_input_of_unsqueeze.value) != 1: - return False - - d_idx = int(second_input_of_unsqueeze.value) - if d_idx == 0: - return False - - second_input_of_tile = match['tile'].in_port(1).get_connection().get_source().node - if not second_input_of_tile.has_valid('value'): - return False - - input_shape_of_unsqueeze = unsqueeze_node.in_port(0).data.get_shape() - input_rank_of_unsqueeze = len(input_shape_of_unsqueeze) - if input_rank_of_unsqueeze not in {4, 5}: - return False - - if input_rank_of_unsqueeze + 1 != len(second_input_of_tile.value): - return False - - expected_tile_constant = np.ones(input_rank_of_unsqueeze + 1, dtype=np.float32) - expected_tile_constant[d_idx] = float(second_input_of_tile.value[d_idx]) - - if not np.array_equal(expected_tile_constant, float32_array(second_input_of_tile.value)): - return False - - reshape_node = match['reshape'] - new_shape = reshape_node.in_port(1).data.get_value() - if new_shape is None or input_rank_of_unsqueeze != len(new_shape): - return False - - return True - - def replace_pattern(self, graph: Graph, match: dict): - if not self.is_applicable(match): - return - - unsqueeze_node = match['unsqueeze'] - unsqueeze_name = unsqueeze_node.soft_get('name', unsqueeze_node.id) - second_input_of_unsqueeze = unsqueeze_node.in_port(1).get_connection().get_source().node - d_idx = int(second_input_of_unsqueeze.value) - axis = d_idx - 1 - - shape_node = Shape(graph, dict(name=unsqueeze_name + '/Shape')).create_node() - axis_len_node = node_to_get_shape_value_of_indices(shape_node, [axis]) - - second_input_of_tile = match['tile'].in_port(1).get_connection().get_source().node - scale = int64_array([second_input_of_tile.value[d_idx]]) - float_scale = float32_array([second_input_of_tile.value[d_idx]]) - mul_node = create_op_with_const_inputs(graph, Mul, {1: scale}, {'name': unsqueeze_name + '/Mul'}) - - axis_len_node.out_port(0).connect(mul_node.in_port(0)) - - interp_node = create_op_with_const_inputs(graph, - Interpolate, - { - 2: float_scale, - 3: int64_array([axis])}, - { - 'mode': 'nearest', - 'antialias': 0, - 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), - 'coordinate_transformation_mode': 'half_pixel', - 'nearest_mode': 'round_prefer_floor', - 'cube_coeff': -0.75, - 'version': 'opset4', - 'shape_calculation_mode': 'scales', - 'in_ports_count': 4, - 'maybe_part_of_sequence': True - }) - mul_node.out_port(0).connect(interp_node.in_port(1)) - - reshape_node = match['reshape'] - reshape_node.out_port(0).get_connection().set_source(interp_node.out_port(0)) - reshape_name = reshape_node.soft_get('name', reshape_node.id) - rename_nodes([(reshape_node, reshape_name + '/delete'), (interp_node, reshape_name)]) - - unsqueeze_connection = unsqueeze_node.in_port(0).get_connection() - unsqueeze_connection.set_destination(interp_node.in_port(0)) - unsqueeze_connection.get_source().connect(shape_node.in_port(0)) diff --git a/tools/mo/openvino/tools/mo/middle/UpsampleToResample.py b/tools/mo/openvino/tools/mo/middle/UpsampleToResample.py deleted file mode 100644 index 17f12352e69697..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/UpsampleToResample.py +++ /dev/null @@ -1,144 +0,0 @@ -# Copyright (C) 2018-2024 Intel 
Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import math -from typing import Dict - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.elementwise import Mul -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.common.layout import get_height_dim, get_width_dim, get_depth_dim -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs, create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, Node, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.strided_slice import StridedSlice - - -class UpsampleToResample(MiddleReplacementPattern): - enabled = True - force_clean_up = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleFinish - return [MiddleFinish] - - def pattern(self): - return dict( - nodes=[ - ('upsample', dict(kind='op', op='Upsample')), - ('output', dict(kind='data'))], - edges=[('upsample', 'output')] - ) - - def replace_pattern(self, graph: Graph, match: Dict[str, Node]): - log.debug('UpsampleToResample is triggered') - upsample = match['upsample'] - upsample_name = upsample.soft_get('name', upsample.id) - input_shape = upsample.in_port(0).data.get_shape() - input_shape_rank = len(input_shape) - if input_shape_rank not in [4, 5]: - log.warning('The input shape is not 4D or 5D for op {}'.format(upsample.soft_get('name'))) - return - - depth_scale = None - layout = graph.graph['layout'] - - if len(upsample.in_nodes()) == 2: - if upsample.in_node(1).value is None: - return - scales = upsample.in_node(1).value - assert len(scales) in (4, 5), 'Supported scales rank is 4 or 5, but it is {} for node {}'.format( - len(scales), upsample_name) - if not (math.isclose(scales[0], 1, rel_tol=1e-5) and math.isclose(scales[1], 1, rel_tol=1e-5)): - return - height_scale = scales[get_height_dim(layout, input_shape_rank)] - width_scale = scales[get_width_dim(layout, input_shape_rank)] - if len(scales) == 5: - depth_scale = scales[get_depth_dim(layout, input_shape_rank)] - else: - height_scale = upsample['height_scale'] - width_scale = upsample['width_scale'] - - if 1 in upsample.in_ports() and not upsample.in_port(1).disconnected(): - upsample.in_port(1).disconnect() - - upsample_name = upsample.soft_get('name', upsample.id) - shape = Shape(graph, {'name': upsample_name + '/0_port'}).create_node() - - layout = graph.graph['layout'] - - if input_shape_rank == 4: - begin_value = int64_array([get_height_dim(layout, input_shape_rank)]) - factor_value = float32_array([height_scale, width_scale]) - else: - begin_value = int64_array([get_depth_dim(layout, input_shape_rank)]) - factor_value = float32_array([depth_scale, height_scale, width_scale]) - - ss = create_op_with_const_inputs(graph, StridedSlice, - {1: begin_value, - 2: int64_array([get_width_dim(layout, input_shape_rank) + 1]), - 3: int64_array([1]) - }, - {'name': upsample_name + '/ss_0_port', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0]) - }) - - mul = 
create_op_node_with_second_input(graph, Mul, factor_value, {'name': upsample_name + '/factor_mul'}) - - source = upsample.in_port(0).get_connection().get_source() - source.connect(shape.in_port(0)) - shape.out_port(0).connect(ss.in_port(0)) - - ss.out_port(0).connect(mul.in_port(0)) - - # Create Interpolate operation - if input_shape_rank == 4: - axes = int64_array([get_height_dim(layout, input_shape_rank), - get_width_dim(layout, input_shape_rank)]) - else: - axes = int64_array([get_depth_dim(layout, input_shape_rank), - get_height_dim(layout, input_shape_rank), - get_width_dim(layout, input_shape_rank)]) - - axes_node = Const(graph, {'name': upsample_name + '/axis', 'value': axes}).create_node() - - interpolate = Interpolate(graph, {'mode': upsample.attrs()['mode'], 'antialias': 0, - 'pads_begin': int64_array([0]), 'pads_end': int64_array([0]), - 'coordinate_transformation_mode': 'half_pixel', - 'nearest_mode': 'round_prefer_floor', 'cube_coeff': -0.75, - 'shape_calculation_mode': 'scales', - 'version': 'opset4', 'in_ports_count': 4}).create_node() - - upsample.add_input_port(1, skip_if_exist=True) - assert upsample.in_port(1).disconnected() - mul.out_port(0).connect(interpolate.in_port(1)) - axes_node.out_port(0).connect(interpolate.in_port(3)) - - scales_node = Const(graph, {'name': upsample_name + '/scales', - 'value': factor_value}).create_node() - scales_node.out_port(0).connect(interpolate.in_port(2)) - - upsample.in_port(0).get_connection().set_destination(interpolate.in_port(0)) - upsample.out_port(0).get_connection().set_source(interpolate.out_port(0)) - - rename_nodes([(upsample, upsample_name + '/delete'), (interpolate, upsample_name)]) - - convert_to_float = Cast(graph, dict(dst_type=np.float32)).create_node() - convert_to_int = Cast(graph, dict(dst_type=np.int64)).create_node() - - mul.in_port(0).get_connection().insert_node(convert_to_float) - mul.out_port(0).get_connection().insert_node(convert_to_int) diff --git a/tools/mo/openvino/tools/mo/middle/UselessMerge.py b/tools/mo/openvino/tools/mo/middle/UselessMerge.py deleted file mode 100644 index 5d2429448be669..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/UselessMerge.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -from openvino.tools.mo.middle.ConstSwitchResolver import ConstSwitchEraser -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.eliminate import remove_op_node_with_data_node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class UselessMergeEraser(MiddleReplacementPattern): - enabled = True - - def run_after(self): - return [ConstSwitchEraser] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleFinish - return [MiddleFinish] - - def pattern(self): - return dict( - nodes=[('merge', dict(kind='op', op='Merge'))], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - if len(graph.in_edges(match['merge'].id)) <= 1: - remove_op_node_with_data_node(graph, match['merge'], list(match['merge'].in_nodes().values())[0]) - log.info("Useles Merge op and data nodes was deleted op='{}'".format(match['merge'].id)) diff --git a/tools/mo/openvino/tools/mo/middle/UselessSplitEraser.py b/tools/mo/openvino/tools/mo/middle/UselessSplitEraser.py deleted file mode 100644 index b3d8291027981e..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/UselessSplitEraser.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 
(C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class UselessSplitEraser(MiddleReplacementPattern): - enabled = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import PreMiddleStart - return [PreMiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def pattern(self): - return dict( - nodes=[('split', {'op': 'Split', 'num_splits': 1})], - edges=[] - ) - - def replace_pattern(self, graph: Graph, match: dict): - node = match['split'] - name = node.soft_get('name', node.id) - - assert node.soft_get('input_port', 0) == 0, \ - 'Internal attribute `input_port` was not resolved on front phase, broken Split {}'.format(name) - assert len(node.out_ports()) == 1 - - node.out_port(0).get_connection().set_source(node.in_port(0).get_connection().get_source()) diff --git a/tools/mo/openvino/tools/mo/middle/__init__.py b/tools/mo/openvino/tools/mo/middle/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/middle/dequantize_linear_resolver.py b/tools/mo/openvino/tools/mo/middle/dequantize_linear_resolver.py deleted file mode 100644 index d646014fd4f017..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/dequantize_linear_resolver.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.elementwise import Mul, Sub -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.reshape import Reshape - - -class DequantizeLinearResolver(MiddleReplacementPattern): - """ - Transformation result depends on the axis value. - If the axis is not set or x_scale input is scalar or 1D tensor with one element then DequantizeLinear is - replaced with the sub-graph which can be expressed with the following formula: - y = (x - x_zero_point) * x_scale - In other cases DequantizeLinear can be replace to formula with addition reshape x_zero_point and x_scale. - Target shape for reshape depend on axis. 
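A minimal NumPy sketch of the decomposition described above, y = (x - x_zero_point) * x_scale, with a per-axis scale broadcast through the reshape the pass inserts (the input values, shapes, and axis below are hypothetical):

import numpy as np

x = np.random.randint(0, 255, size=(1, 3, 2, 2)).astype(np.uint8)   # hypothetical quantized input
x_scale = np.array([0.1, 0.2, 0.3], dtype=np.float32)               # per-channel scale, axis=1
x_zero_point = np.array([128, 128, 128], dtype=np.uint8)
axis = 1

target_shape = np.ones(x.ndim, dtype=np.int64)
target_shape[axis] = x.shape[axis]                                  # [1, 3, 1, 1]
y = (x.astype(np.float32) - x_zero_point.reshape(target_shape).astype(np.float32)) \
    * x_scale.reshape(target_shape)
assert y.shape == x.shape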
- """ - enabled = True - graph_condition = [lambda graph: graph.graph['layout'] == 'NCHW'] - - def run_after(self): - from openvino.tools.mo.middle.quantize_dequantize_linear_resolver import QuantizeDequantizeLinearResolver - return [QuantizeDequantizeLinearResolver] - - def find_and_replace_pattern(self, graph: Graph): - for dequantize_node in graph.get_op_nodes(op='DequantizeLinear'): - node_name = dequantize_node.soft_get('name', dequantize_node.id) - axis = dequantize_node.soft_get('axis', None) - scale_y_shape = dequantize_node.in_port(1).data.get_shape() - model_data_type = data_type_str_to_np(graph.graph['cmd_params'].data_type) - cast = Cast(graph, {'dst_type': model_data_type, 'name': node_name + '/Cast'}).create_node() - dequantize_node.in_port(0).get_connection().set_destination(cast.in_port(0)) - mul = Mul(graph, {'can_be_fused': False}).create_node() - - is_second_port_connected = dequantize_node.is_in_port_connected(2) - if is_second_port_connected: - # its is necessary not to replace subrtract for pattern in offline transformations - # See ConvertQuantizeDequantize transformation in ngraph - sub = Sub(graph, {'name': node_name + '/Sub', 'zero_point_sub': True}).create_node() - cast.out_port(0).connect(sub.in_port(0)) - dequantize_node.in_port(2).get_connection().set_destination(sub.in_port(1)) - sub.out_port(0).connect(mul.in_port(0)) - else: - cast.out_port(0).connect(mul.in_port(0)) - - dequantize_node.in_port(1).get_connection().set_destination(mul.in_port(1)) - dequantize_node.out_port(0).get_connection().set_source(mul.out_port(0)) - rename_nodes([(dequantize_node, node_name + '/TBD'), (mul, node_name)]) - - assert scale_y_shape is not None - if axis is not None and len(scale_y_shape) > 0 and scale_y_shape[0] > 1: - input_shape = cast.in_port(0).data.get_shape() - target_shape = np.ones(len(input_shape), np.int64) - target_shape[axis] = input_shape[axis] - - mul_reshape = create_op_with_const_inputs(graph, Reshape, {1: int64_array(target_shape)}, - {'name': node_name + '/Reshape/Mul'}) - mul.in_port(1).get_connection().set_destination(mul_reshape.in_port(0)) - mul_reshape.out_port(0).connect(mul.in_port(1)) - - if is_second_port_connected: - sub_reshape = create_op_with_const_inputs(graph, Reshape, {1: int64_array(target_shape)}, - {'name': node_name + '/Reshape/Sub'}) - sub.in_port(1).get_connection().set_destination(sub_reshape.in_port(0)) - sub_reshape.out_port(0).connect(sub.in_port(1)) diff --git a/tools/mo/openvino/tools/mo/middle/fusings.py b/tools/mo/openvino/tools/mo/middle/fusings.py deleted file mode 100644 index 9d29853560df63..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/fusings.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.div import Div -from openvino.tools.mo.front.sub import Sub -from openvino.tools.mo.middle.AddFakeQuantizeFuse import AddFakeQuantizeFuse -from openvino.tools.mo.middle.EltwiseInputReshape import normalize_eltwise_inputs -from openvino.tools.mo.middle.MulFakeQuantizeFuse import MulFakeQuantizeFuse -from openvino.tools.mo.middle.RemoveRedundantReshapes import RemoveRedundantReshapes - -from openvino.tools.mo.middle.pass_separator import PostMiddleStart -from openvino.tools.mo.middle.quantize_fuses import MarkNodesToFuseUpToFakeQuantize, FakeQuantizeFuse -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.conv import fuse_pad -from openvino.tools.mo.middle.passes.fusing.decomposition import 
convert_scale_shift_to_mul_add, convert_batch_norm -from openvino.tools.mo.middle.passes.fusing.fuse_grouped_conv import grouped_convolutions_fusing -from openvino.tools.mo.middle.passes.fusing.fuse_linear_ops import fuse_linear_ops -from openvino.tools.mo.middle.passes.fusing.fuse_linear_seq import fuse_mul_add_sequence -from openvino.tools.mo.middle.passes.fusing.mark_unfused_nodes import mark_unfused_nodes, mark_shape_of_sugraph_as_unfusable -from openvino.tools.mo.middle.passes.fusing.resnet_optimization import stride_optimization -from openvino.tools.mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class Fusing(MiddleReplacementPattern): - enabled = True - replacement_id = "fusing" - force_clean_up = True - run_not_recursively = True - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import MiddleFinish - return [MiddleFinish] - - def run_before(self): - # the Fusing transformation adds Reshape layers in some cases which could be removed by the - # RemoveRedundantReshapes transformation - return [PostMiddleStart, RemoveRedundantReshapes] - - def find_and_replace_pattern(self, graph: Graph): - fw = graph.graph['fw'] - argv = graph.graph['cmd_params'] - layout = graph.graph['layout'] - - mark_shape_of_sugraph_as_unfusable(graph) - for_graph_and_each_sub_graph_recursively(graph, fuse_pad) - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - # Converting FusedBatchNorm layer to Mul->Add->Mul->Add sequence - # IE doesn't support batchNormInference with 4 inputs, so we have to split it to two ScaleShift - for_graph_and_each_sub_graph_recursively(graph, convert_batch_norm) - - if fw == 'caffe': - # Converting ScaleShift layer to Mul->Add - for_graph_and_each_sub_graph_recursively(graph, convert_scale_shift_to_mul_add) - - for_graph_and_each_sub_graph_recursively(graph, Div().find_and_replace_pattern) - for_graph_and_each_sub_graph_recursively(graph, Sub().find_and_replace_pattern) - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - if fw != 'caffe': - # Converting ScaleShift layer to Mul->Add - for_graph_and_each_sub_graph_recursively(graph, convert_scale_shift_to_mul_add) - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - # Fusing the sequences of Mul/Add operations - for_graph_and_each_sub_graph_recursively(graph, fuse_mul_add_sequence) - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - normalize_eltwise_inputs(graph) - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - # Fusing linear operation to Convolution - for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops) - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - for_graph_and_each_sub_graph_recursively(graph, grouped_convolutions_fusing) - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops) - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - for_graph_and_each_sub_graph_recursively(graph, normalize_eltwise_inputs) - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - MarkNodesToFuseUpToFakeQuantize().find_and_replace_pattern(graph) - FakeQuantizeFuse().find_and_replace_pattern(graph) - AddFakeQuantizeFuse().find_and_replace_pattern(graph) - MulFakeQuantizeFuse().find_and_replace_pattern(graph) - 
for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - mark_shape_of_sugraph_as_unfusable(graph) - for_graph_and_each_sub_graph_recursively(graph, fuse_pad) - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - if layout != 'NHWC': - stride_optimization(graph) diff --git a/tools/mo/openvino/tools/mo/middle/layer_normalization.py b/tools/mo/openvino/tools/mo/middle/layer_normalization.py deleted file mode 100644 index 7c16aa23c24390..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/layer_normalization.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import logging as log - -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.elementwise import Mul, Add -from openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.ops.unsqueeze import Unsqueeze -from openvino.tools.mo.utils.error import Error - - -class LayerNormalization(MiddleReplacementPattern): - """ - Decompose LayerNorm(x) to MVN(x) * gamma + beta - - LayerNorm is supported with only 1 output. - """ - enabled = True - - def find_and_replace_pattern(self, graph: Graph): - for node in graph.get_op_nodes(op='LayerNorm'): - node_name = node.soft_get('name', node.id) - - if node.output_mean_var is True: - if not node.out_port(1).disconnected() or not node.out_port(2).disconnected(): - raise Error("Node {} is supported with only one output".format(node_name)) - log.error('LayerNorm node {} with attribute "output_mean_var" = True is not supported.' 
- 'But since the node has one output, the conversion will continue.'.format(node_name), - extra={'is_warning': True}) - - input_shape = node.in_port(0).data.get_shape() - assert node.has_valid('axis'), 'Incorrect axis value for the node {}'.format(node_name) - axis = node.axis - - mvn = create_op_node_with_second_input(graph, MVN, int64_array([axis]), - dict(eps=node.epsilon, name=node_name + '/LayerNorm/MVN_', - across_channels=1, normalize_variance=1, eps_mode='inside_sqrt')) - - mul = Mul(graph, {'name': node_name + '/LayerNorm/mul_'}).create_node() - add = Add(graph, {'name': mul.name + '/LayerNorm/add_'}).create_node() - - node.in_port(0).get_connection().set_destination(mvn.in_port(0)) - node.in_port(1).get_connection().set_destination(mul.in_port(1)) - node.in_port(2).get_connection().set_destination(add.in_port(1)) - - mvn.out_port(0).connect(mul.in_port(0)) - mul.out_port(0).connect(add.in_port(0)) - node.out_port(0).get_connection().set_source(add.out_port(0)) - - # MXNet LayerNorm gamma and beta attributes are 1D tensors with shape = [input_shape[axis]] - # We have to unsqueeze values for Mul and Add operations to avoid shapes incompatibility problems - # if axis != -1 - canonical_axis = get_canonical_axis_index(input_shape, axis) - unsqueeze_value = [] - for idx, val in enumerate(input_shape): - if idx != canonical_axis: - unsqueeze_value.append(idx) - - mul_const_unsqueeze = create_op_node_with_second_input(graph, Unsqueeze, - int64_array(unsqueeze_value), - dict(name=mul.name + '/Unsqueeze', - override_output_shape=True)) - add_const_unsqueeze = create_op_node_with_second_input(graph, Unsqueeze, - int64_array(unsqueeze_value), - dict(name=add.name + '/Unsqueeze', - override_output_shape=True)) - - mul.in_port(1).get_connection().insert_node(mul_const_unsqueeze) - add.in_port(1).get_connection().insert_node(add_const_unsqueeze) - - rename_nodes([(node, node_name + '/ShouldBeDeleted'), (add, node_name)]) diff --git a/tools/mo/openvino/tools/mo/middle/pass_separator.py b/tools/mo/openvino/tools/mo/middle/pass_separator.py deleted file mode 100644 index 2c03c0cebae89f..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/pass_separator.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class PreMiddleStart(MiddleReplacementPattern): - enabled = True - - def run_after(self): - return [] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - pass - - -class MiddleStart(MiddleReplacementPattern): - enabled = True - - def run_after(self): - return [] - - def run_before(self): - - return [] - - def find_and_replace_pattern(self, graph: Graph): - pass - - -class MiddleFinish(MiddleReplacementPattern): - enabled = True - - def run_after(self): - return [] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - pass - - -class PostMiddleStart(MiddleReplacementPattern): - enabled = True - - def run_after(self): - return [] - - def run_before(self): - return [] - - def find_and_replace_pattern(self, graph: Graph): - pass - diff --git a/tools/mo/openvino/tools/mo/middle/passes/__init__.py b/tools/mo/openvino/tools/mo/middle/passes/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 
Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/middle/passes/conv.py b/tools/mo/openvino/tools/mo/middle/passes/conv.py deleted file mode 100644 index 774218768f1e3e..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/conv.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.layout import get_batch_dim, get_features_dim -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.extractor import add_attrs_props -from openvino.tools.mo.front.extractor import update_ie_fields -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.fusing.helpers import get_value_id, get_tensor_id -from openvino.tools.mo.middle.pattern_match import apply_pattern - - -def pad_op_transform(graph: Graph, match: dict): - op = match['op'] - pad_op: Node = match['pad_op'] - - # to keep reshape-ability if Pad receives pads_begin/pads_end from shape subgraph - if pad_op.in_port(1).get_source().node.soft_get('can_be_fused') is False: - return - - if pad_op.mode != 'constant': - log.info('The pad node "{}" with pad mode "{}" cannot be fused.'.format(pad_op.soft_get('name'), pad_op.mode)) - return - - if op.type == 'Pooling' and op.pool_method == 'max': - return - - if pad_op.mode == 'constant': - fill_value = pad_op.in_port(3).data.get_value() - if fill_value is None or fill_value != 0.0: - log.info('The pad node "{}" with non-zero fill value cannot be fused.'.format(pad_op.soft_get('name'))) - return - - input_tensor_dims = len(match['pad_output'].shape) - for in_port in [1, 2]: - pads = pad_op.in_port(in_port).data.get_value() - if pads[get_features_dim(op.graph.graph['layout'], input_tensor_dims)] != 0 or \ - pads[get_batch_dim(op.graph.graph['layout'], input_tensor_dims)] != 0: - log.info('The pad node "{}" with padding over feature/batch dimension cannot be fused.'.format( - pad_op.soft_get('name'))) - return - - op.pad += np.concatenate([pad_op.in_port(1).data.get_value().reshape([-1, 1]), - pad_op.in_port(2).data.get_value().reshape([-1, 1])], axis=1) - op.pad_spatial_shape = op.pad[op.spatial_dims] - op['auto_pad'] = None - if op.type == 'Pooling': - op['exclude_pad'] = False - assert (graph[match['pad_output'].node][match['op'].node][0]['in'] == 0) - - match['op'].in_port(0).disconnect() - pad_op.in_port(0).get_connection().add_destination(match['op'].in_port(0)) - - -def fuse_pad(graph: Graph): - for op_type in ['Convolution', 'Pooling', 'Deconvolution']: - apply_pattern( - graph, - nodes=[ - ('pad_op', dict(kind='op', op='Pad')), - ('pad_output', dict(kind='data')), - ('op', dict(kind='op', type=op_type))], - edges=[('pad_op', 'pad_output'), - ('pad_output', 'op', {'in': 0})], - action=pad_op_transform - ) - - -def muladd_to_scaleshift_action(graph: Graph, match: dict): - mul = match['mul'] - add = match['add'] - output = match['output'] - - # Pass works correctly only in case when node have only 1 output - if len(mul.out_port(0).get_destinations()) > 1: - return - - if mul.soft_get('can_be_scaleshift') is False or add.soft_get('can_be_scaleshift') is False: - return - - mul_weights_id = get_value_id(mul) - mul_input_id = get_tensor_id(mul) - add_weights_id = get_value_id(add) - - if mul_weights_id is None: - log.debug("Mul->Add to ScaleShift: Mul {} has no weights".format(mul.name)) - return - if mul_input_id is 
None: - log.debug("Mul->Add to ScaleShift: Mul {} has no input".format(mul.name)) - return - if add_weights_id is None: - log.debug("Mul->Add to ScaleShift: Add {} has no weights".format(add.name)) - return - - input = mul.in_node(mul_input_id) - weights = mul.in_node(mul_weights_id) - bias = add.in_node(add_weights_id) - - # Transform values - weights.value = np.squeeze(weights.value) - weights.shape = int64_array(weights.value.shape) - - bias.value = np.squeeze(bias.value) - bias.shape = int64_array(bias.value.shape) - - # Broadcast weights if they are scalar - if weights.value.ndim == 0 and bias.value.ndim == 1: - weights.value = np.full(bias.shape, weights.value.item(), dtype=weights.value.dtype) - weights.shape = int64_array(weights.value.shape) - - if bias.shape != weights.shape: - log.warning('Mul->Add to ScaleShift conversion stopped {} != {}'.format(weights.shape, bias.shape)) - return - - if bias.value.ndim != weights.value.ndim or bias.value.size != weights.value.size: - log.debug("Skipping Mul->Add to ScaleShift conversion for nodes {}, {} because of different weights " - "and biases".format(mul.name, add.name)) - return - - if bias.value.size == 1 and weights.value.size == 1: - log.debug("Skipping Mul->Add to ScaleShift conversion for nodes {}, {}. Will be converted to Power" - "".format(mul.name, add.name)) - return - - op_name = "ScaleShift" - - log.debug("Fusing Mul->Add to {}. Input nodes: {} and {}, bias.shape = {}, weights.shape = {}" - "".format(op_name, mul.id, add.id, bias.shape, weights.shape)) - - graph.remove_edge(input.node, mul.id) - graph.remove_edge(weights.node, mul.id) - graph.remove_edge(bias.node, add.id) - graph.remove_edge(add.node, output.id) - - op_node = graph.unique_id(mul.name + '/Fused{}_'.format(op_name)) - - graph.add_node(op_node, **add_attrs_props(dict(kind='op', type=op_name, name=op_node, op=op_name, - data_type=input.data_type))) - scsh = Node(graph, op_node) - scsh.add_input_port(0) - scsh.add_input_port(1) - scsh.add_input_port(2) - scsh.add_output_port(0) - - update_ie_fields(graph.node[op_node]) - - graph.add_edges_from([ - (input.node, op_node, {'in': 0}), - (weights.node, op_node, {'in': 1, 'bin': 'weights'}), - (bias.node, op_node, {'in': 2, 'bin': 'biases'}), - (op_node, output.node, {'out': 0}) - ]) - - return - - -def batch_norm_fuse_action(graph: Graph, match: dict): - """ - Multiply convolution kernel by batch normalization coefficient and remove mul op. 
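The removed batch_norm_fuse_action pass folds a constant per-channel multiplier into the preceding Conv2D kernel so the standalone Mul can be dropped. A minimal numpy sketch of that arithmetic, assuming an HWIO kernel layout and a per-output-channel multiplier (shapes and names below are illustrative, not taken from the removed code):

import numpy as np

kernel = np.random.rand(3, 3, 8, 16).astype(np.float32)  # H, W, C_in, C_out (assumed layout)
norm = np.random.rand(16).astype(np.float32)             # per-output-channel multiplier

# For a linear convolution, conv(x, kernel) * norm == conv(x, kernel * norm),
# so scaling the kernel makes the Mul node redundant.
fused_kernel = kernel * norm                              # broadcasts over the last axis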
- """ - if match['norm'].value is None or match['kernel'].value is None: - # cannot fuse non-const normalization coefficients - return - if len(graph.out_edges(match['conv_output'].node)) > 1 or len(graph.out_edges(match['kernel'].node)) > 1: - # we cannot modify original kernel or convolution, if they are used multiple times - # TODO make a copy of conv and kernel instead of this check - return - match['kernel'].value = match['kernel'].value * match['norm'].value - graph.remove_edge(match['conv_output'].node, match['mul'].node) - graph.remove_edge(match['mul'].node, match['mul_output'].node) - # graph.remove_node(match['mul'].node) # if we remove a node, next iteration over isomorphisms gives an error - graph.add_edge(match['conv'].node, match['mul_output'].node, out=0) - - -def batch_norm_fuse(graph: Graph): - apply_pattern( - graph, - nodes=[ - ('kernel', dict(kind='data')), - ('conv', dict(kind='op', op='Conv2D')), - ('conv_output', dict(kind='data')), - ('norm', dict(kind='data')), - ('mul', dict(kind='op', op='Mul')), - ('mul_output', dict(kind='data'))], - edges=[ - ('kernel', 'conv', {'in': 1}), - ('conv', 'conv_output'), - ('conv_output', 'mul', {'in': 0}), # TODO get rid of explicit input port number, mul is a commutative op - ('norm', 'mul', {'in': 1}), # TODO get rig of explicit input port number, mul is a commutative op - ('mul', 'mul_output')], - action=batch_norm_fuse_action - ) - return graph diff --git a/tools/mo/openvino/tools/mo/middle/passes/convert_data_type.py b/tools/mo/openvino/tools/mo/middle/passes/convert_data_type.py deleted file mode 100644 index 5971e6b5c29a61..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/convert_data_type.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.extractor import get_new_placeholder_name -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - -""" -Packed data of custom types are stored in numpy uint8 data type. 
-To distinguish true uint8 and custom data we introduce this class not to store, -but to have unique data type in SUPPORTED_DATA_TYPES map -""" - - -class packed_U1(np.generic): - pass - - -class packed_U4(np.generic): - pass - - -class packed_I4(np.generic): - pass - - -SUPPORTED_DATA_TYPES = { - 'float': (np.float32, 'FP32', 'f32'), - 'half': (np.float16, 'FP16', 'f16'), - 'FP32': (np.float32, 'FP32', 'f32'), - 'FP64': (np.float64, 'FP64', 'f64'), - 'FP16': (np.float16, 'FP16', 'f16'), - 'I32': (np.int32, 'I32', 'i32'), - 'I64': (np.int64, 'I64', 'i64'), - 'int8': (np.int8, 'I8', 'i8'), - 'int32': (np.int32, 'I32', 'i32'), - 'int64': (np.int64, 'I64', 'i64'), - 'bool': (bool, 'BOOL', 'boolean'), - 'uint8': (np.uint8, 'U8', 'u8'), - 'uint32': (np.uint32, 'U32', 'u32'), - 'uint64': (np.uint64, 'U64', 'u64'), - - # custom types - 'U1': (packed_U1, 'U1', 'u1'), - 'int4': (packed_I4, 'I4', 'i4'), - 'uint4': (packed_U4, 'U4', 'u4'), - 'I4': (packed_I4, 'I4', 'i4'), - 'U4': (packed_U4, 'U4', 'u4'), -} - - -def data_type_str_to_np(data_type_str: str): - return SUPPORTED_DATA_TYPES[data_type_str][0] if data_type_str in SUPPORTED_DATA_TYPES else None - - -def data_type_str_to_precision(data_type_str: str): - return SUPPORTED_DATA_TYPES[data_type_str][1] if data_type_str in SUPPORTED_DATA_TYPES else None - - -def data_type_str_to_destination_type(data_type_str: str): - return SUPPORTED_DATA_TYPES[data_type_str][2] if data_type_str in SUPPORTED_DATA_TYPES else None - - -def np_data_type_to_precision(np_data_type): - for np_t, precision, _ in SUPPORTED_DATA_TYPES.values(): - if np_t == np_data_type: - return precision - raise Error('Data type "{}" is not supported'.format(np_data_type)) - - -def np_data_type_to_destination_type(np_data_type): - for np_t, _, destination_type in SUPPORTED_DATA_TYPES.values(): - if np_t == np_data_type: - return destination_type - raise Error('Data type "{}" is not supported'.format(np_data_type)) - - -def destination_type_to_np_data_type(dst_type): - for np_t, _, destination_type in SUPPORTED_DATA_TYPES.values(): - if destination_type == dst_type: - return np_t - raise Error('Destination type "{}" is not supported'.format(dst_type)) - - -def precision_to_destination_type(data_type_str): - for _, precision, destination_type in SUPPORTED_DATA_TYPES.values(): - if precision == data_type_str: - return destination_type - raise Error('Data type "{}" is not supported'.format(data_type_str)) - - -def convert_blob(blob: np.ndarray, dst_type: type): - if blob.dtype == dst_type: - return blob, None, None - - converted_blob = blob.astype(dtype=dst_type, casting="unsafe") - if dst_type in (np.int32, np.int64, np.uint8, np.int8) and not np.array_equal(blob, converted_blob): - raise Error('The conversion of blob with value "{}" to dst_type "{}" results in rounding'.format( - blob, dst_type)) - - finite_match = (np.isfinite(blob) != np.isfinite(converted_blob)) - zero_match = ((blob == 0) != (converted_blob == 0)) - finite_match_count = np.count_nonzero(finite_match) - zero_match_count = np.count_nonzero(zero_match) - - return converted_blob, finite_match_count, zero_match_count - - -def convert_node_blobs(graph: Graph, node: Node, data_type: type): - out_edges = graph.out_edges(node.node, data=True) - - # if the data.value is used as binary weights - if any('bin' in d for _, __, d in out_edges): - blob = node.value - if blob.dtype != data_type: - new_blob, finite_match_count, zero_match_count = convert_blob(blob, data_type) - consumers = [x.name if x.has_valid('name') else '' for x 
in node.out_nodes()] - log.debug( - 'Blob was converted to {} while dumping to the bin file. This blob is an input for {} nodes.'.format( - data_type, consumers)) - if finite_match_count: - log.error( - ("{} elements of {} were clipped to infinity while converting a blob for node [{}] to {}. " + - refer_to_faq_msg(76)).format(finite_match_count, blob.size, consumers, data_type)) - if zero_match_count: - log.warning( - ("{} elements of {} were clipped to zero while converting a blob for node [{}] to {}. " + - refer_to_faq_msg(77)).format(zero_match_count, blob.size, consumers, data_type)) - - node.value = new_blob - # for the constant node need to propagate the converted value to the node output because there is a fake - # input data for the 'Const' nodes being generated in the CreateConstNodesReplacement - if len(node.out_nodes()) == 1 and node.out_node(0).op == 'Const': - const_node = node.out_node(0) - const_node.value = new_blob - const_node.infer(const_node) - const_node.type_infer(const_node) - - -def convert_parameters_data_type(graph: Graph, data_type_str: str): - inputs = graph.get_op_nodes(op='Parameter') - data_type = data_type_str_to_np(data_type_str) - user_defined_data_types = graph.graph['user_shapes'] if 'user_shapes' in graph.graph else None - for input in inputs: - user_defined_type = None - name = input.soft_get('initial_node_name', input.id) - - # override data type for Parameter specified by the user. This is a workaround for the issue in the - # extensions.middle.ChangePlaceholderTypes transformation which has an incorrect condition and always overrides - # Parameter data type to np.float32. When the transformation is fixed the code below must be updated - if user_defined_data_types is not None and name in user_defined_data_types: - for desc in user_defined_data_types[name]: - if 'port' in desc and desc['port'] is None: # neither input nor output port specified - user_defined_type = desc.get('data_type', None) - else: # need to check the particular port the Parameter was created for - p_name = get_new_placeholder_name(name, 'out' in desc, desc['out'] if 'out' in desc else desc['in']) - if p_name == input.soft_get('name'): - user_defined_type = desc.get('data_type', None) - if user_defined_type is not None: - log.info('Overriding Parameter node {} data type to {}'.format(name, user_defined_type)) - input['data_type'] = user_defined_type - input.out_port(0).set_data_type(user_defined_type, True) - elif not input.has_valid('data_type') or input.data_type == np.float32: - input['data_type'] = data_type - input.out_port(0).set_data_type(data_type, True) - else: - log.info('Do not change data type for node {}'.format(input.soft_get('name'))) - - -def convert_blobs(graph: Graph, data_type_str: str): - for node in graph.get_data_nodes(): - if node.value is not None: - try: - if node.value.dtype in [np.float32, np.float64, np.float16] and not node.has_and_set('correct_data_type'): - convert_node_blobs(graph, node, data_type_str_to_np(data_type_str)) - except Exception as e: - raise Error('Coudn\'t convert blob {}, details: {}', node.soft_get('name'), e) from e diff --git a/tools/mo/openvino/tools/mo/middle/passes/debug.py b/tools/mo/openvino/tools/mo/middle/passes/debug.py deleted file mode 100644 index 8b7e3971db6012..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/debug.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import * - - -def 
print_attributes_in_col(attrs: dict, indent: int, exclude_attrs: list): - for key in sorted(attrs.keys()): - if key not in exclude_attrs: - print(' ' * indent + key + ': ' + str(attrs[key])) - - -def debug_ir_emitter(graph, exclude_attrs: list = []): - print("--- DEBUG IR BEGIN ---") - # print nodes in topological order; print all attributes except 'pb' - nodes = nx.topological_sort(graph) - np.set_printoptions(threshold=10) - for node in nodes: - attrs = graph.node[node] - print('Node:', node) - if attrs['kind'] == 'op': - for idx, value in Node(graph, node).in_nodes().items(): - print('input', idx, ':', value.node, - ': ' + str(graph.in_edges(value.node)[0][0]) if len(graph.in_edges(value.node)) else '') - if 'op' in attrs: - print('Op:', attrs['op']) - print_attributes_in_col(attrs, 4, exclude_attrs) - if attrs['kind'] == 'op': - for idx, value in Node(graph, node).out_nodes().items(): - print('output', idx, ':', value.node, - ': ' + str(graph.out_edges(value.node)[0][1]) if len(graph.out_edges(value.node)) else '') - print('') - print("--- DEBUG IR END ---") - - -def get_output_node_names(graph: Graph): - result = [] - for node in graph.nodes(): - node = Node(graph, node) - if len(node.out_nodes()) == 0: - result.append(node.in_node().name) - return result diff --git a/tools/mo/openvino/tools/mo/middle/passes/eliminate.py b/tools/mo/openvino/tools/mo/middle/passes/eliminate.py deleted file mode 100644 index b9d05e784b17df..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/eliminate.py +++ /dev/null @@ -1,253 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import re -from collections import deque - -import networkx as nx -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, compatible_shapes -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import deprecated_api - - -# TODO: dep warning -def get_nodes_with_attributes(graph, **attrs: dict): - node_attrs = graph.nodes(data=True) - return [n for n, d in node_attrs if all(a in d.items() for a in attrs.items())] - - -def reverse_dfs(graph, node_name: str, update_func: callable, visited: set = None): - d = deque() - - if visited is None: - visited = set() - visited.add(node_name) - d.appendleft(node_name) - while len(d) != 0: - cur_node = d.popleft() - update_func(graph, cur_node) - for in_node_name, _ in graph.in_edges(cur_node): - if in_node_name not in visited: - visited.add(in_node_name) - d.append(in_node_name) - - -def mark_input_nodes(graph, node_name: str, key: str, value): - for input, _ in graph.in_edges(node_name): - graph.node[input][key] = value - - -def mark_output_nodes(graph, node_name: str, key: str, value): - for output, _ in graph.out_edges(node_name): - graph.node[output][key] = value - - -def mark_output_reachable_nodes(graph): - """ - Mark nodes whether they are outputs reachable or not. The node is considered output reachable if it is connected to - one of the nodes that has attribute op=Result. 
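The removed mark_output_reachable_nodes/reverse_dfs pair seeds every Result node and walks the graph backwards, so anything without a path to a Result can later be dropped as dead. A small networkx sketch of the same idea (the toy graph and node names are made up for illustration only):

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([('Parameter', 'Conv'), ('Conv', 'Result'), ('Orphan', 'Dangling')])

reachable = set()
for result in (n for n in g if n == 'Result'):   # stand-in for nodes with op='Result'
    reachable.add(result)
    reachable.update(nx.ancestors(g, result))    # every node with a path to a Result

dead = set(g) - reachable                        # {'Orphan', 'Dangling'}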
- """ - nx.set_node_attributes(G=graph, name='is_output_reachable', values=False) - outputs = graph.get_nodes_with_attributes(op='Result') - log.debug('The following nodes are seeded as output reachable:\n{}'.format('\n'.join(sorted(map(str, outputs))))) - nx.set_node_attributes(G=graph, name='is_output_reachable', values={n: True for n in outputs}) - visited = set() - for output_name in outputs: - reverse_dfs(graph, output_name, - lambda graph, node_name: mark_input_nodes(graph, node_name, 'is_output_reachable', True), visited) - - -def mark_undead_nodes(graph, undead_types: list): - """ - Mark output nodes and nodes of the specific type as undead, meaning that they should survive the dead nodes - elimination phase. Then mark all children nodes of the undead nodes (except children of inputs) as undead. - :param graph: graph to operate on. - :param undead_types: list of node types that should be marked as undead. - :return: updated graph where each has attribute 'is_undead'. - """ - from openvino.tools.mo.utils.graph import bfs_search - - nx.set_node_attributes(G=graph, name='is_undead', values=False) - - undead_types_with_result = undead_types + ['Result'] - undead_nodes = [] - for node in graph.get_op_nodes(): - node_type = node.soft_get('type', node.soft_get('op')) - if node_type in undead_types_with_result: - undead_nodes.append(node.id) - - nx.set_node_attributes(G=graph, name='is_undead', values={n: True for n in undead_nodes}) - # propagate 'undead' attribute to children nodes of undead nodes if the node produces constant value - for node_name in bfs_search(graph, undead_nodes): - if graph.node[node_name]['is_undead']: - for _, dst_node_name in graph.out_edges(node_name): - node_attrs = graph.node[dst_node_name] - if 'kind' in node_attrs and ( - node_attrs['kind'] == 'data' and node_attrs['value'] is not None or node_attrs['kind'] == 'op'): - graph.node[dst_node_name]['is_undead'] = True - - # mark input nodes as undead - inputs = graph.get_nodes_with_attributes(is_input=True) - nx.set_node_attributes(G=graph, name='is_undead', values={n: True for n in inputs}) - - -def mark_const_producer_nodes(graph): - """ - Mark nodes that produce constant values. - :param graph: graph to operate on. - :return: . - """ - nx.set_node_attributes(G=graph, name='is_const_producer', values=True) - - for node in graph.pseudo_topological_sort(): - for input, output, attrs in graph.in_edges(node.id, data=True): - if 'control_flow_edge' in attrs and attrs['control_flow_edge']: - graph.node[input]['is_const_producer'] = False - graph.node[output]['is_const_producer'] = False - - if not node.has('value') or node.value is None or not is_fully_defined(node.value): - for input, _ in graph.in_edges(node.id): - graph.node[input]['is_const_producer'] = False - - -def eliminate_dead_nodes(graph): - from openvino.tools.mo.graph.graph import Node - nodes_to_remove = set() - for node_name, node_attrs in graph.nodes(data=True): - # The Const operation node may have set an attribute 'nchw_layout' attribute to prevent shape permutation. - # During graph clean-up the operation node is removed and the attribute is lost. - # This results in permutation of the Const shape in the IR and wrong inference results. - # Here we explicitly save the 'nchw_layout' attribute in the data node to prevent permutation." 
- if node_attrs.get('type', None) == 'Const': - if node_attrs.get('nchw_layout', False): - Node(graph, node_name).out_node()['nchw_layout'] = True - if np.all(node_attrs.get('force_shape', False)): - Node(graph, node_name).out_node()['force_shape'] = node_attrs['force_shape'] - if node_attrs.get('force_type', False): - Node(graph, node_name).out_node()['force_type'] = node_attrs['force_type'] - - if not node_attrs['is_output_reachable'] or \ - (node_attrs['is_const_producer'] and (not node_attrs['is_undead'] or - node_attrs.get('force_dead_node', False))): - nodes_to_remove.add(node_name) - log.debug('Removing the following dead nodes: {}'.format('\n'.join(sorted(map(str, nodes_to_remove))))) - graph.remove_nodes_from(nodes_to_remove) - - -def add_constant_operations(graph): - data_nodes = graph.get_data_nodes(has_value=True) - for node in data_nodes: - # If data node has no producers we create Const operation - if len(node.in_nodes()) == 0 and len(node.out_nodes()) != 0: - # It's necessary to import here due to cycle dependencies - from openvino.tools.mo.ops.const import Const - from openvino.tools.mo.utils.runtime_info import RTInfo - name = node.soft_get('name', node.id) - new_name = re.sub(r'\/Output_\d+\/Data_(.?)+', '', name) - const_node = Const(graph, dict(value=node.value, name=new_name, - force_shape=node.soft_get('force_shape', None), - override_output_shape=node.has_valid('force_shape'), - force_type=node.soft_get('force_type', None), - correct_data_type=node.soft_get('correct_data_type', False), - rt_info=node.soft_get('rt_info', RTInfo()), - )).create_node() - graph.add_edges_from([(const_node.id, node.id, {'out': 0})]) - - -def shape_inference(graph): - for node in graph.pseudo_topological_sort(): - if node.has_and_set('need_shape_inference'): - old_out_shapes = [port.data.get_shape() for port in node.out_ports().values() if not port.disconnected()] - node.infer(node) - new_out_shapes = [port.data.get_shape() for port in node.out_ports().values() if not port.disconnected()] - if not node.has_and_set('override_output_shape'): - for shape1, shape2 in zip(old_out_shapes, new_out_shapes): - # do not use strict shapes comparison because after applying transformation the output shape may be - # specialized and some dynamic dimension become static - if shape1 is not None and not compatible_shapes(shape1, shape2): - raise Error("After partial shape inference were found shape collision for node {} (old shape: " - "{}, new shape: {})".format(node.name, shape1, shape2)) - else: - del node['override_output_shape'] - node.need_shape_inference = False - - -@deprecated_api('Graph', 'clean_up') -def graph_clean_up(graph, undead_node_types: list = None): - graph.clean_up(undead_node_types) - - -@deprecated_api('Graph', 'clean_up') -def graph_clean_up_tf(graph): - graph.clean_up() - - -@deprecated_api('Graph', 'clean_up') -def graph_clean_up_onnx(graph): - graph.clean_up() - - -# TODO: unit tests -def merge_data_nodes(graph, survived, removed): - if survived.has_and_set('op') and survived.op == 'Result': - graph.node[removed.id].update({'op': 'Result'}) - - for u, v, d in list(graph.in_edges(removed.id, data=True)): - graph.add_edges_from([(u, survived.id, d)]) - graph.remove_edge(u, v) - - for u, v, d in list(graph.out_edges(removed.id, data=True)): - graph.add_edges_from([(survived.id, v, d)]) - graph.remove_edge(u, v) - - for attr in graph.node[removed.id]: - if not attr in ['name']: - # We need to save debug info from removed data node - if attr == 'fw_tensor_debug_info': - if not 
survived.has_valid(attr): - survived[attr] = [] - for fw_tensor_debug_info in removed[attr]: - survived[attr].append(fw_tensor_debug_info) - else: - survived[attr] = removed[attr] - - -# TODO: unit tests -def remove_op_node_with_data_node(graph, node_to_remove, input_data_node=None): - from openvino.tools.mo.graph.graph import Node - assert node_to_remove.kind == 'op' - if input_data_node is None: - input_data_node = node_to_remove.in_node() - output_node = [v for _, v in graph.out_edges(node_to_remove.id)] - assert len(output_node) == 1, "Cannot remove node producing two or more output tensors" - output_node = Node(graph, output_node[0]) - assert output_node.kind == 'data', "The function must be used after partial infer" - - graph.remove_edge(input_data_node.id, node_to_remove.id) - graph.remove_edge(node_to_remove.id, output_node.id) - - merge_data_nodes(graph, output_node, input_data_node) - - # we just have saved all output edges from 'input' by reconnecting them to 'output', now we can delete 'input' - log.debug('Removing op node: {}'.format(node_to_remove.id)) - graph.remove_nodes_from([node_to_remove.id, input_data_node.id]) - - -def remove_op_nodes(graph, attrs: dict): - for node in graph.get_op_nodes(**attrs): - remove_op_node_with_data_node(graph, node) - - -def remove_edges_for_nodes(graph, node_attrs: dict, edge_attrs: dict): - from openvino.tools.mo.graph.graph import Node - for node in graph.nodes(): - node = Node(graph, node) - if all([node.has(attr) and node[attr] == node_attrs[attr] for attr in node_attrs]): - nodes_edges = node.in_nodes_edges() - for port in nodes_edges: - src_node, edge = nodes_edges[port] - if all([attr in edge and edge[attr] == edge_attrs[attr] for attr in edge_attrs]): - graph.remove_edge(src_node.id, node.id) diff --git a/tools/mo/openvino/tools/mo/middle/passes/fusing/__init__.py b/tools/mo/openvino/tools/mo/middle/passes/fusing/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/fusing/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/middle/passes/fusing/decomposition.py b/tools/mo/openvino/tools/mo/middle/passes/fusing/decomposition.py deleted file mode 100644 index d0afff7b9ca2ad..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/fusing/decomposition.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Mul, Add -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.graph.port import Port -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape - - -def expand_node_shape(port: Port, broadcast_dims_cnt): - value = mo_array(port.data.get_value()) - for idx in range(broadcast_dims_cnt): - value = np.expand_dims(value, axis=-1) - port.data.set_value(value) - - -def convert_batch_norm(graph: Graph): - """ - This function finds FusedBatchNorm layer (or BatchNorm for MXNet) and replaces with Mul->Add->Mul->Add sequence. 
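The removed convert_batch_norm pass rewrites FusedBatchNorm into the Mul->Add->Mul->Add chain described in the docstring, precomputing scale = 1/sqrt(variance + eps) and shift = -mean * scale and leaving gamma/beta for the second Mul/Add pair. A numpy sketch of the equivalence, assuming NCHW activations and per-channel statistics (all values below are synthetic):

import numpy as np

x = np.random.rand(1, 4, 8, 8).astype(np.float32)                    # NCHW activation (assumed)
gamma, beta = np.random.rand(4), np.random.rand(4)
mean, var, eps = np.random.rand(4), np.random.rand(4) + 0.5, 1e-5

scale = 1.0 / np.sqrt(var + eps)                                      # first Mul constant
shift = -mean * scale                                                 # first Add constant
bc = lambda v: np.asarray(v, dtype=np.float32).reshape(1, -1, 1, 1)   # broadcast over NCHW

decomposed = (x * bc(scale) + bc(shift)) * bc(gamma) + bc(beta)       # Mul->Add->Mul->Add
reference = bc(gamma) * (x - bc(mean)) / np.sqrt(bc(var) + eps) + bc(beta)
assert np.allclose(decomposed, reference, atol=1e-5)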
- """ - nodes = graph.get_op_nodes() - for node in nodes: - if node.has_valid('op') and (node.op in ['FusedBatchNorm', 'FusedBatchNormV2', 'FusedBatchNormV3', - 'BatchNorm', 'BatchNormalization', 'batchNormInference']): - - if any([node.in_port(i).data.get_value() is None for i in range(1, len(node.in_ports()))]): - log.warning('Cannot translate FusedBatchNorm {} node with non-constant weights'.format( - node.name if node.has_valid('name') else '')) - continue - - const = node.in_port(1).get_source() - node.in_port(1).disconnect() - - beta = node.in_port(2).get_source() - node.in_port(2).disconnect() - - mean = node.in_port(3).get_source() - node.in_port(3).disconnect() - - variance = node.in_port(4).get_source() - node.in_port(4).disconnect() - - eps = node.eps - - if node.has_valid('fix_gamma') and node.fix_gamma: - const.data.get_value().fill(1.) - - can_be_fused = False if not node.soft_get('can_be_fused') else True - - scale = 1. / np.sqrt(variance.data.get_value() + eps) - shift = (mean.data.get_value() * (-1)) * scale - - # Expand dims for current layout - layout = node.soft_get('data_format', graph.graph['layout']) - broadcast_dims_cnt = len(node.in_port(0).data.get_shape()) - 2 if layout in ['NCHW', "NCDHW"] else 0 - - # Update values and shapes with new shape - expand_node_shape(const, broadcast_dims_cnt) - expand_node_shape(beta, broadcast_dims_cnt) - - for idx in range(broadcast_dims_cnt): - scale = np.expand_dims(scale, axis=-1) - shift = np.expand_dims(shift, axis=-1) - - _fused_batch_norm_decomposition(graph, node.in_port(0), node.out_port(0), const, beta, scale, shift, can_be_fused) - - -def _fused_batch_norm_decomposition(graph: Graph, tinput: Port, toutput: Port, gamma: Port, beta: Port, - mean: np.ndarray, variance: np.ndarray, can_be_fused=True): - """ - This is common function for TF and Caffe - It creates Mul->Add->Mul->Add sub graph - """ - batch_norm_name = tinput.get_connection().get_destination().node.name - - # Create first Mul & Add operations - mul1_node = Mul(graph, dict(name=batch_norm_name + "/mean", can_be_fused=can_be_fused)).create_node() - add1_node = Add(graph, dict(name=batch_norm_name + "/variance", can_be_fused=can_be_fused)).create_node() - - const_mul1_node = Const(graph, dict(name="data_mul_", value=mo_array(mean))).create_node() - const_add1_node = Const(graph, dict(name="data_add_", value=mo_array(variance))).create_node() - - # Broadcast const from scalar - # We can broadcast only when const.value is scalar - if gamma.data.get_shape()[0] != gamma.data.get_value().shape[0]: - value = gamma.data.get_value() - value.resize(gamma.data.get_shape()).fill(value[0]) - gamma.data.set_value(value) - - # Create second Mul & Add - mul2_node = Mul(graph, dict(name=batch_norm_name + "/gamma", can_be_fused=can_be_fused)).create_node() - add2_node = Add(graph, dict(name=batch_norm_name + "/beta", can_be_fused=can_be_fused)).create_node() - - # Connect edges Mul1->Add1->Mul2->Add2 - tinput.get_connection().set_destination(mul1_node.in_port(0)) - mul1_node.in_port(1).get_connection().set_source(const_mul1_node.out_port(0)) - - add1_node.in_port(0).get_connection().set_source(mul1_node.out_port(0)) - add1_node.in_port(1).get_connection().set_source(const_add1_node.out_port(0)) - - mul2_node.in_port(0).get_connection().set_source(add1_node.out_port(0)) - gamma.get_connection().set_destination(mul2_node.in_port(1)) - - add2_node.in_port(0).get_connection().set_source(mul2_node.out_port(0)) - beta.get_connection().set_destination(add2_node.in_port(1)) - - 
toutput.get_connection().set_source(add2_node.out_port(0)) - - -def convert_scale_shift_to_mul_add(graph: Graph): - nodes = graph.get_op_nodes(op='ScaleShift') - for node in nodes: - if node.soft_get('can_be_fused') is False: - continue - - ports_count = len(node.in_ports()) - - input_port = node.in_port(0) - scale_port = node.in_port(1) if ports_count > 1 and not node.in_port(1).disconnected() else None - shift_port = node.in_port(2) if ports_count > 2 and not node.in_port(2).disconnected() else None - output_port = node.out_port(0) - - has_biases = True - has_weights = True - - # We don't need zero biases - if shift_port is None or (shift_port.data.get_value() is not None and all([x == 0 for x in shift_port.data.get_value()])): - has_biases = False - - # We don't need weights with ones - if scale_port is None or (scale_port.data.get_value() is not None and all([x == 1 for x in scale_port.data.get_value()])): - has_weights = False - - mul_op = Mul(graph, dict(name=node.name + "/Mul_")) - add_op = Add(graph, dict(name=node.name + "/Add_")) - - # Expand dims for current layout - broadcast_dims_cnt = len(input_port.data.get_shape()) - 2 if graph.graph['layout'] == 'NCHW' else 0 - - # In case if we have constant weights/biases we have to broadcast them according to graph layout - # otherwise we insert Reshape with broadcast dim attribute. - def broadcast_value(port): - value = mo_array(port.data.get_value()) - for idx in range(broadcast_dims_cnt): - value = np.expand_dims(value, axis=-1) - port.data.set_value(value) - - def broadcast_with_reshape(port): - input_shape = input_port.data.get_shape() - reshape_dims = np.zeros(len(input_shape), dtype=np.int64) - for i in range(0, node.axis): - reshape_dims[i] = 1 - data_shape = port.data.get_shape() - for i in range(node.axis, node.axis + len(data_shape)): - reshape_dims[i] = data_shape[i - node.axis] - for i in range(node.axis + len(data_shape), len(input_shape)): - reshape_dims[i] = 1 - reshape = create_op_node_with_second_input(graph, Reshape, reshape_dims, - dict(name=port.node.name + "/Broadcast_")) - port.get_connection().set_destination(reshape.in_port(0)) - reshape.out_port(0).connect(port) - - if has_weights and scale_port.data.get_value() is not None: - broadcast_value(scale_port) - elif has_weights: - broadcast_with_reshape(scale_port) - - if has_biases and shift_port.data.get_value() is not None: - broadcast_value(shift_port) - elif has_biases: - broadcast_with_reshape(shift_port) - - if has_biases and has_weights: - # Connect input->mul->out->add->out - add_node = add_op.create_node() - mul_node = mul_op.create_node() - - # Connect Mul operation with inputs - input_port.get_connection().set_destination(mul_node.in_port(0)) - scale_port.get_connection().set_destination(mul_node.in_port(1)) - - # Connect Add operation with inputs - mul_node.out_port(0).connect(add_node.in_port(0)) - shift_port.get_connection().set_destination(add_node.in_port(1)) - - output_port.get_connection().set_source(add_node.out_port(0)) - elif has_weights: - # Connect input->mul->out - mul_node = mul_op.create_node() - - # Connect Mul operation with inputs - input_port.get_connection().set_destination(mul_node.in_port(0)) - scale_port.get_connection().set_destination(mul_node.in_port(1)) - - output_port.get_connection().set_source(mul_node.out_port(0)) - elif has_biases: - # Connect input->add->out - add_node = add_op.create_node() - - # Connect Add operation with inputs - input_port.get_connection().set_destination(add_node.in_port(0)) - 
shift_port.get_connection().set_destination(add_node.in_port(1)) - - output_port.get_connection().set_source(add_node.out_port(0)) - else: - # Connect input->out - producer_port = input_port.get_source() - input_port.disconnect() - output_port.get_connection().set_source(producer_port) diff --git a/tools/mo/openvino/tools/mo/middle/passes/fusing/fuse_grouped_conv.py b/tools/mo/openvino/tools/mo/middle/passes/fusing/fuse_grouped_conv.py deleted file mode 100644 index 4a08692aaa4cc8..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/fusing/fuse_grouped_conv.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.fusing.helpers import get_next_operation - - -# TODO: unit tests -def concat_convolutions(graph: Graph, start_node: Node, last_node: Node): - """ - This function converts group of convolutions into one - """ - - # Check that concatenation makes in the same order - conv_nodes = get_next_operation(start_node) - assert len(conv_nodes) == len(last_node.in_nodes()) - gconv = conv_nodes[0] - - for id in range(len(conv_nodes)): - conv = conv_nodes[id] - if conv.out_node().id != last_node.in_node(id).id: - return False - # Check that all convolutions have same weights shapes - if not np.array_equal(conv.in_node(1).shape, gconv.in_node(1).shape): - log.debug('Grouped convolutions fusion : convolutions have different weights shape') - return False - - # Check that split and concat dims are valid - channel_dim = gconv.channel_dims[0] - split_axis = start_node.in_port(1).data.get_value() - if channel_dim != split_axis or channel_dim != last_node.axis: - log.debug('Grouped convolutions fusion : split or concat has weird axis!') - return False - - # Check that all convolutions has the same parameters - conv_attrs = ['pad', 'stride'] - for attr in conv_attrs: - for id in range(len(conv_nodes)): - conv = conv_nodes[id] - if not np.array_equal(gconv[attr], conv[attr]): - log.debug('Grouped convolutions fusion : attrs {} doesn\'t match'.format(attr)) - return False - - # Check that all Convolutions has biases (if exists) - has_biases = False - for id in range(len(conv_nodes)): - conv = conv_nodes[id] - if len(conv.in_nodes()) == 3: - if not has_biases: - has_biases = True - elif has_biases: - return False # All convolution mast have biases - - # Check that all biases have same shape - if has_biases: - for id in range(len(conv_nodes)): - conv = conv_nodes[id] - if conv.in_node(2).shape != gconv.in_node(2).shape: - log.debug('Group convolutions fusion : convolutions have different biases shape {} and {}'.format( - conv.in_node(2).shape, gconv.in_node(2).shape)) - return False - - graph.remove_edge(gconv.in_node(0).id, gconv.id) - graph.remove_edge(gconv.id, gconv.out_node().id) - - input = start_node.in_node(0) - output = last_node.out_node() - - # Removing edges from data nodes to Split and Concat - graph.remove_edge(input.id, start_node.id) - graph.remove_edge(last_node.id, output.id) - - # Add edges to grouped convolution - graph.add_edges_from([ - (input.id, gconv.id, {'in': 0}), - (gconv.id, output.id, {'out': 0}) - ]) - - # Concatenation of convolutions - weights_node = gconv.in_node(1) - bias_node = gconv.in_node(2) if has_biases else None - - weights_value = mo_array(weights_node.value) - bias_value = 
mo_array(bias_node.value) if has_biases else None - - # gconv.get_weights_permute.perm contains permutation indices - # where feature dimension is set to zero position, so 0 value - # in gconv.get_weights_permute.inv indicates original feature dimension index - feature_dim = np.where(gconv.get_weights_permute.inv == 0)[0][0] - - for conv in conv_nodes[1:]: - weights_value = np.concatenate((weights_value, conv.in_node(1).value), axis=feature_dim) - if has_biases: - bias_value = np.concatenate((bias_value, conv.in_node(2).value), axis=-1) # Not validated - - weights_node.value = mo_array(weights_value) - weights_node.shape = mo_array(weights_value.shape) - - if has_biases: - bias_node.value = mo_array(bias_value) - bias_node.shape = mo_array(bias_value.shape) - - log.debug('Start node : {} Last node : {} Nodes inside : {}'.format(start_node.id, last_node.id, - len(start_node.out_nodes()))) - log.debug('Output shape : {}'.format(weights_value.shape)) - - gconv.group = len(conv_nodes) - gconv.output = weights_node.shape[feature_dim] - gconv.output_shape[feature_dim] = weights_node.shape[feature_dim] - - return True - - -# TODO: unit tests -def grouped_convolutions_fusing(graph: Graph): - while True: - is_fused = False - graph.clean_up() - for node in graph.pseudo_topological_sort(): - if node.kind == 'op' and len(node.out_nodes()) > 1: - if node.soft_get('can_be_fused') == False: - continue - - is_valid_convolutions = True - last_layer = None - - next_nodes = get_next_operation(node) - # Check that all operation after this one are Convolutions - # and all convolutions has same output - if len(next_nodes) > 1 and all(_node.soft_get('type') in ['Convolution', 'Deconvolution'] for _node in next_nodes): - for conv in next_nodes: - conv_outputs = get_next_operation(conv) - if conv.soft_get('can_be_fused') == False: - is_valid_convolutions = False - if len(conv_outputs) != 1: - is_valid_convolutions = False - if last_layer is None: - last_layer = conv_outputs[0].id - # TODO: this check is not working for V10 where Biases appears as separate operations - elif conv_outputs[0].id != last_layer: - is_valid_convolutions = False - - if is_valid_convolutions: - is_fused = concat_convolutions(graph, node, Node(graph, last_layer)) - if is_fused: - break - - if not is_fused: - break diff --git a/tools/mo/openvino/tools/mo/middle/passes/fusing/fuse_linear_ops.py b/tools/mo/openvino/tools/mo/middle/passes/fusing/fuse_linear_ops.py deleted file mode 100644 index d45c5453ea5341..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/fusing/fuse_linear_ops.py +++ /dev/null @@ -1,187 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array, shape_array, dynamic_dimension -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.fusing.helpers import backward_bfs, forward_bfs, get_value_in_port, \ - get_tensor_in_port -from openvino.tools.mo.ops.const import Const - - -def _fuse_mul(graph: Graph, node: Node, fuse_nodes: list, backward: bool = True): - """ - This function takes Mul node and array of convolution/fc nodes for further fusion - Parameters - ---------- - x : bool - If backward is False, that means that Convolution/FC goes after Mul node - else means that Mul goes after Convolutions/FC - :param backward: - :param fuse_nodes: - :param node: - :param graph: - """ - is_fused = False - const_port, tensor_port = 
get_value_in_port(node), get_tensor_in_port(node) - - if const_port is None or tensor_port is None: - log.warning('Cannot do fuse_mul for node {} because this node has wrong inputs'.format(node.id)) - return False - - for fuse_node in fuse_nodes: - if fuse_node.soft_get('can_be_fused') is False: - log.warning('Node {} can\'t be used in fusing because attr can_be_fused = False'.format(fuse_node.name)) - return False - - if len(fuse_node.in_ports()) < 2: - log.warning('Node {} has no weights node'.format(fuse_node.name)) - return False - - if not backward and not fuse_node.has_valid('layout'): - log.warning('Node {} has no layout attr'.format(fuse_node.name)) - return False - - weights_port = fuse_node.in_port(1) - if not weights_port.data.has_valid('output_channel_dim') or \ - not weights_port.data.has_valid('input_channel_dim'): - log.warning( - 'Cannot do fuse_mul for node {} because there is no field ' + - 'output_channel_dim and/or input_channel_dim in weights.' - .format(fuse_node.soft_get('name')) - ) - return False - - inp_ch = weights_port.data.get_attr('input_channel_dim') - out_ch = weights_port.data.get_attr('output_channel_dim') - if max(inp_ch, out_ch) >= len(weights_port.data.get_shape()): - log.warning('Node {} has wrong weights shape'.format(fuse_node.name)) - return False - - for fuse_node in fuse_nodes: - weights_port = fuse_node.in_port(1) - value = mo_array(const_port.data.get_value()) - - value = np.squeeze(value) - - # TODO : ch_dim should be equal to node.in_node(1).value.shape - # We will multiply weights according output/input channel dimension - ch_dim = weights_port.data.get_attr('output_channel_dim' if backward else 'input_channel_dim') - shape = shape_array([weights_port.data.get_shape()[ch_dim]]) - - # no fusing is possible in case dynamic weights - if weights_port.data.get_shape()[ch_dim] is dynamic_dimension: - return False - - # Scalar broadcast - if value.size == 1: - value = np.full(shape, value.item(), dtype=value.dtype) - - # Common broadcast for forward fusion - if not backward: - cnt = shape[-1] / value.shape[0] - if fuse_node.layout == 'NCHW': - tmp = mo_array([], dtype=value.dtype) - for val in value: - tmp = np.concatenate((tmp, np.repeat(val, cnt))) - value = mo_array(tmp) - else: - value = np.tile(value, int(cnt)) - - # Expand dims for multiplication (ex. [38] to [38, 1, 1]) - wdims_number = weights_port.data.get_attr('dims_number') - for x in range(wdims_number - ch_dim - 1): - shape = np.append(shape, 1) - - mul_val = mo_array(value) - # If the value fails to reshape to the provided shape, skip fusing. - # This can happen in case of group != 1 of the convolution. - try: - value = np.reshape(value, shape) - except ValueError: - log.error("Cannot fuse const from {} to {}. Reshape failed. 
Skipping.".format( - node.soft_get('name', node.id), fuse_node.soft_get('name', fuse_node.id)), extra={'is_warning': True}) - return False - - # Weights multiplication - mul_name = node.name + '_copy' - mul_const = Const(graph, {'value': value, 'name': mul_name + '/const'}).create_node() - w_mul = node.copy_node({'name': mul_name, 'in_ports_count': len(node.in_ports()), - 'out_ports_count': len(node.out_ports()), 'can_be_fused': False}) - w_mul.in_port(const_port.idx).connect(mul_const.out_port(0)) - w_const = weights_port.get_source() - weights_port.get_connection().set_source(w_mul.out_port(0)) - w_const.connect(w_mul.in_port(tensor_port.idx)) - - fuse_node_in_data = fuse_node.in_node(weights_port.idx) - w_const_out_data = w_const.node.out_node(w_const.idx) - - # During this reconnection new data node name is copied from the data node - # outgoing from w_const port. Duplicate names of data nodes lead to appearing - # of duplicate op node names after constant folding. So we should manually - # set a unique name for the new data node. - if fuse_node_in_data.soft_get('name') == w_const_out_data.soft_get('name') and \ - fuse_node_in_data.soft_get('name', None) is not None: - fuse_node.in_node(weights_port.idx)['name'] = graph.unique_id(mul_name) - - # If we fuse in backward direction we should multiply biases if they exists - if backward and len(fuse_node.in_ports()) == 3 and not fuse_node.in_port(2).disconnected() and \ - not fuse_node.has_and_set('shape_input'): - conv_bias = fuse_node.in_port(2) - conv_bias.data.set_value(conv_bias.data.get_value() * np.squeeze(mul_val)) - - mul_const.infer(mul_const) - w_mul.infer(w_mul) - - log.debug('Fused: {} to {}'.format(node.name, fuse_node.name)) - is_fused = True - - if is_fused: - # Delete Mul node - producer_port = tensor_port.get_source() - tensor_port.disconnect() - const_port.disconnect() - # as Mul node is added before convolution, output tensor from Convolution node - # corresponds to original Mul node - if producer_port.node.soft_get('type') == 'Parameter': - node.out_port(0).get_connection().set_source(producer_port, "source") - else: - node.out_port(0).get_connection().set_source(producer_port, "dest") - - return is_fused - - -def fuse_linear_ops(graph: Graph): - """ - This function makes fusing of linear operations (Mul,Add) to Convolution/FC. 
- """ - fuse_count = 0 - - # Fusion in backward direction - nodes = graph.pseudo_topological_sort() - for node in nodes: - is_fused = False - - # Fuse Mul to Convolution/FC - if node.soft_get('op') == 'Mul' and get_value_in_port(node) is not None and node.has_and_set('can_be_fused'): - fuse_nodes = backward_bfs(node, [], ['Convolution', 'Deconvolution', 'MatMul']) - is_fused = _fuse_mul(graph, node, fuse_nodes) - - fuse_count += is_fused - - # Fusion in forward direction - nodes = graph.pseudo_topological_sort(reverse=True) - for node in nodes: - is_fused = False - - # Fuse Mul to Convolution/FC - if node.soft_get('op') == 'Mul' and get_value_in_port(node) is not None and node.has_and_set('can_be_fused'): - fuse_nodes = forward_bfs(node, [], ['Convolution', 'Deconvolution', 'MatMul']) - is_fused = _fuse_mul(graph, node, fuse_nodes, False) - - fuse_count += is_fused - - log.debug("Fused {} nodes".format(fuse_count)) diff --git a/tools/mo/openvino/tools/mo/middle/passes/fusing/fuse_linear_seq.py b/tools/mo/openvino/tools/mo/middle/passes/fusing/fuse_linear_seq.py deleted file mode 100644 index 1e97dbba72c0be..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/fusing/fuse_linear_seq.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Mul, Add -from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.fusing.helpers import get_value_in_port, get_tensor_in_port -from openvino.tools.mo.ops.const import Const - - -def _fuse_linear_sequence(graph: Graph, start_node: Node): - """ - This function finds the sequence of Mul/Add operations and replaces this sequence with two ops (Mul->Add). 
- :param graph: - :param start_node: The first operation of the sequence - """ - fnodes = [start_node] - while True: - node = fnodes[-1] - destinations = node.out_port(0).get_destinations() - if len(destinations) != 1: - break - dst_node = destinations[0].node - if dst_node.soft_get('op') in ['Mul', 'Add'] and get_value_in_port(dst_node) is not None and \ - dst_node.soft_get('can_be_fused') is True: - fnodes.append(dst_node) - else: - break - - if len(fnodes) == 1 or (len(fnodes) == 2 and fnodes[0].op == 'Mul' and fnodes[1].op == 'Add'): - return False - - input_shape = get_tensor_in_port(start_node).data.get_shape() - - init_dims_cnt = len(input_shape) - 2 if graph.graph['layout'] == 'NCHW' else 1 - - first_value = get_value_in_port(fnodes[0]).data.get_value() - if not isinstance(first_value, np.ndarray): - first_value = mo_array(first_value) - first_value_type = first_value.dtype - - mul = np.ones([1 for x in range(init_dims_cnt)], dtype=first_value_type) - add = np.zeros([1 for x in range(init_dims_cnt)], dtype=first_value_type) - - first_mul_name = None - first_add_name = None - - for node in fnodes: - const_port_value = get_value_in_port(node).data.get_value() - if node.op == 'Mul': - if first_mul_name is None: - first_mul_name = node.name - mul = mul * const_port_value - add = add * const_port_value - elif node.op == 'Add': - if first_add_name is None: - first_add_name = node.name - add = add + const_port_value - - # If mul is scalar we broadcast it to biases shape - if mul.shape != add.shape and len(mul.shape) == 1 and mul.shape[0] == 1: - mul = mo_array([mul[0] for x in range(add.shape[0])]) - - assert (compatible_shapes(get_tensor_in_port(fnodes[0]).data.get_shape(), fnodes[-1].out_port(0).data.get_shape())) - - mul_op = Mul(graph, dict(name='{}/Fused_Mul_'.format(first_mul_name or ''))) - add_op = Add(graph, dict(name='{}/Fused_Add_'.format(first_add_name or ''))) - - in_port = get_tensor_in_port(fnodes[0]) - out_port = fnodes[-1].out_port(0) - - """ - Four cases considered below: - 1. Mul and Add have valid values (mul value != 1 and add value != 0) - 2. Only Mul has valid values, so we add only Mul node - 3. Only Add has valid values, so we add only Add node - 4. 
When Mul and Add has not valid values we just merge two data nodes - """ - if any([x != 0 for x in np.nditer(add)]) and any([x != 1 for x in np.nditer(mul)]): - # Const\ Const\ - # ----->Mul------>Add--> - mul_const = Const(graph, dict(name="data_mul_", value=mo_array(mul))).create_node() - add_const = Const(graph, dict(name="data_add_", value=mo_array(add))).create_node() - - mul_node = mul_op.create_node() - add_node = add_op.create_node() - - in_port.get_connection().set_destination(mul_node.in_port(0)) - mul_const.out_port(0).connect(mul_node.in_port(1)) - - mul_node.out_port(0).connect(add_node.in_port(0)) - add_const.out_port(0).connect(add_node.in_port(1)) - out_port.get_connection().set_source(add_node.out_port(0)) - elif any([x != 1 for x in np.nditer(mul)]): - # Const\ - # ----->Mul--> - mul_const = Const(graph, dict(name="data_mul_", value=mo_array(mul))).create_node() - mul_node = mul_op.create_node() - - in_port.get_connection().set_destination(mul_node.in_port(0)) - mul_const.out_port(0).connect(mul_node.in_port(1)) - out_port.get_connection().set_source(mul_node.out_port(0)) - elif any([x != 0 for x in np.nditer(add)]): - # Const\ - # ----->Add--> - add_const = Const(graph, dict(name="data_add_", value=mo_array(add))).create_node() - add_node = add_op.create_node() - - in_port.get_connection().set_destination(add_node.in_port(0)) - add_const.out_port(0).connect(add_node.in_port(1)) - out_port.get_connection().set_source(add_node.out_port(0)) - else: - source_node = in_port.get_source() - in_port.disconnect() - out_port.get_connection().set_source(source_node) - - # Remove fused nodes - for node in fnodes: - graph.remove_node(node.id) - - log.debug('Fused {} operations'.format(len(fnodes))) - return True - - -def fuse_mul_add_sequence(graph: Graph): - """ - This function finds first valid Mul/Add node and pass it to fuse_linear_sequence where full sequence will be found - """ - while True: - is_fused = False - for node in graph.pseudo_topological_sort(): - if node.id in graph: - if node.soft_get('op') in ['Mul', 'Add'] and get_value_in_port(node) is not None and \ - node.soft_get('can_be_fused') is True: - is_fused |= _fuse_linear_sequence(graph, node) - if not is_fused: - break diff --git a/tools/mo/openvino/tools/mo/middle/passes/fusing/helpers.py b/tools/mo/openvino/tools/mo/middle/passes/fusing/helpers.py deleted file mode 100644 index 0d07c31631e72f..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/fusing/helpers.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from collections import deque - -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.graph.port import Port - - -def get_value_id(node: Node): - assert node.has_valid('op') - value_id = None - for port, in_node in node.in_nodes().items(): - if in_node.has_valid('value'): - if value_id: - return None - value_id = port - return value_id - - -def get_tensor_id(node: Node): - assert node.has_valid('op') - tensor_id = None - for port, in_node in node.in_nodes().items(): - if not in_node.has_valid('value'): - if tensor_id: - return None - tensor_id = port - return tensor_id - - -def get_tensor_in_port(node) -> Port: - tensor_ports = [] - for port in node.in_ports().values(): - if port.data.get_value() is None: - tensor_ports.append(port) - return None if len(tensor_ports) != 1 else tensor_ports[0] - - -def get_value_in_port(node) -> Port: - value_ports = [] - for port in 
node.in_ports().values(): - if port.data.get_value() is not None: - value_ports.append(port) - return None if len(value_ports) != 1 else value_ports[0] - - -def common_bfs(start_node: Node, allowed_ops: list, op_name: list, is_backward: bool = True, allowed_all: bool = False, - attr_to_check='type', follow_multi_consumer_data_nodes=False): - """ - The purpose of this algorithm is to find layers with 'op_name' located in given direction. - In case of branching algorithm goes into each branch, but if it can't find layer in one of them it returns - empty list. - - :param start_node: Start node for BFS algorithm - :param allowed_ops: List of operations that we can jump over - :param op_name: The list with names of operations for searching - :param is_backward: The direction of BFS algorithm - :param allowed_all: Bool flag meaning we can jump over all operations - :param attr_to_check: the attribute to check when looking if the node is in "op_name" list - :param follow_multi_consumer_data_nodes: for backward traversal allow to follow data nodes with multiple consumers - """ - ret = [] - q = deque([start_node]) - used = [] - while len(q) != 0: - node = q.popleft() - if node.id in used: - log.debug("[BFS:ERROR] Graph contains cycle! BFS starts from {} node".format(start_node.id)) - return [] - used.append(node.id) - in_nodes_size = len(node.in_nodes()) if is_backward else len(node.out_nodes()) - for id in range(in_nodes_size): # in_nodes() can return either list or dict - pnode = node.in_node(id) if is_backward else node.out_node(id) - if pnode.has_valid(attr_to_check): - if pnode[attr_to_check] in op_name: - if pnode.id not in ret: - ret.append(pnode.id) - elif allowed_all or pnode.op in allowed_ops: - q.append(pnode) - else: - return [] - elif pnode.kind == 'data' and pnode.value is None: - # If we go backward we don't use data node that have more than one consumer - if not is_backward or (len(pnode.out_nodes()) == 1 or follow_multi_consumer_data_nodes): - q.append(pnode) - return [Node(start_node.graph, x) for x in ret] - - -def forward_bfs(start_node: Node, allowed_ops: list, op_name: list, allowed_all: bool = False): - return common_bfs(start_node, allowed_ops, op_name, False, allowed_all=allowed_all) - - -def backward_bfs(start_node: Node, allowed_ops: list, op_name: list, allowed_all: bool = False): - return common_bfs(start_node, allowed_ops, op_name, allowed_all=allowed_all) - - -def get_next_operation(node: Node): - """ - This function returns next op node, so node should be an operation - """ - assert node.kind == 'op' - - out_nodes = node.out_nodes() - res = [] - for port, out_node in out_nodes.items(): - op_nodes = out_node.out_nodes() - for op_node in op_nodes: - if op_node.id not in [n.id for n in res]: - res.append(op_node) - return res diff --git a/tools/mo/openvino/tools/mo/middle/passes/fusing/mark_unfused_nodes.py b/tools/mo/openvino/tools/mo/middle/passes/fusing/mark_unfused_nodes.py deleted file mode 100644 index 37c015cfaa3ec8..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/fusing/mark_unfused_nodes.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import re - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.MarkSubgraphsWithCorrectLayout import MarkSubGraphsWithCorrectLayout -from openvino.tools.mo.middle.passes.fusing.helpers import get_value_id - - -def _check_lin_op(node: Node, layout: str): - lin_ops = ['Mul', 'Add'] - if 
node.soft_get('op') in lin_ops: - weights_id = get_value_id(node) - if weights_id is None: - node.graph.node[node.id]['can_be_fused'] = False - log.info('[ FUSING ] Node {} wasn\'t marked as fusable (no weights, probably this is element-wise operation' - ' that is not fusable)'.format(node.id)) - return - - node.graph.node[node.id]['can_be_fused'] = True - log.info('[ FUSING ] Node {} marked as fusable'.format(node.id)) - - -def mark_unfused_nodes(graph: Graph, regex_masks: str): - regex_masks = [] if not regex_masks else regex_masks.split(',') - nodes = graph.get_op_nodes() - for node in nodes: - if node.has_valid('can_be_fused'): - continue - disabled = False - for mask in regex_masks: - res = re.findall(mask, node.name) - if res and len(res): - graph.node[node.id]['can_be_fused'] = False - log.info('[ FUSING ] Node {} wasn\'t marked as fusable (user decision {})'.format(node.id,mask)) - disabled = True - if not disabled: - _check_lin_op(node, graph.graph['layout']) - - -def mark_shape_of_sugraph_as_unfusable(graph: Graph): - def condition_to_continue(node: Node): - for port in node.out_ports().values(): - if port.data.get_value() is None: - return False - return True - - starting_nodes = graph.get_op_nodes(op='ShapeOf') - shapeof_subgraph_nodes = MarkSubGraphsWithCorrectLayout.bfs(starting_nodes, set(), condition_to_continue) - - for node in shapeof_subgraph_nodes: - node['can_be_fused'] = False diff --git a/tools/mo/openvino/tools/mo/middle/passes/fusing/resnet_optimization.py b/tools/mo/openvino/tools/mo/middle/passes/fusing/resnet_optimization.py deleted file mode 100644 index 6ef142dd873adc..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/fusing/resnet_optimization.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.fusing.helpers import get_next_operation -from openvino.tools.mo.ops.pooling import Pooling - - -def _clean_fw_tensor_attrs(node: Node): - attrs = ['fw_tensor_debug_info'] - for attr in attrs: - if node.has_valid(attr): - node[attr] = None - - -def _insert_pooling(graph: Graph, first_node: Node, second_node: Node, spatial_dims): - """ - This function inserts point wise pooling layer between two nodes - """ - log.debug("STRIDE PROP: Insert pooling between {} and {}".format(first_node.name, second_node.name)) - stride_prop = second_node.stride_prop - assert len(graph.get_edge_data(first_node.id, second_node.id)) == 1 - eattrs = graph.get_edge_data(first_node.id, second_node.id)[0] - graph.remove_edge(first_node.id, second_node.id) - - pooling = Pooling(graph, dict(name='Pooling_', spatial_dims=spatial_dims, window=mo_array([1, 1, 1, 1]), - output_spatial_shape=None, - stride=mo_array(stride_prop), pad_spatial_shape=mo_array([[0, 0], [0, 0]]), - pad=mo_array([[0, 0], [0, 0], [0, 0], [0, 0]]), pool_method='max', - is_partial_inferred=False)) - pooling_data = pooling.create_node_with_data([first_node]) - - _clean_fw_tensor_attrs(pooling_data) - - graph.add_edges_from([(pooling_data.id, second_node.id, eattrs)]) - - -def _check_next_ops(next_ops: list): - """ - This function checks list of operation to determine that all ops has same (not 1,1,1,1) stride_prop attr - """ - stride_props = [] - for op in next_ops: - if op.has_valid('stride_prop'): - 
stride_props.append(mo_array(op.stride_prop)) - else: - continue - - status = not (len(next_ops) != len(stride_props) or (len(stride_props) > 0 and not all( - np.array_equal(x, stride_props[0]) and not np.array_equal(x, [1, 1, 1, 1]) for x in stride_props))) - return stride_props, status - - -def _simple_stride_prop(graph: Graph, node: Node, spatial_dims, supported=True): - """ - This function handles stride propagation for op nodes. If node is in supported ops dict so this is supported operation and we - can propagate stride directly via this op (stride_prop will be set by using bottom stride_prop), otherwise we can't and - stride_prop attr will be set as 1,1,1,1 - """ - next_ops = get_next_operation(node) - stride_props, all_ops_are_valid = _check_next_ops(next_ops) - - if not supported or not all_ops_are_valid: - # We have to insert pooling layers - for op in next_ops: - if op.has_valid('stride_prop') and not np.array_equal(op.stride_prop[spatial_dims], mo_array([1, 1])) and \ - (op.has_valid('has_stride') == False or op.soft_get('has_stride') == False): - _insert_pooling(graph, node.out_node(), op, spatial_dims) - # If Convolution is valid then set `stride_prop` to Convolution stride - node['stride_prop'] = mo_array([1, 1, 1, 1]) - return - - for op in next_ops: - if op.soft_get('has_stride') == True: - op.stride = mo_array([1, 1, 1, 1]) - log.debug("STRIDE PROP: {} {} strides was moved upper via {}".format(op.type, op.name, node.name)) - - node['stride_prop'] = mo_array(stride_props[0]) if len(stride_props) > 0 else mo_array([1, 1, 1, 1]) - node['is_partial_inferred'] = False - _clean_fw_tensor_attrs(node.out_node()) - - -def _conv_stride_prop(graph: Graph, node: Node, spatial_dims, supported=True): - """ - This function handles convolution stride propagation. There is two cases: conv->(op) and conv->conv. 
In first case - we propagate stride from op, and in second case we also change stride for second conv - """ - next_ops = get_next_operation(node) - stride_props, all_ops_are_valid = _check_next_ops(next_ops) - - def _check_convolution(node: Node): - return node.has_valid('kernel_spatial') and np.array_equal(node.kernel_spatial, mo_array([1, 1])) - - # Check that all ops are valid and have same values - if not all_ops_are_valid: - # We have to insert pooling layers - for op in next_ops: - if op.has_valid('stride_prop') and not np.array_equal(op.stride_prop[spatial_dims], mo_array([1, 1])): - # Insert pooling - _insert_pooling(graph, node.out_node(), op, spatial_dims) - elif len(stride_props) > 0: - node.stride *= stride_props[0] - log.debug('STRIDE PROP: {} got new strides {}'.format(node.name, node.stride)) - for op in next_ops: - if op.soft_get('has_stride') == True: - op.stride = mo_array([1, 1, 1, 1]) - node['is_partial_inferred'] = False - node['output_spatial_shape'] = False - _clean_fw_tensor_attrs(node.out_node()) - - # If Convolution is valid then set `stride_prop` to Convolution stride - node['stride_prop'] = mo_array(node.stride) if _check_convolution(node) else mo_array([1, 1, 1, 1]) - - -supported_ops = { - 'ReLU': {'stride_prop': _simple_stride_prop, 'attrs': {}}, - 'Maximum': {'stride_prop': _simple_stride_prop, 'attrs': {}}, - 'Mul': {'stride_prop': _simple_stride_prop, 'attrs': {}}, - 'Add': {'stride_prop': _simple_stride_prop, 'attrs': {}}, - 'Convolution': {'stride_prop': _conv_stride_prop, 'attrs': {'has_stride': True}}, -} - - -def _stride_propagation(graph: Graph, spatial_dims): - """ - This function do stride propagation for all op nodes - """ - nodes = [node for node in graph.pseudo_topological_sort(reverse=True) if - node.kind == 'op' and node.soft_get('type') != 'Const'] - - for node in nodes: - if node.soft_get('type') in supported_ops: - op = supported_ops[node.type] - # Add node attrs - for key in op['attrs'].keys(): - node[key] = op['attrs'][key] - op['stride_prop'](graph, node, spatial_dims, True) - else: - _simple_stride_prop(graph, node, spatial_dims, False) - - -def stride_optimization(graph: Graph): - """ - This is main function for stride optimization pass - """ - layout = graph.graph['layout'] - if layout == 'NCHW': - spatial_dims = mo_array([2, 3]) - elif layout == 'NHWC': - spatial_dims = mo_array([1, 2]) - else: - log.warning('STRIDE PROP: layout {} is not supported'.format(layout)) - return - _stride_propagation(graph, spatial_dims) - - nodes = [node for node in graph.pseudo_topological_sort() if - node.soft_get('is_partial_inferred') == False] - for node in nodes: - node.infer(node) diff --git a/tools/mo/openvino/tools/mo/middle/passes/infer.py b/tools/mo/openvino/tools/mo/middle/passes/infer.py deleted file mode 100644 index a3e5ae1dd0c19f..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/infer.py +++ /dev/null @@ -1,372 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from typing import List - -import networkx as nx - -from openvino.tools.mo.front.common.layout import get_dim_from_layout -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension -from openvino.tools.mo.graph.graph import Node, Graph, dict_includes -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg, shrink_str_value - - -def log_debug_dict(nodes_per_port: dict, direction_name: str): - for port, node in 
nodes_per_port.items(): - value = shrink_str_value(node.soft_get('value')) - log.debug('{}[{}]: shape = {}, value = {}'.format(direction_name, port, node.soft_get('shape'), value)) - - -def control_flow_infer(graph: Graph, node_name: str): - """ - Executes constant control flow. Propagates nodes executability - """ - if graph.node[node_name]['kind'] == 'data': - return - - def mark_executability(node_id: str, is_executable: bool): - if is_executable and not graph.node[node_id]['executable']: - return - graph.node[node_id]['executable'] = is_executable - - in_edges_with_data = graph.in_edges(node_name, data=True) - in_df_edges_with_data = [(u, v, attrs) for u, v, attrs in in_edges_with_data - if 'control_flow_edge' not in attrs or not attrs['control_flow_edge']] - in_cf_edges_with_data = [(u, v, attrs) for u, v, attrs in in_edges_with_data - if 'control_flow_edge' in attrs and attrs['control_flow_edge']] - is_executable_df = all([graph.node[u]['executable'] for u, _, attrs in in_df_edges_with_data] - if len(in_df_edges_with_data) else [True]) - is_executable_cf = all([graph.node[u]['executable'] for u, _, attrs in in_cf_edges_with_data] - if len(in_cf_edges_with_data) else [True]) - is_executable = is_executable_df and is_executable_cf - - node = Node(graph, node_name) - if 'cf_infer' in graph.node[node_name] and callable(node.cf_infer): - node.cf_infer(node, is_executable, mark_executability) - else: - for _, out_data in graph.out_edges(node_name): - mark_executability(out_data, is_executable) - - -def exit_bound_edges(graph: Graph, sources: list, end_node_attrs: dict): - """ - Finds all descendant nodes for each node from 'sources' that have given attributes from end_node_attrs. - For each found node, create a tuple with a given element from 'source' and the node. - """ - result = [] - for node in sources: - for end_node in nx.descendants(graph, node): - if dict_includes(big=graph.node[end_node], sub_dict=end_node_attrs): - result.append((node, end_node, 0, {})) - return result - - -def partial_infer(graph: Graph, start_node: str = None): - """ - Tries to execute constant parts of the graph and deduce as much as possible - information following the data flow, e.g. calculate and propagate shapes and - constant values. Partially or completely defined values are stored in data - nodes (kind='data'). - """ - # We have to turn off strict mode due to above we add and remove edeges without attributes that is prohibited - graph.strict_mode = False - cycle_nodes = graph.get_nodes_with_attributes(is_cyclic=True) - cycle_nodes = [Node(graph, node).out_node().id for node in cycle_nodes] - ebunch_cyclic = list(graph.out_edges(nbunch=cycle_nodes, data=True, keys=True)) - ebunch_reconnected = exit_bound_edges(graph, sources=cycle_nodes, end_node_attrs={'op': 'Exit'}) - graph.remove_edges_from(ebunch_cyclic) - graph.add_edges_from(ebunch_reconnected) - - try: - nodes = list(nx.topological_sort(graph)) - except: - raise Error('Graph contains a cycle. Can not proceed. 
' + refer_to_faq_msg(97)) - - graph.remove_edges_from(ebunch_reconnected) - graph.add_edges_from(ebunch_cyclic) - graph.strict_mode = True - - # Mark all nodes as not inferred yet - if start_node is not None: - start_index = nodes.index(start_node) - nx.set_node_attributes(G=graph.subgraph(nodes[start_index:]), name='is_partial_inferred', values=False) - else: - nx.set_node_attributes(G=graph, name='is_partial_inferred', values=False) - - nx.set_node_attributes(G=graph, name='executable', - values={n: True for n in graph.get_nodes_with_attributes(kind='data')}) - - # first we infer constant sub-graphs so the reverse infer could use constant values sub-graphs. For example, - # convolution weights may be reshuffled by some operation in the graph and are not directly consumed by the conv - # node - infer_nodes(graph, nodes, True) - - # we may need to deduce shape for Parameter node(s) if it is not defined - need_reverse_infer = False - for parameter in graph.get_op_nodes(op='Parameter'): - if parameter.soft_get('shape', None) is None: - need_reverse_infer = True - - if need_reverse_infer: - reverse_infer(graph, nodes) - - infer_nodes(graph, nodes, False) - - not_fully_inferred = graph.get_nodes_with_attributes(is_not_fully_inferred=True) - for n in not_fully_inferred: - node = Node(graph, n) - if node.has_and_set('infer'): - node.infer(node) - - return graph - - -def infer_nodes(graph: Graph, nodes: List[Node], constant_subgraph_only: bool = False): - """ - Run "infer" function of the specified nodes. - - :param graph: graph with nodes - :param nodes: list of node ids in the topological order - :param constant_subgraph_only: flag which specifies whether only inference of constant sub-graphs should be done - """ - debug_logger = log.getLogger().isEnabledFor(log.DEBUG) - for n in nodes: - # Data Flow Infer - node = Node(graph, n) - node_name = node.soft_get('name', node.id) - try: - if node.has('is_partial_inferred') and not node.is_partial_inferred: - if node.has('infer') and not node.infer is None: - # we consider that operation will produce value if all inputs are constants or it is - # 'ShapeOf' operation - if constant_subgraph_only: - in_values = [port.data.get_value() for port in node.in_ports().values()] - if node.soft_get('op') == 'Parameter' or any(value is None for value in in_values) or \ - (node.soft_get('op') == 'ShapeOf' and node.in_port(0).data.get_shape() is None): - # if here will be any new ShapeOf type operation, we should update condition above - continue - - if debug_logger: - log.debug('-' * 20) - log.debug('Partial infer for {}'.format(node.soft_get('name'))) - log.debug('Op: {}'.format(node.soft_get('op'))) - log.debug('Inputs:') - log_debug_dict(node.in_nodes(), 'input') - - node.infer(node) - out_nodes = node.out_nodes() - - # propagate nchw_layout attributes to data nodes - if node.has('nchw_layout'): - for out_node in out_nodes.values(): - out_node['nchw_layout'] = node.nchw_layout - - # In debug print current node attributes, input shapes/values and output shape/values - if debug_logger: - log.debug('Outputs:') - log_debug_dict(node.out_nodes(), 'output') - - if not constant_subgraph_only: - not_all_output_shapes = False - - for out_port, out_node in out_nodes.items(): - not_all_output_shapes = False - if not out_node.has_valid('shape'): - log.error('Shape is not defined for output {} of "{}".'.format(out_port, node_name)) - not_all_output_shapes = True - - if not_all_output_shapes: - raise Error('Not all output shapes were inferred or fully defined for node "{}". 
' + - refer_to_faq_msg(40), - node_name) - elif node.kind != 'data': - raise Error( - 'There is no registered "infer" function for node "{}" with op = "{}". ' + - 'Please implement this function in the extensions. ' + - refer_to_faq_msg(37), - node_name, - node.soft_get('op') - ) - node.is_partial_inferred = True - except Exception as err: - log.error('Cannot infer shapes or values for node "{}".'.format(node.soft_get('name'))) - log.error(str(err)) - log.error('') - log.error('It can happen due to bug in custom shape infer function {}.'.format(node.soft_get('infer'))) - log.error('Or because the node inputs have incorrect values/shapes.') - log.error('Or because input shapes are incorrect (embedded to the model or passed via --input_shape).') - debug_messages = '\n'.join( - ['Layer "' + node_name + '": ' + node_attrs['debug_message'] for node_name, node_attrs in - graph.nodes(data=True) if 'debug_message' in node_attrs]) - if debug_messages != "": - log.error('') - log.error('Other possible failure reasons are listed below:') - log.error(debug_messages) - if not debug_logger: - log.error('Run Model Optimizer with --log_level=DEBUG for more information.') - else: - log.debug('Node "{}" attributes: {}'.format(node.soft_get('name'), node.graph.node[node.id])) - raise Error('Stopped shape/value propagation at "{}" node. '.format(node.soft_get('name')) + - refer_to_faq_msg(38)) from err - control_flow_infer(graph, n) - - -def override_batch(graph: Graph, batch: int): - """ - Overrides batch for nodes with 'op' param set to 'Parameter' - Parameters - ---------- - graph: graph to operate on - batch: user defined integer value to override batch - """ - if batch is not None: - in_nodes = graph.get_op_nodes(op='Parameter') - for node in in_nodes: - if not node.soft_get('fixed_batch', False): - name = node.soft_get('name', node.id) - idx, has_layout = get_dim_from_layout(node, 'N') - if has_layout: - if idx is not None: - node['shape'][idx] = batch - else: - log.warning( - 'Layout for input {} doesn\'t have batch dimension. Skipping this input.'.format(name)) - else: - validate_batch_in_shape(node['shape'], name) - node['shape'][0] = batch - - -def validate_batch_in_shape(shape, layer_name: str): - """ - Raises Error #39 if shape is not valid for setting batch size - Parameters - ---------- - shape: current shape of layer under validation - layer_name: name of layer under validation - """ - if len(shape) == 0 or (shape[0] is not dynamic_dimension and shape[0] not in (-1, 0, 1)): - raise Error(('The input layer {} has a shape {} defined in the model. \n\n' + - 'When you use -b (--batch) option, Model Optimizer applies its value to the first ' + - 'element of the shape if it is equal to -1, 0 or 1. Otherwise, this is the ambiguous ' + - 'situation - Model Optimizer can not know in advance whether the layer has the batch ' + - 'dimension or not.\n\n For example, you want to set batch dimension equals 100 ' + - 'for the input layer "data" with shape (10,34). Although you can not use --batch, ' + - 'you should pass --input_shape (100,34) instead of --batch 100. \n\n' + - 'You can also tell Model Optimizer where batch dimension is located by specifying --layout. \n\n' + - refer_to_faq_msg(39)) - .format(layer_name, shape)) - - -def override_placeholder_shapes(graph: Graph, user_shapes: dict, batch=None): - """ - This function overrides shapes for nodes with 'op' param set to 'Parameter' with shapes defined by users (only - for inputs without in/out port specified). 
- And override batch if batch was specified and shape for input is not None. - :param graph: graph to operate on - :param user_shapes: dictionary, that represents user defined nodes and shapes - :param batch: user defined integer value to override batch - """ - if user_shapes is None: - # DON'T MOVE UPPER!!! WE NEED TO SET BATCH FIRST - # user did not specify neither shapes nor inputs, keep models values - return - placeholders = graph.get_nodes_with_attributes(kind='op', op='Parameter') - for node_id in placeholders: - node_attrs = graph.node[node_id] - shape = None - if node_id in user_shapes: - values = user_shapes[node_id] - for value in values: - if 'in' not in value and 'out' not in value: - shape = value['shape'] if value['shape'] is not None else None - break # we assume only one specified shape for one input - if shape is not None: - node_attrs['shape'] = shape - if batch is not None and node_attrs['shape'] is not None and len(node_attrs['shape']) > 0: - node_attrs['shape'][0] = batch - - -def type_infer(graph: Graph): - nodes = list(nx.topological_sort(graph)) - for n in nodes: - node = Node(graph, n) - if node.kind == 'op': - node_name = node.soft_get('name') - node_type_infer(node) - log.debug('Type infer for node {}: {}'.format(node_name, - [port.get_data_type() for port in node.out_ports().values()])) - """ - Save the precision of input ports in the nodes. It is not possible to get the precision after the port - re-numbering because the port precision is defined for output port only and for input port it is determined - with the output port producing data to the input port. When output port id is changed it is not possible to - determine input port precision. - """ - for out_port in node.out_ports().values(): - for dest_port in out_port.get_destinations(): - if not dest_port.node.has_valid('_in_port_precision'): - dest_port.node['_in_port_precision'] = {} - dest_port.node['_in_port_precision'][dest_port.idx] = out_port.get_data_type() - - -def node_type_infer(node): - if node.has_valid('type_infer'): - node.type_infer(node) - elif node.has_valid('data_type'): - node.out_port(0).set_data_type(node.data_type) - else: - copy_type_infer(node) - - -def copy_type_infer(node): - for out_port in node.out_ports().values(): - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - if len(connected_in_ports) != 0: - data_type = connected_in_ports[0].get_data_type() - if data_type is not None: - out_port.set_data_type(data_type) - else: - src_node = connected_in_ports[0].get_connection().get_source().node - node_type_infer(src_node) - out_port.set_data_type(connected_in_ports[0].get_data_type()) - else: - raise Error('No input ports of node {} to determine data type'.format(node.soft_get('name'))) - - -def reverse_infer(graph: Graph, nodes: list): - nodes = reversed(nodes) - debug_logger = log.getLogger().isEnabledFor(log.DEBUG) - for n in nodes: - node = Node(graph, n) - if node.has_and_set('reverse_infer'): - log.debug("Executed reverse infer for node '{}'".format(node.soft_get('name', node.id))) - node.reverse_infer(node) - - if debug_logger: - log.debug('-' * 20) - log.debug('Reverse infer for {}'.format(node.soft_get('name'))) - log.debug('Op: {}'.format(node.soft_get('op'))) - log.debug('Outputs:') - log_debug_dict(node.out_nodes(), 'outputs') - - log.debug('Inputs:') - log_debug_dict(node.in_nodes(), 'inputs') - - parameters_with_no_shape = [] - for node in graph.get_op_nodes(op='Parameter'): - if not node.has_valid('shape'): - 
parameters_with_no_shape.append(node) - - if len(parameters_with_no_shape) == 0: - return - - parameters_names = '' - for idx, node in enumerate(parameters_with_no_shape): - parameters_names += "'{}'".format(node.soft_get('name', node.id)) - if idx < len(parameters_with_no_shape) - 1: - parameters_names += ', ' - - if len(parameters_with_no_shape) > 0: - raise Error("Model Optimizer is unable to deduce input shapes for the following Parameter nodes: {}. " - "Please use cli options --input or --input_shape to set model input shape.".format(parameters_names)) diff --git a/tools/mo/openvino/tools/mo/middle/passes/tensor_names.py b/tools/mo/openvino/tools/mo/middle/passes/tensor_names.py deleted file mode 100644 index 43297416a67153..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/passes/tensor_names.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from defusedxml import defuse_stdlib -from defusedxml.minidom import parseString -import defusedxml.ElementTree as ET - -from openvino.tools.mo.graph.graph import Node, Graph - -# defuse_stdlib provide patched version of xml.etree.ElementTree which allows to use objects from xml.etree.ElementTree -# in a safe manner without including unsafe xml.etree.ElementTree -ET_defused = defuse_stdlib()[ET] -Element = ET_defused.Element -SubElement = ET_defused.SubElement -tostring = ET_defused.tostring - - -def propagate_op_name_to_tensor(graph: Graph): - for node in graph.nodes(): - node = Node(graph, node) - if node.kind == 'op' and node.has_valid('name'): - for out_node, edge in node.out_nodes_edges().values(): - assert out_node.kind == 'data' - out_node['ie_tensor_name'] = node.name - out_node['ie_tensor_port'] = edge['out'] - out_node['ie_tensor_id'] = node.node - - -def output_tensor_names_map(graph: Graph, xml_file_name: str): - mapping = Element('mapping') - for node in graph: - node = Node(graph, node) - if node.has_valid('fw_tensor_debug_info') and node.has_valid('ie_tensor_name'): - for fw_tensor_debug_info in node.fw_tensor_debug_info: - # Check that debug info has valid fw attrs - if not all(attr is not None for attr in fw_tensor_debug_info): - continue - map = SubElement(mapping, 'map') - fw = SubElement(map, 'framework') - ie = SubElement(map, 'IR') - - fw.set('name', fw_tensor_debug_info[0]) - fw.set('out_port_id', str(fw_tensor_debug_info[1])) - - if node.has_valid('ie_tensor_name'): - ie.set('name', node.ie_tensor_name) - if node.has_valid('ie_tensor_port'): - ie.set('out_port_id', str(node.ie_tensor_port)) - if node.has_valid('ie_tensor_id'): - ie.set('id', str(node.ie_tensor_id)) - with open(xml_file_name, 'w') as file: - file.write(parseString(tostring(mapping)).toprettyxml()) diff --git a/tools/mo/openvino/tools/mo/middle/pattern_match.py b/tools/mo/openvino/tools/mo/middle/pattern_match.py deleted file mode 100644 index e0fc96c6840fb4..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/pattern_match.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np -from networkx.algorithms import isomorphism as ism - -from openvino.tools.mo.graph.graph import Node, dict_includes, Graph - - -def inverse_dict(d: dict): - return {v: k for k, v in d.items()} - - -def for_each_sub_graph(graph: Graph, func: callable): - """ Run a given function `func` for each sub-graph in a given graph not recursively. 
- - It doesn't search for sub-graphs in found sub-graphs recursively. If the recursion is required, - a given function `func` should be implemented in a special way to enable fully recursive traversal. - """ - for node in graph.nodes(): - node = Node(graph, node) - if node.has_valid('sub_graphs'): - for sub_graph_name in node.sub_graphs: - func(node[sub_graph_name]) - - -def for_each_sub_graph_recursively(graph: Graph, func: callable): - """ Run a given function `func` for each sub-graph in a given graph `graph` recursively. - - A given function `func` shouldn't contain a recursion for sub-graphs of the second level. - """ - - def recursive_helper(sub_graph): - # user action - func(sub_graph) - # recursion - for_each_sub_graph(sub_graph, recursive_helper) - - for_each_sub_graph(graph, recursive_helper) - - -def for_graph_and_each_sub_graph_recursively(graph: Graph, func: callable): - """ Run a given function `func` for a given graph `graph` and each sub-graph recursively. """ - func(graph) - for_each_sub_graph_recursively(graph, func) - - -def all_edges_in_nodes(nodes: list, edges: list): - return all([edge[0] in nodes and edge[1] in nodes for edge in edges]) - - -def apply_pattern(graph: Graph, nodes: list, edges: list, action: callable, node_attrs: list = None, - edge_attrs: list = None): - """ - Search for all matches of a given subgraph defined by [nodes, edges] in graph, - then apply action for each such match. - """ - if not all_edges_in_nodes([node[0] for node in nodes], edges): - log.warning("Incorrect pattern attributes: not all nodes from edges are in nodes. " - "Please, mention all nodes you need in pattern in nodes attribute. ") - - matches = [] - for match in find_pattern_matches(graph, nodes, edges, node_attrs, edge_attrs): - matches.append(match) - - for match in matches: - match = inverse_dict(match) - still_valid = True - for k in match: - if not graph.has_node(match[k]): - # Graph changed significantly - still_valid = False - log.warning("The graph has changed significantly during applying pattern:\n" - "nodes: {}\n" - "edges: {}\n" - "node_attrs: {}\n" - "edge_attrs: {}".format(nodes, edges, node_attrs, edge_attrs)) - break - match[k] = Node(graph, match[k]) - if still_valid: - action(graph, match) - - -def check_node_usages_out_of_match(match: dict, node_name_in_match_group: str): - """ - Checks if node is consumed by nodes out of match - :param match: dictionary with pattern match - :param node_name_in_match_group: string - :return: - """ - assert node_name_in_match_group in match - graph = match[node_name_in_match_group].graph - all_node_ids = [match[name].id for name in match] - in_out_node_ids = [u for u, _ in graph.in_edges(match[node_name_in_match_group].id)] - in_out_node_ids.extend([v for _, v in graph.out_edges(match[node_name_in_match_group].id)]) - return all([n in all_node_ids for n in in_out_node_ids]) - - -def node_match(data1: dict, data2: dict): - # We have to skip _in_ports/_out_ports attributes for comparison as they are not comparable - return dict_includes(data1, data2, skip_attr_names=['_in_ports', '_out_ports']) - - -def edge_match(datasets1, datasets2): - attrs = list(datasets2[0].keys()) - values1 = set([]) - for data1 in datasets1.values(): - x = tuple(data1.get(attr, None) for attr in attrs) - values1.add(x) - values2 = set([]) - for data2 in datasets2.values(): - x = tuple(data2.get(attr, None) for attr in attrs) - values2.add(x) - return values1 == values2 - - -def build_matcher(graph: Graph, nodes: list, edges: list, node_attrs: list = None, 
- edge_attrs: list = None): - if node_attrs is not None or edge_attrs is not None: - log.warning('\'edge_attrs\' or `\'node_attrs\'` parameter was passed to function \'find_pattern_matches\', ' - 'but they are not used anymore. Pattern matching proceeds according to \'nodes\' and \'edges\' ' - 'parameters. Please avoid passing \'edge_attrs\' and \'node_attrs\' parameters to any pattern ' - 'matching function like \'find_pattern_matches\', \'apply_pattern\' and \'pattern\' because it ' - 'will be deprecated in the next release.') - - subgraph = Graph(name='pattern') - subgraph.add_nodes_from(nodes) - subgraph.add_edges_from(edges) - return ism.MultiDiGraphMatcher(graph, subgraph, node_match, edge_match) - - -def find_pattern_matches(graph: Graph, nodes: list, edges: list, node_attrs: list = None, - edge_attrs: list = None): - """ - Find all matches of a given sub-graph defined by [nodes, edges] in graph. - """ - matcher = build_matcher(graph, nodes, edges, node_attrs, edge_attrs) - return matcher.subgraph_isomorphisms_iter() - - -def find_isomorphisms(graph: Graph, nodes: list, edges: list): - ''' Find for isomorphism between a given graph and a pattern specified by a given nodes and edges. - Applies the same rules as apply_pattern. - ''' - matcher = build_matcher(graph, nodes, edges) - result = [] - for match in matcher.isomorphisms_iter(): - match = inverse_dict(match) - match = {k: Node(graph, match[k]) for k in match.keys()} - result.append(match) - return result - - -def check_value(v: np.ndarray, check: callable): - return v is not None and np.all(np.isreal(v)) and check(v) diff --git a/tools/mo/openvino/tools/mo/middle/permute_tensor_iterator.py b/tools/mo/openvino/tools/mo/middle/permute_tensor_iterator.py deleted file mode 100644 index dfc0b3a5294d83..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/permute_tensor_iterator.py +++ /dev/null @@ -1,206 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.middle.LSTMRNNSequenceToTensorIterator import LSTMToTensorIterator -from openvino.tools.mo.middle.ONNXRNNSequenceNormalize import ONNXRNNSequenceNormalize -from openvino.tools.mo.middle.SwapAxesMiddleReplacer import SwapAxisMiddleReplacer -from openvino.tools.mo.middle.TensorIteratorMerge import TensorIteratorMerge -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import dict_includes, Graph -from openvino.tools.mo.middle.passes.eliminate import remove_op_node_with_data_node -from openvino.tools.mo.middle.pattern_match import find_isomorphisms -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class TransposeTensorIteratorLSTM(MiddleReplacementPattern): - """ Fuses Transpose(1,0,2) --> TI --> Transpose(1,0,2) pattern to a single TI with changed axis. - - WARNING This transformation is limited to support of very special case of TI but - code doesn't check all the cases. 
- """ - - enabled = True - - def run_after(self): - return [TensorIteratorMerge, ONNXRNNSequenceNormalize, LSTMToTensorIterator, SwapAxisMiddleReplacer] - - def run_before(self): - return [] - - def pattern(self): - return dict( - nodes=[ - ('input', dict(kind='data')), - ('direct_permute', dict(kind='op', op='Transpose')), - ('input_permuted', dict(kind='data')), - ('init_hidden', dict(kind='data')), - ('init_cell', dict(kind='data')), - ('ti', dict(kind='op', op='TensorIterator')), - - ('output_permuted', dict(kind='data')), - ('inverse_permute', dict(op='Transpose')), - ('output', dict(kind='data')), - ], - edges=[ - ('input', 'direct_permute'), - ('direct_permute', 'input_permuted'), - - ('input_permuted', 'ti', {'in': 0}), # affected by permute - ('init_hidden', 'ti', {'in': 1}), - ('init_cell', 'ti', {'in': 2}), - ('ti', 'output_permuted', {'out': 0}), # affected by permute - - ('output_permuted', 'inverse_permute'), - ('inverse_permute', 'output'), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - - # This transformation works if and only if a body of TI - # matches the following topology (Squeeze -> LSTMCell -> Unsqueeze) - nodes = [ - ('squeeze_dim', dict(kind='op', op='Const')), - ('squeeze_dim_data', dict(kind='data')), - - ('unsqueeze_dim', dict(kind='op', op='Const')), - ('unsqueeze_dim_data', dict(kind='data')), - - ('input_unsqueezed', dict(kind='data')), - ('squeeze', dict(kind='op', op='Squeeze')), - ('input_squeezed', dict(kind='data')), - ('input_hidden', dict(kind='data')), - ('input_cell', dict(kind='data')), - ('weights', dict(kind='data')), - ('biases', dict(kind='data')), - - ('lstm', dict(kind='op', op='LSTMCell')), - - ('output_hidden', dict(kind='data')), - ('output_cell', dict(kind='data')), - ('unsqueeze', dict(kind='op', op='Unsqueeze')), - ('output_unsqueezed', dict(kind='data')), - - ('const_w', dict(kind='op', op='Const')), - ('const_b', dict(kind='op', op='Const')), - - ('op_output', dict(kind='op', op='Result')), - ('op_output_1', dict(kind='op', op='Result')), - ('op_output_2', dict(kind='op', op='Result')), - - ('input_unsqueezed_i', dict(kind='op', op='Parameter')), - ('input_hidden_i', dict(kind='op', op='Parameter')), - ('input_cell_i', dict(kind='op', op='Parameter')), - ] - edges = [ - ('input_unsqueezed', 'squeeze', {'in': 0}), - ('squeeze', 'input_squeezed'), - - ('squeeze_dim', 'squeeze_dim_data'), - ('squeeze_dim_data', 'squeeze', {'in': 1}), - - ('input_squeezed', 'lstm', {'in': 0}), - ('input_hidden', 'lstm', {'in': 1}), - ('input_cell', 'lstm', {'in': 2}), - ('weights', 'lstm', {'in': 3}), - ('biases', 'lstm', {'in': 4}), - - ('const_w', 'weights'), - ('const_b', 'biases'), - - ('lstm', 'output_hidden', {'out': 0}), - ('lstm', 'output_cell', {'out': 1}), - - ('output_hidden', 'unsqueeze'), - ('unsqueeze', 'output_unsqueezed'), - - ('unsqueeze_dim', 'unsqueeze_dim_data'), - ('unsqueeze_dim_data', 'unsqueeze', {'in': 1}), - - ('output_unsqueezed', 'op_output'), - ('output_hidden', 'op_output_1'), - ('output_cell', 'op_output_2'), - - ('input_unsqueezed_i', 'input_unsqueezed'), - ('input_hidden_i', 'input_hidden'), - ('input_cell_i', 'input_cell'), - ] - ti = match['ti'] - isomorphisms = find_isomorphisms(ti.body, nodes, edges) - if len(list(isomorphisms)) != 1: - return - isomorphism = isomorphisms[0] - - direct_permute = match['direct_permute'] - inverse_permute = match['inverse_permute'] - - permute_order = [1, 0, 2] - - # Check both perumute orders exactly match expected one - [1, 0, 2] - direct_order = 
direct_permute.in_port(1).data.get_value() - if direct_order is None or not np.array_equal(direct_order, permute_order): - return - inverse_order = inverse_permute.in_port(1).data.get_value() - if inverse_order is None or not np.array_equal(inverse_order, permute_order): - return - - # Check non-ShapeOf output out of direct Transpose is exactly one - direct_permute_dsts = direct_permute.out_port(0).get_destinations() - if len([dst for dst in direct_permute_dsts if dst.node.soft_get('type') != 'ShapeOf']) != 1: - return - for shape_of_dst in [dst for dst in direct_permute_dsts if dst.node.soft_get('type') == 'ShapeOf']: - name = shape_of_dst.node.soft_get('name', shape_of_dst.node.id) + '/FusedToTITranspose' - gather = create_op_with_const_inputs(graph, op=Gather, op_attrs={'name': name}, - port_value_dict={1: int64_array(permute_order), 2: int64_array(0)}) - shape_of_dst.node.out_port(0).get_connection().insert_node(gather) - - def find_ports(port_map: list, attrs: dict): - """ Find all ports in a given port map with specified attributes """ - result = [] - for i, port in enumerate(port_map): - if dict_includes(port, attrs): - result.append(i) - return result - - # Check TI has only single partitioned input/output port; all partitioned ports have defined axis - data_input_port = find_ports(ti.input_port_map, {'axis': lambda attr: attr in [0, 1]}) - data_output_port = find_ports(ti.output_port_map, {'axis': lambda attr: attr in [0, 1]}) - assert len(data_input_port) == 1 - assert len(data_output_port) == 1 - data_input_port = data_input_port[0] - data_output_port = data_output_port[0] - # Verify that they are really connected to Transpose layers (guaranteed by port numbers of TI, see the pattern) - assert ti.in_edge(0)['external_port_id'] == ti.input_port_map[data_input_port]['external_port_id'] - assert ti.out_edge(0)['external_port_id'] == ti.output_port_map[data_output_port]['external_port_id'] - - # Verify that the TI body have required Reshapes connected to the found ports - squeeze = isomorphism['squeeze'] - unsqueeze = isomorphism['unsqueeze'] - - assert len(squeeze.in_node().shape) == 3 - assert len(squeeze.out_node().shape) == 2 - assert len(unsqueeze.in_node().shape) == 2 - assert len(unsqueeze.out_node().shape) == 3 - - # Remove permutes - remove_op_node_with_data_node(graph, direct_permute) - remove_op_node_with_data_node(graph, inverse_permute) - match['output'].shape = match['output'].shape[permute_order] - - # swap 0/1 axis for partitioned ports - ti.input_port_map[data_input_port]['axis'] = 1 - ti.input_port_map[data_input_port]['axis'] - ti.output_port_map[data_output_port]['axis'] = 1 - ti.output_port_map[data_output_port]['axis'] - - isomorphism['input_unsqueezed_i'].shape = isomorphism['input_unsqueezed_i'].shape[[1, 0, 2]] - isomorphism['input_unsqueezed_i'].infer(isomorphism['input_unsqueezed_i']) - isomorphism['squeeze_dim'].value = ti.input_port_map[data_input_port]['axis'] - isomorphism['squeeze_dim'].infer(isomorphism['squeeze_dim']) - isomorphism['squeeze']['need_shape_inference'] = True - - isomorphism['unsqueeze_dim'].value = ti.output_port_map[data_output_port]['axis'] - isomorphism['unsqueeze_dim'].infer(isomorphism['unsqueeze_dim']) - isomorphism['unsqueeze'].infer(isomorphism['unsqueeze']) diff --git a/tools/mo/openvino/tools/mo/middle/preprocessing.py b/tools/mo/openvino/tools/mo/middle/preprocessing.py deleted file mode 100644 index 95fb6cd5b86fb1..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/preprocessing.py +++ /dev/null @@ -1,31 +0,0 @@ -# 
Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.LeakyReluPattern import LeakyReLUFusion -from openvino.tools.mo.middle.pass_separator import PostMiddleStart -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.find_inputs import find_inputs -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class CaffeMeanFileProcessing(MiddleReplacementPattern): - enabled = True - force_clean_up = True - graph_condition = [lambda graph: graph.graph['fw'] == 'caffe'] - - def run_after(self): - return [LeakyReLUFusion] - - def run_before(self): - return [PostMiddleStart] - - def find_and_replace_pattern(self, graph: Graph): - from openvino.tools.mo.front.caffe import loader - argv = graph.graph['cmd_params'] - original_shapes = graph.graph['original_shapes'] - caffe_pb2 = graph.graph['caffe_pb2'] - del graph.graph['caffe_pb2'] - input_names = find_inputs(graph) - graph.graph['input_names'] = input_names diff --git a/tools/mo/openvino/tools/mo/middle/quantize_dequantize_linear_resolver.py b/tools/mo/openvino/tools/mo/middle/quantize_dequantize_linear_resolver.py deleted file mode 100644 index 03a0599ea28bfe..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/quantize_dequantize_linear_resolver.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph, rename_nodes -from openvino.tools.mo.middle.quantize_linear_resolver import QuantizeLinearResolver -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class QuantizeDequantizeLinearResolver(MiddleReplacementPattern): - """ - This transformation replaces QuantizeLinear in pair QuantizeLinear/DequantizeLinear with - constant inputs to FakeQuantize with flag stop_value_propagation=True. This transformation prepare FakeQuantize for - ConvertQuantizeDequantize in offline transformations. 
- """ - enabled = True - graph_condition = [lambda graph: graph.graph['layout'] == 'NCHW'] - - def pattern(self): - return dict( - nodes=[('const_input', dict(kind='op', op='Const')), - ('const_input_d', dict(kind='data')), - ('quantize', dict(kind='op', op='QuantizeLinear')), - ('quantize_d', dict(kind='data')), - ('dequantize', dict(kind='op', op='DequantizeLinear')), - ], - edges=[('const_input', 'const_input_d'), - ('const_input_d', 'quantize', {'in': 0}), - ('quantize', 'quantize_d'), - ('quantize_d', 'dequantize', {'in': 0}) - ] - ) - - def run_after(self): - from openvino.tools.mo.middle.quantize_fuses import MarkNodesToFuseUpToFakeQuantize - return [MarkNodesToFuseUpToFakeQuantize] - - def replace_pattern(self, graph: Graph, match: dict): - dequantize_node = match['dequantize'] - quantize_node = match['quantize'] - - scale_zerop_is_exist = quantize_node.is_in_port_connected(1) and quantize_node.is_in_port_connected(2) and \ - dequantize_node.is_in_port_connected(1) and dequantize_node.is_in_port_connected(2) - if not scale_zerop_is_exist: - return - q_scale = quantize_node.in_port(1).get_source().node - q_zerop = quantize_node.in_port(2).get_source().node - dq_scale = dequantize_node.in_port(1).get_source().node - dq_zerop = dequantize_node.in_port(2).get_source().node - scales_and_zerop_is_const = q_scale.soft_get('type') == 'Const' and dq_scale.soft_get('type') == 'Const' and \ - q_zerop.soft_get('type') == 'Const' and dq_zerop.soft_get('type') == 'Const' - scales_and_zerop_equals = np.array_equal(q_scale.value, dq_scale.value) and \ - np.array_equal(q_zerop.value, dq_zerop.value) - - # only constant as for zero_point/scale supported - # only patterns with same scale/zero_point values for Q and DQ are supported - if not (scales_and_zerop_is_const or scales_and_zerop_equals): - return - - QuantizeLinearResolver.quantize_to_fakequantize(graph, quantize_node, True) - quantize_node['isolated'] = True diff --git a/tools/mo/openvino/tools/mo/middle/quantize_fuses.py b/tools/mo/openvino/tools/mo/middle/quantize_fuses.py deleted file mode 100644 index f111e664c392c7..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/quantize_fuses.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.middle.BinarizeWeightsM1P1 import BinarizeWeightsM1P1 -from openvino.tools.mo.middle.DeleteControlFlowEdges import DeleteControlFlowEdges -from openvino.tools.mo.middle.EltwiseChecker import EltwiseChecker -from openvino.tools.mo.middle.quantize_linear_resolver import QuantizeLinearResolver -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.fusing.helpers import get_value_in_port -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class MarkNodesToFuseUpToFakeQuantize(MiddleReplacementPattern): - """ - Marks special nodes that could be pulled through Quantize operation. - Sets `fuse_up_to_quantize_ports` parameter to list of indexes of input ports of Quantize operation - where specified node should appear. 
- - """ - enabled = True - - def run_after(self): - return [DeleteControlFlowEdges] - - def run_before(self): - return [] - - @staticmethod - def mark_fusable_muls_on_weights(graph): - for node in graph.get_op_nodes(op='Mul'): - children = node.out_port(0).get_destinations() - if len(children) > 1 or children[0].node.soft_get('type') not in ['Convolution', 'Deconvolution', 'MatMul']: - continue - value_in_port = get_value_in_port(node) - if value_in_port is None: - continue - value_shape = value_in_port.data.get_shape() - non_one_axis = np.argwhere(value_shape != 1) - if non_one_axis.size != 1: - continue - non_one_axis = non_one_axis.item(0) - node['can_be_fused'] = True - EltwiseChecker().mark_eltwise_node(node, non_one_axis) - - def find_and_replace_pattern(self, graph: Graph): - # to prevent fusing of non per channel lin ops, we run EltwiseChecker to mark nodes with can_be_fused attribute - EltwiseChecker().find_and_replace_pattern(graph) - self.mark_fusable_muls_on_weights(graph) - eltwise_nodes = graph.get_op_nodes(op='Mul', can_be_fused=True) + \ - graph.get_op_nodes(op='Sub', can_be_fused=True) + \ - graph.get_op_nodes(op='Add', can_be_fused=True) - for elt in eltwise_nodes: - if elt.in_port(0).data.get_value() is not None or elt.in_port(1).data.get_value() is not None: - elt['fuse_up_to_quantize_ports'] = [3, 4] - - slice = graph.get_op_nodes(op='Slice') - for sl in slice: - sl['fuse_up_to_quantize_ports'] = [0] - - -class FakeQuantizeFuse(MiddleReplacementPattern): - """ - Pulls nodes containing `fuse_up_to_quantize_ports` parameter (node to fuse) through Quantize operation - - If `fuse_up_to_quantize_ports` list contains one input port to which node to fuse should be delivered, - replacer reconnects edges. - - If `fuse_up_to_quantize_ports` list contains more than one input port to which node to fuse should be delivered, - replacer reconnects edges of first port from `fuse_up_to_quantize_ports` list, for other ports - replacer duplicates node to fuse (duplicate connections of inputs of node to fuse to duplicates of it) - """ - enabled = True - - def run_after(self): - return [QuantizeLinearResolver] - - def run_before(self): - return [BinarizeWeightsM1P1] - - def find_and_replace_pattern(self, graph: Graph): - for quantize_node in graph.get_op_nodes(op='FakeQuantize'): - while len(quantize_node.out_port(0).get_destinations()) == 1: - if not quantize_node.out_port(0).get_destination().node.has_valid('fuse_up_to_quantize_ports'): - break - fuse_node = quantize_node.out_port(0).get_destination().node - quantize_to_mul_in_port_index = quantize_node.out_port(0).get_destination().idx - - # connecting the rest of model after mul to quantize, mul node hangs on quantize - fuse_node.out_port(0).get_connection().set_source(quantize_node.out_port(0)) - - # mul node is disconnected from the graph - fuse_node.in_port(quantize_to_mul_in_port_index).disconnect() - - first_port_fusion = True - for in_quantize_port in fuse_node['fuse_up_to_quantize_ports']: - fuse_node_duplicate = fuse_node - if not first_port_fusion: - fuse_node_duplicate = fuse_node.copy_node( - {'in_ports_count': len(fuse_node.in_ports()), - 'out_ports_count': len(fuse_node.out_ports())}) - - quantize_node.in_port(in_quantize_port).get_connection().set_destination( - fuse_node_duplicate.in_port(quantize_to_mul_in_port_index)) - - fuse_node_duplicate.out_port(0).connect(quantize_node.in_port(in_quantize_port)) - - if not first_port_fusion: - for idx, port in fuse_node.in_ports().items(): - if idx == 
quantize_to_mul_in_port_index: - continue - port.get_source().connect(fuse_node_duplicate.in_port(idx)) - fuse_node_duplicate.infer(fuse_node_duplicate) - - first_port_fusion = False diff --git a/tools/mo/openvino/tools/mo/middle/quantize_linear_resolver.py b/tools/mo/openvino/tools/mo/middle/quantize_linear_resolver.py deleted file mode 100644 index 5610f09b5db3f8..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/quantize_linear_resolver.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.elementwise import Mul -from openvino.tools.mo.ops.fakequantize import FakeQuantize -from openvino.tools.mo.front.common.partial_infer.utils import float_array, int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.tf.graph_utils import create_op_with_const_inputs -from openvino.tools.mo.graph.graph import Graph, rename_nodes, Node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.utils.error import Error - - -class QuantizeLinearResolver(MiddleReplacementPattern): - """ - Replaces QuantizeLinear with FakeQuantize - Transformation result depends on the axis value. - If the axis is not set or x_scale input is scalar or 1D tensor with one element then QuantizeLinear is - replaced with the sub-graph which can be expressed with the following formula: - QuantizeLinear -> FakeQuantize(input - Mul(y_scale, Const(low_value)) - Mul(y_scale, Const(high_value)) - Const(low_value) - Const(high_value)) - low_value and high_value depend on from y_zero_point type - In other cases y_scale and y_zero_point can be transform with addition reshape. - Target shape for y_scale and y_zero_point depend on axis value. 
- """ - enabled = True - graph_condition = [lambda graph: graph.graph['layout'] == 'NCHW'] - - def run_after(self): - from openvino.tools.mo.middle.quantize_dequantize_linear_resolver import QuantizeDequantizeLinearResolver - return [QuantizeDequantizeLinearResolver] - - def find_and_replace_pattern(self, graph: Graph): - for quantize_node in graph.get_op_nodes(op='QuantizeLinear'): - if quantize_node.has_and_set('isolated'): # node is detached and will be eliminated - # during the next clean up - continue - QuantizeLinearResolver.quantize_to_fakequantize(graph, quantize_node) - - @staticmethod - def quantize_to_fakequantize(graph: Graph, quantize_node: Node, set_stop_value_propagation=False): - node_name = quantize_node.soft_get('name', quantize_node.id) - axis = quantize_node.soft_get('axis', None) - scale_y_shape = quantize_node.in_port(1).data.get_shape() - - if quantize_node.is_in_port_connected(2): - zerop = quantize_node.in_port(2).get_source().node - else: - zerop = Const(graph, - {'value': mo_array(0, dtype=np.uint8), 'name': node_name + '/ZeroPoint'}).create_node() - - assert zerop.soft_get('type') == 'Const', 'only constant for zero_point is supported for QuantizeLinear' - zero_point_type = zerop.value.dtype - # data type affects range of output values: [-128..127] or [0..255] - if zero_point_type == np.int8: - output_low_value = -128.0 - output_high_value = 127.0 - elif zero_point_type == np.uint8: - output_low_value = 0.0 - output_high_value = 255.0 - else: - raise Error('Not expected type {} for zero point value in node {}'.format( - zero_point_type, zerop.soft_get('name'))) - - fake_quantize = create_op_with_const_inputs(graph, FakeQuantize, {3: float_array(output_low_value), - 4: float_array(output_high_value)}, - {'levels': 256, 'name': node_name + '/FakeQuantize'}) - if set_stop_value_propagation: - fake_quantize['stop_compression'] = True - fake_quantize['stop_value_propagation'] = True - quantize_node.in_port(0).get_connection().set_destination(fake_quantize.in_port(0)) - - # Calculate input_low value - mul_low = create_op_with_const_inputs(graph, Mul, {1: float_array(output_low_value - zerop.value)}, - {'name': node_name + '/Mul/Low'}) - quantize_node.in_port(1).get_connection().set_destination(mul_low.in_port(0)) - mul_low.out_port(0).connect(fake_quantize.in_port(1)) - - # Calculate input_high value - mul_high = create_op_with_const_inputs(graph, Mul, {1: float_array(output_high_value - zerop.value)}, - {'name': node_name + '/Mul/High'}) - mul_low.in_port(0).get_connection().add_destination(mul_high.in_port(0)) - mul_high.out_port(0).connect(fake_quantize.in_port(2)) - - cast = Cast(graph, {'dst_type': zero_point_type, 'name': node_name + '/Cast'}).create_node() - fake_quantize.out_port(0).connect(cast.in_port(0)) - quantize_node.out_port(0).get_connection().set_source(cast.out_port(0)) - rename_nodes([(quantize_node, node_name + '/TBD'), (cast, node_name)]) - - assert scale_y_shape is not None, "{0} contains scale(input with port 1) with shape None".\ - format(quantize_node.soft_get('name', quantize_node.id)) - if axis is not None and len(scale_y_shape) > 0 and scale_y_shape[0] > 1: - input_shape = fake_quantize.in_port(0).data.get_shape() - target_shape = np.ones(len(input_shape), int) - target_shape[axis] = input_shape[axis] - mul_low_reshape = create_op_with_const_inputs(graph, Reshape, {1: int64_array(target_shape)}, - {'name': node_name + '/Reshape/Mul/Low'}) - mul_high_reshape = create_op_with_const_inputs(graph, Reshape, {1: int64_array(target_shape)}, - 
{'name': node_name + '/Reshape/Mul/high'}) - - fake_quantize.in_port(1).get_connection().set_destination(mul_low_reshape.in_port(0)) - fake_quantize.in_port(2).get_connection().set_destination(mul_high_reshape.in_port(0)) - - mul_low_reshape.out_port(0).connect(fake_quantize.in_port(1)) - mul_high_reshape.out_port(0).connect(fake_quantize.in_port(2)) diff --git a/tools/mo/openvino/tools/mo/middle/replacement.py b/tools/mo/openvino/tools/mo/middle/replacement.py deleted file mode 100644 index e646718fca1bb2..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/replacement.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils import class_registration -from openvino.tools.mo.utils.replacement_pattern import ReplacementPattern - - -class MiddleReplacementPattern(ReplacementPattern): - registered_ops = {} - registered_cls = [] - - def run_after(self): - from openvino.tools.mo.middle.pass_separator import MiddleStart - return [MiddleStart] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleFinish - return [MiddleFinish] - - @classmethod - def class_type(cls): - return class_registration.ClassType.MIDDLE_REPLACER - - -ReplacementPattern.excluded_replacers.append(MiddleReplacementPattern) diff --git a/tools/mo/openvino/tools/mo/middle/reverse_tensor_iterator.py b/tools/mo/openvino/tools/mo/middle/reverse_tensor_iterator.py deleted file mode 100644 index b957577bce0033..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/reverse_tensor_iterator.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.middle.ONNXRNNSequenceNormalize import ONNXRNNSequenceNormalize -from openvino.tools.mo.middle.permute_tensor_iterator import TransposeTensorIteratorLSTM -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.eliminate import remove_op_node_with_data_node -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern - - -class ReverseTensorIteratorLSTM(MiddleReplacementPattern): - """ Fuses Reverse operations around TI: ReverseSequence --> TI --> ReverseSequence. - - WARNING This transformation is limited to support of very special case of TI but - code doesn't check all the cases. 
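The fusion described above is only valid when both ReverseSequence nodes reverse the full sequence. The equality check at the heart of the fusability test can be illustrated with plain NumPy (hypothetical shapes, illustration only):

```python
import numpy as np

# Hypothetical ReverseSequence input: batch 3, sequence axis 1 of length 10.
input_shape = np.array([3, 10, 8])
seq_axis = 1
sequence_lengths = np.array([10, 10, 10])  # value on input port 1

# The two ReverseSequence nodes around the TensorIterator cancel out only
# when every batch item is reversed over the full sequence length.
fusable = bool(np.all(sequence_lengths == input_shape[seq_axis]))
assert fusable
```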
- """ - - enabled = True - force_clean_up = True - - def run_after(self): - return [ - ONNXRNNSequenceNormalize, - TransposeTensorIteratorLSTM, - ] - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleFinish - return [MiddleFinish] - - @staticmethod - def is_fusable_reverse_sequence(node: Node): - sequence_lengths = node.in_port(1).data.get_value() - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None - - seq_len = input_shape[node.seq_axis] - if sequence_lengths is not None and is_fully_defined(sequence_lengths) and is_fully_defined(seq_len): - return np.all(sequence_lengths == seq_len) - else: - # check that we take sequence_length from input shape based on ReverseV2ToReverseSequence transformation - broadcast_node = node.in_port(1).get_source().node - if broadcast_node.op != 'Broadcast': - return False - gather_node = broadcast_node.in_port(0).get_source().node - if gather_node.op != "Gather" or \ - (np.all(gather_node.in_port(2).data.get_value() != [0]) or - np.all(gather_node.in_port(1).data.get_value() != [node.seq_axis])): - return False - gather_node_2 = broadcast_node.in_port(1).get_source().node - if gather_node_2.op != "Gather" or \ - (np.all(gather_node_2.in_port(2).data.get_value() != [0]) or - np.all(gather_node_2.in_port(1).data.get_value() != [node.batch_axis])): - return False - shape_node = gather_node.in_port(0).get_source().node - if shape_node.op != "ShapeOf": - return False - if shape_node.in_port(0).get_source().node != node.in_port(0).get_source().node: - return False - - return True - - def pattern(self): - return dict( - nodes=[ - ('input', dict(kind='data')), - - ('direct_seq_len_d', dict(kind='data')), - ('direct_reverse', dict(op='ReverseSequence')), - ('input_reversed', dict(kind='data')), - ('init_hidden', dict(kind='data')), - - ('ti', dict(kind='op', op='TensorIterator')), - ('output_reversed', dict(kind='data')), - - ('inverse_seq_len_d', dict(kind='data')), - ('inverse_reverse', dict(op='ReverseSequence')), - ('output', dict(kind='data')), - ], - edges=[ - ('input', 'direct_reverse', {'in': 0}), - ('direct_seq_len_d', 'direct_reverse', {'in': 1}), - ('direct_reverse', 'input_reversed'), - - ('input_reversed', 'ti', {'in': 0}), - ('init_hidden', 'ti', {'in': 1}), - ('ti', 'output_reversed', {'out': 0}), - - ('output_reversed', 'inverse_reverse', {'in': 0}), - ('inverse_seq_len_d', 'inverse_reverse', {'in': 1}), - ('inverse_reverse', 'output'), - ] - ) - - def replace_pattern(self, graph: Graph, match: dict): - ti = match['ti'] - direct_reverse = match['direct_reverse'] - inverse_reverse = match['inverse_reverse'] - - assert direct_reverse.seq_axis == inverse_reverse.seq_axis - assert direct_reverse.batch_axis is None and inverse_reverse.batch_axis is None or \ - direct_reverse.batch_axis == inverse_reverse.batch_axis - - if not self.is_fusable_reverse_sequence(direct_reverse) or \ - not self.is_fusable_reverse_sequence(inverse_reverse): - # we can not merge ReverseSequence without equal sequences - return - - # Modify stride in TI - for port_map in [ti.input_port_map, ti.output_port_map]: - for port in port_map: - if 'axis' in port and port['axis'] is not None and 'external_port_id' in port: - assert port['axis'] == direct_reverse.seq_axis, \ - 'axis == {} != {} == direct_reverse.seq_dim'.format(port['axis'], direct_reverse.seq_axis) - if 'stride' not in port or port['stride'] is None: - port['stride'] = 1 - assert port['stride'] in [-1, 1] - port['stride'] = -port['stride'] - if port['stride'] 
== -1: - port['start'] = -1 - port['end'] = 0 - elif port['stride'] == 1: - port['start'] = 0 - port['end'] = -1 - - # disconnect subgraph for seq length calculation - direct_reverse.in_port(1).disconnect() - inverse_reverse.in_port(1).disconnect() - # Remove reverses - remove_op_node_with_data_node(graph, direct_reverse) - remove_op_node_with_data_node(graph, inverse_reverse) diff --git a/tools/mo/openvino/tools/mo/middle/sparse_reshape.py b/tools/mo/openvino/tools/mo/middle/sparse_reshape.py deleted file mode 100644 index ad2fd625f8d10f..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/sparse_reshape.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.eliminate import merge_data_nodes -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.utils.error import Error - - -class SparseReshapeMiddleReplacer(MiddleReplacementPattern): - """ - Removes SparseReshape operation if the old shape and the output shape are the same. - """ - enabled = True - - def run_before(self): - from openvino.tools.mo.middle.pass_separator import MiddleFinish - return [MiddleFinish] - - def pattern(self): - return dict( - nodes=[ - ('sparse_reshape', dict(op='SparseReshape')), - ], - edges=[ - ]) - - def replace_pattern(self, graph: Graph, match: dict): - sparse_reshape = match['sparse_reshape'] - - input_shape_value = sparse_reshape.in_port(1).data.get_value() - output_shape_value = sparse_reshape.out_port(1).data.get_value() - - # establish output shape if value of new shape is given as input - new_shape_value = sparse_reshape.in_port(2).data.get_value() - if output_shape_value is None and new_shape_value is not None: - output_shape_value = new_shape_value - if np.count_nonzero(output_shape_value == -1) == 1: - elem = np.prod(input_shape_value) // np.prod(new_shape_value[new_shape_value != -1]) - output_shape_value[output_shape_value == -1] = elem - - if input_shape_value is None or output_shape_value is None: - raise Error("Input shape and output shape values must be defined for node {}".format(sparse_reshape.id)) - if not np.array_equal(input_shape_value, output_shape_value): - raise Error("Input shape and output shape values must be equal for node {}".format(sparse_reshape.id)) - - nodes_to_remove = [sparse_reshape.id] - if sparse_reshape.is_out_port_connected(0): - sparse_reshape.out_port(0).get_connection().set_source(sparse_reshape.in_port(0).get_source()) - output_data_node = sparse_reshape.out_node(0) - nodes_to_remove.append(output_data_node.id) - else: - input_data_node = sparse_reshape.in_node(0) - nodes_to_remove.append(input_data_node.id) - - if sparse_reshape.is_out_port_connected(1): - sparse_reshape.out_port(1).get_connection().set_source(sparse_reshape.in_port(1).get_source()) - output_data_node = sparse_reshape.out_node(1) - nodes_to_remove.append(output_data_node.id) - else: - input_data_node = sparse_reshape.in_node(1) - nodes_to_remove.append(input_data_node.id) - - graph.remove_nodes_from(nodes_to_remove) diff --git a/tools/mo/openvino/tools/mo/middle/split_tdnn_memoryoffset.py b/tools/mo/openvino/tools/mo/middle/split_tdnn_memoryoffset.py deleted file mode 100644 index f656f51e58d06d..00000000000000 --- a/tools/mo/openvino/tools/mo/middle/split_tdnn_memoryoffset.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: 
Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.memoryoffset import MemoryOffset -from openvino.tools.mo.ops.result import Result - - -class SplitTdnnMemoryOffset(MiddleReplacementPattern): - ''' - Splits MemoryOffsets in TDNN blocks into 2 parts. These parts then will be converted to ReadValue and Assign. - ''' - enabled = True - run_not_recursively = True - - def run_before(self): - from openvino.tools.mo.middle.ReplaceMemoryOffsetWithSplice import ReplaceMemoryOffsetWithMemoryNodePattern, ReplaceMemoryOffsetNodePattern - return [ReplaceMemoryOffsetNodePattern, ReplaceMemoryOffsetWithMemoryNodePattern] - - def find_and_replace_pattern(self, graph: Graph): - for offset_node in graph.get_op_nodes(op='MemoryOffset', splitted=False): - paired_node = MemoryOffset(graph, {'name': offset_node.pair_name, 'splitted': True, 'pair_name': offset_node.id, - 't': offset_node.t, 'has_default': offset_node.has_default}).create_node() - offset_node['splitted'] = True - offset_node.out_port(0).get_connection().set_source(paired_node.out_port(0)) - res_node = Result(graph, {'name': offset_node.id + "_output"}).create_node() - offset_node.out_port(0).connect(res_node.in_port(0)) - - # If 'element_size' is previously copied from Parameter of from node with defined dim - if offset_node.has_valid('element_size'): - paired_node['element_size'] = offset_node['element_size'] - # Copy shape from previous node. Typically (but not always) for TDNN blocks this is the case - else: - paired_node['element_size'] = offset_node.in_port(0).data.get_shape() diff --git a/tools/mo/openvino/tools/mo/mo.py b/tools/mo/openvino/tools/mo/mo.py deleted file mode 100755 index f3b8e92ebfdd1f..00000000000000 --- a/tools/mo/openvino/tools/mo/mo.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -if __name__ == "__main__": - from openvino.tools.mo.utils.telemetry_utils import init_mo_telemetry - init_mo_telemetry() - from openvino.tools.mo.subprocess_main import subprocess_main # nosec; pylint: disable=no-name-in-module - subprocess_main(framework=None) diff --git a/tools/mo/openvino/tools/mo/mo_caffe.py b/tools/mo/openvino/tools/mo/mo_caffe.py deleted file mode 100755 index fd05f686f78993..00000000000000 --- a/tools/mo/openvino/tools/mo/mo_caffe.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -if __name__ == "__main__": - from openvino.tools.mo.subprocess_main import subprocess_main - from openvino.tools.mo.utils.telemetry_utils import init_mo_telemetry - init_mo_telemetry() - subprocess_main(framework='caffe') diff --git a/tools/mo/openvino/tools/mo/mo_kaldi.py b/tools/mo/openvino/tools/mo/mo_kaldi.py deleted file mode 100755 index abdda198dd43ed..00000000000000 --- a/tools/mo/openvino/tools/mo/mo_kaldi.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -if __name__ == "__main__": - from openvino.tools.mo.subprocess_main import subprocess_main - from openvino.tools.mo.utils.telemetry_utils import init_mo_telemetry - init_mo_telemetry() - subprocess_main(framework='kaldi') diff --git a/tools/mo/openvino/tools/mo/mo_onnx.py b/tools/mo/openvino/tools/mo/mo_onnx.py deleted file mode 100755 index 04c058fc73ef83..00000000000000 --- 
a/tools/mo/openvino/tools/mo/mo_onnx.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -if __name__ == "__main__": - from openvino.tools.mo.subprocess_main import subprocess_main - from openvino.tools.mo.utils.telemetry_utils import init_mo_telemetry - init_mo_telemetry() - subprocess_main(framework='onnx') diff --git a/tools/mo/openvino/tools/mo/mo_paddle.py b/tools/mo/openvino/tools/mo/mo_paddle.py deleted file mode 100755 index c6331e202a5ccd..00000000000000 --- a/tools/mo/openvino/tools/mo/mo_paddle.py +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -if __name__ == "__main__": - from openvino.tools.mo.subprocess_main import subprocess_main - from openvino.tools.mo.utils.telemetry_utils import init_mo_telemetry - init_mo_telemetry() - subprocess_main(framework='paddle') diff --git a/tools/mo/openvino/tools/mo/mo_tf.py b/tools/mo/openvino/tools/mo/mo_tf.py deleted file mode 100755 index 9037b0930d91ca..00000000000000 --- a/tools/mo/openvino/tools/mo/mo_tf.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -if __name__ == "__main__": - from openvino.tools.mo.subprocess_main import subprocess_main - from openvino.tools.mo.utils.telemetry_utils import init_mo_telemetry - init_mo_telemetry() - subprocess_main(framework='tf') diff --git a/tools/mo/openvino/tools/mo/moc_frontend/__init__.py b/tools/mo/openvino/tools/mo/moc_frontend/__init__.py deleted file mode 100644 index 923d56d04145b6..00000000000000 --- a/tools/mo/openvino/tools/mo/moc_frontend/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 diff --git a/tools/mo/openvino/tools/mo/moc_frontend/analysis.py b/tools/mo/openvino/tools/mo/moc_frontend/analysis.py deleted file mode 100644 index cc4d99ed79425e..00000000000000 --- a/tools/mo/openvino/tools/mo/moc_frontend/analysis.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import json -from openvino.runtime import PartialShape, Model, Type # pylint: disable=no-name-in-module,import-error -from openvino.runtime.utils.types import get_dtype - -def json_model_analysis_dump(framework_model: Model): - - def dump_partial_shape(shape: PartialShape): - if shape.rank.is_dynamic: - return 'None' - return [dim.get_length() if dim.is_static else 0 for dim in shape] - - def dump_element_type(ov_type: Type): - try: - return str(get_dtype(ov_type)) - except: - return 'None' - - json_dump = {} - json_dump['inputs'] = {} - for param in framework_model.get_parameters(): - param_name = param.get_friendly_name() - json_dump['inputs'][param_name] = {} - json_dump['inputs'][param_name]['shape'] = dump_partial_shape(param.get_partial_shape()) - json_dump['inputs'][param_name]['data_type'] = dump_element_type(param.get_element_type()) - json_dump['inputs'][param_name]['value'] = 'None' # not supported in 22.1 - - json_dump['intermediate'] = {} - #TODO: extend model analysis dump for operations with body graphs (If, Loop, and TensorIterator) - for op in filter(lambda node: node.type_info.name != "NullNode", framework_model.get_ordered_ops()): - for out_idx in range(op.get_output_size()): - output = op.output(out_idx) - tensor_name = output.get_any_name() - 
json_dump['intermediate'][tensor_name] = {} - json_dump['intermediate'][tensor_name]['shape'] = dump_partial_shape(output.get_partial_shape()) - json_dump['intermediate'][tensor_name]['data_type'] = dump_element_type(output.get_element_type()) - json_dump['intermediate'][tensor_name]['value'] = 'None' # not supported in 22.1 - - json_model_analysis_print(json_dump) - - -def json_model_analysis_print(json_dump:str): - print(json.dumps(json_dump)) diff --git a/tools/mo/openvino/tools/mo/moc_frontend/check_config.py b/tools/mo/openvino/tools/mo/moc_frontend/check_config.py deleted file mode 100644 index 8f9f6d67b7e223..00000000000000 --- a/tools/mo/openvino/tools/mo/moc_frontend/check_config.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (C) 2022-2023 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -from pathlib import Path - -from openvino.tools.mo.utils.error import Error -import os - - -def default_path(): - EXT_DIR_NAME = '.' - return os.path.abspath(os.getcwd().join(EXT_DIR_NAME)) - - -def any_extensions_used(argv: argparse.Namespace): - # Checks that extensions are provided. - # Allowed types are string containing path to legacy extension directory - # or path to new extension .so file, or classes inherited from BaseExtension. - if not hasattr(argv, 'extensions') or argv.extensions is None: - return False - - if isinstance(argv.extensions, list) and len(argv.extensions) > 0: - has_non_default_path = False - has_non_str_objects = False - for ext in argv.extensions: - if not isinstance(ext, str): - has_non_str_objects = True - continue - if len(ext) == 0 or ext == default_path(): - continue - has_non_default_path = True - - return has_non_default_path or has_non_str_objects - - raise Exception("Expected list of extensions, got {}.".format(type(argv.extensions))) - - -def legacy_extensions_used(argv: argparse.Namespace): - if any_extensions_used(argv): - extensions = argv.extensions - legacy_ext_counter = 0 - for extension in extensions: - if not isinstance(extension, str): - continue - if extension == default_path(): - continue - if not Path(extension).is_file(): - legacy_ext_counter += 1 - if legacy_ext_counter == len(extensions): - return True # provided only legacy extensions - elif legacy_ext_counter == 0: - return False # provided only new extensions - else: - raise Error('Using new and legacy extensions in the same time is forbidden') - return False - - -def new_extensions_used(argv: argparse.Namespace): - if any_extensions_used(argv): - extensions = argv.extensions - if not isinstance(extensions, list): - extensions = [extensions] - new_ext_counter = 0 - for extension in extensions: - if isinstance(extension, str): - path = Path(extension) - if path.is_file() and (path.suffix == '.so' or path.suffix == '.dll'): - new_ext_counter += 1 - else: - new_ext_counter += 1 - if new_ext_counter == len(extensions): - return True # provided only new extensions - elif new_ext_counter == 0: - return False # provided only legacy extensions - else: - raise Error('Using new and legacy extensions in the same time is forbidden') - return False - - -def get_transformations_config_path(argv: argparse.Namespace) -> Path: - if hasattr(argv, 'transformations_config') \ - and argv.transformations_config is not None and len(argv.transformations_config): - if isinstance(argv.transformations_config, str): - path = Path(argv.transformations_config) - if path.is_file(): - return path - return None - - -def legacy_transformations_config_used(argv: argparse.Namespace): - return 
get_transformations_config_path(argv) != None - - -def tensorflow_custom_operations_config_update_used(argv: argparse.Namespace): - return hasattr(argv, 'tensorflow_custom_operations_config_update') and \ - argv.tensorflow_custom_operations_config_update is not None - - -def input_freezig_used(argv): - return hasattr(argv, 'freeze_placeholder_with_value') and argv.freeze_placeholder_with_value is not None \ - and len(argv.freeze_placeholder_with_value) > 0 diff --git a/tools/mo/openvino/tools/mo/moc_frontend/extractor.py b/tools/mo/openvino/tools/mo/moc_frontend/extractor.py deleted file mode 100644 index 2dc05a812a9455..00000000000000 --- a/tools/mo/openvino/tools/mo/moc_frontend/extractor.py +++ /dev/null @@ -1,461 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import re -from enum import Enum - -import numpy as np -from openvino._pyopenvino import Place, PartialShape # pylint: disable=no-name-in-module,import-error - -from openvino.frontend import InputModel # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.utils.error import Error - - -def raise_no_node(node_name: str): - raise Error('No node with name {}'.format(node_name)) - - -def raise_node_name_collision(node_name: str, found_nodes: list): - raise Error('Name collision was found, there are several nodes for mask "{}": {}. ' - 'If your intention was to specify port for node, please instead specify node names connected to ' - 'this port. If your intention was to specify the node name, please add port to the node ' - 'name'.format(node_name, found_nodes)) - - -class IOType(Enum): - Input = 1 - Output = 2 - - -def decode_name_with_port( - input_model: InputModel, node_name: str, framework="", io_type=IOType.Input -) -> Place or None: - """ - Decode name with optional port specification w/o traversing all the nodes in the graph - TODO: in future node_name can specify input/output port groups as well as indices (58562) - :param input_model: Input Model - :param node_name: user provided node name - :return: decoded place in the graph - """ - found_places = [] - found_place_names = [] - - def get_place_by_operation_name(input_model, name, framework, io_type): - node = input_model.get_place_by_operation_name(name) - if node and framework == "onnx": - if io_type == IOType.Input: - return ( - node.get_input_port(input_port_index=0) - .get_producing_port() - .get_target_tensor() - ) - else: - return node.get_output_port(output_port_index=0).get_target_tensor() - return node - - # find by tensor name - place = input_model.get_place_by_tensor_name(node_name) - if place: - found_place_names.append("Tensor:" + node_name) - found_places.append(place) - else: - # find by operation name - place = get_place_by_operation_name(input_model, node_name, framework, io_type) - name = node_name - if framework == "onnx" and io_type == IOType.Output: - name = "Tensor:" + name - - if place: - found_place_names.append(name) - found_places.append(place) - - def try_get_node(model, name, framework): - node = model.get_place_by_operation_name(name) - if node: - return node - if framework == "onnx": - tensor = model.get_place_by_tensor_name(name) - if tensor: - if tensor.is_input() or tensor.is_output(): - return tensor - return tensor.get_producing_operation() - return None - - def get_port(match, match_starts_with_name, input_model, framework): - if not match: - return None - - if match_starts_with_name: - name = match.group(1) - port_index = match.group(2) - else: - name = match.group(2) - 
port_index = match.group(1) - - node = try_get_node(input_model, name, framework) - if node: - # if regular expression has structure :, get node output port. - # Otherwise get node input port - if match_starts_with_name: - return node.get_output_port(output_port_index=int(port_index)) - else: - return node.get_input_port(input_port_index=int(port_index)) - else: - return None - - regexp_post = r"(.+):(\d+)" - match = re.search(regexp_post, node_name) - match_port = get_port( - match=match, - match_starts_with_name=True, - input_model=input_model, - framework=framework, - ) - - if match_port: - name = match.group(1) - if framework == "onnx": - found_place_names.append("Tensor:" + name) - found_places.append(match_port.get_target_tensor()) - else: - found_place_names.append(name) - found_places.append(match_port) - - regexp_pre = r"(\d+):(.+)" - match = re.search(regexp_pre, node_name) - match_port = get_port( - match=match, - match_starts_with_name=False, - input_model=input_model, - framework=framework, - ) - - if match_port: - name = match.group(2) - if framework == "onnx": - found_place_names.append("Tensor:" + name) - found_places.append(match_port.get_producing_port().get_target_tensor()) - else: - found_places.append(match_port) - found_place_names.append(name) - - if len(found_places) == 0: - raise_no_node(node_name) - - # Check that there is no collision, all found places shall point to same data - if not all([n.is_equal_data(found_places[0]) for n in found_places]): - raise_node_name_collision(node_name, found_place_names) - - # TODO: Add support for input/output group name and port index here (58562) - # For new frontends logic shall be extended to additionally support input and output group names - return found_places[0] - - -def fe_input_user_data_repack( - input_model: InputModel, - input_user_shapes: [None, list, dict, np.ndarray], - freeze_placeholder: dict, - framework: str, - input_user_data_types=None, -): - """ - Restructures user input cutting request. Splits ports out of node names. - Transforms node names to node ids. - :param input_model: current input model - :param input_user_shapes: data structure representing user input cutting request. 
It may be: - # None value if user did not provide neither "input" nor "input_shape" keys - # list instance which contains input layer names with or without ports if user provided - only "input" key - # dict instance which contains input layer names with or without ports as keys and shapes as - values if user provided both "input" and "input_shape" - # np.ndarray if user provided only "input_shape" key - :param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values - :param input_user_data_types: dictionary with input nodes and its data types - :return: restructured input shapes and freeze placeholder shapes information - Example of input dictionary: - _input_shapes = - { - 'node_ID': - [ - {'shape': None, 'in': 0}, - {'shape': None, 'in': 1}, - ], - 'node_1_ID': - [ - {'shape': [1, 227, 227, 3], 'port': None, 'data_type': np.int32} - ], - 'node_2_ID': - [ - {'shape': None, 'out': 3} - ] - } - Example of freeze placeholder dictionary: - _freeze_placeholder = - { - 'phase_train' : False - } - """ - _input_shapes = [] - _input_names = [] - model_inputs = input_model.get_inputs() - - if isinstance(input_user_shapes, list) and len(input_user_shapes) > 1 and isinstance(input_user_shapes[0], - PartialShape): - for shape in input_user_shapes: - assert isinstance(shape, PartialShape), "Got incorrect format of input shapes." - assert len(model_inputs) == len(input_user_shapes) - for idx, model_input in enumerate(model_inputs): - _input_shapes.append({"node": model_input, "shape": input_user_shapes[idx]}) - elif isinstance(input_user_shapes, list) or isinstance(input_user_shapes, dict): - for input_name in input_user_shapes: - node = decode_name_with_port( - input_model, input_name, framework, IOType.Input - ) - if node is None: - raise Error( - "Cannot find location {} in the input model".format(input_name) - ) - shape = ( - None - if isinstance(input_user_shapes, list) - else input_user_shapes[input_name] - ) - if isinstance(input_user_data_types, dict) and input_user_data_types.get(input_name) is not None: - data_type = input_user_data_types[input_name] - _input_shapes.append( - { - "node": node, - "shape": shape, - "data_type": data_type, - "input_name": input_name, - } - ) - else: - _input_shapes.append( - { - "node": node, - "shape": shape, - "input_name": input_name - } - ) - _input_names.append(input_name) - elif isinstance(input_user_shapes, PartialShape): - # this branch covers the single use of `input_shape` without `input` option - # but it can be used along with `freeze_placeholder_with_value` option - # for example, input_shape [3] freeze_placeholder_with_value "is_training->False" - # means the model has two inputs: one is is_training to be frozen, the other to re-write the shape - # NOTE: the logic relies on parameters with the single name - frozen_names = freeze_placeholder.keys() - assert len(model_inputs) == len(frozen_names) + 1, \ - "Please check the conversion command-line. Total number of model inputs ({} detected) " \ - "must match to a number of input shapes along with frozen inputs ({} in total).".format( - len(model_inputs), - len(frozen_names) + 1) - for node in model_inputs: - assert len(node.get_names()) > 0, "Original model inputs must have tensor names." 
- input_name = node.get_names()[0] - if input_name not in frozen_names: - _input_shapes.append( - { - "node": node, - "shape": input_user_shapes, - "input_name": input_name - } - ) - # case when single unnamed input shape and type was specified - if input_name in input_user_data_types: - _input_shapes[-1]['data_type'] = input_user_data_types[input_name] - _input_names.append(input_name) - break - else: - # this case means that we use original inputs of the model - # and they should not be changed and their properties (shape and type) should not be over-written - # NOTE: the logic relies on parameters with the single name - assert input_user_shapes is None - for node in model_inputs: - assert len(node.get_names()) > 0, "Original model inputs must have tensor names." - input_name = node.get_names()[0] - _input_shapes.append( - { - "node": node, - "input_name": input_name - } - ) - # case when types were specified for unnamed inputs - if input_name in input_user_data_types: - _input_shapes[-1]['data_type'] = input_user_data_types[input_name] - # mark-up Place names we already put into the _input_names - # to avoid duplicates in updates by freeze_placeholder below - _input_names.append(input_name) - - if freeze_placeholder: - # in case freezing via freeze_placeholder_with_value option, _input_shapes can miss some frozen places - for input_name in freeze_placeholder: - if input_name in _input_names or input_name + ":0" in _input_names: - continue - node = decode_name_with_port( - input_model, input_name, framework, IOType.Input - ) - _input_shapes.append( - { - "node": node, - "input_name": input_name - } - ) - return _input_shapes, freeze_placeholder - return _input_shapes, dict() - - -def fe_output_user_data_repack(input_model: InputModel, outputs: list, framework: str): - """ - - :param input_model: Input Model to operate on - :param outputs: list of node names provided by user - :return: dictionary with node IDs as keys and list of port dictionaries as values - Example of outputs dictionary: - _outputs = - { - 'node_ID': - [ - {'out': 0}, - {'out': 1}, - ], - 'node_1_ID': - [ - {'port': None} - ], - 'node_2_ID': - [ - {'in': 3} - ] - } - """ - _outputs = [] - if outputs is not None: - for output in outputs: - node = decode_name_with_port(input_model, output, framework, IOType.Output) - if node is None: - raise Error("Cannot find location {} in the graph".format(output)) - _outputs.append({"node": node}) - return _outputs - - -def find_first_unused_input(model_inputs: list, freeze_placeholder: dict, param_dict: dict, param_name: str): - """ - Finds first input in model_inputs, which is not present in freeze_placeholder dictionary or param_dict. - - :param model_inputs: list of model inputs - :param freeze_placeholder: dictionary where key is input name, value is input value for freezing. - :param param_dict: dictionary where key is input name, value is parameter value (shape or type). - :param param_name: name of parameter used in exception message. - - :return: first input name, which is not present in freeze_placeholder dictionary or param_dict. 
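A standalone sketch of the selection rule described above, using a hypothetical stand-in for frontend Place objects (only get_names() is exercised); names, classes, and values here are illustrative, not part of the MO API:

```python
# Hypothetical stand-in for a frontend Place; only get_names() is needed here.
class FakePlace:
    def __init__(self, *names):
        self._names = list(names)

    def get_names(self):
        return self._names


def first_unused_input(model_inputs, freeze_placeholder, param_dict):
    # Pick the first input none of whose names already has a user-provided
    # frozen value (freeze_placeholder) or shape/type entry (param_dict).
    for inp in model_inputs:
        names = inp.get_names()
        if not any(n in freeze_placeholder or n in param_dict for n in names):
            return names[0]
    raise ValueError("model does not have enough inputs")


inputs = [FakePlace("x", "x:0"), FakePlace("y", "y:0")]
# "x" already received a shape, so the next positional value is assigned to "y".
assert first_unused_input(inputs, {}, {"x": [1, 3, 224, 224]}) == "y"
```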
- """ - for inp in model_inputs: - input_names = inp.get_names() - name_found = False - for input_name in input_names: - if input_name in freeze_placeholder or input_name in param_dict: - name_found = True - break - if name_found: - continue - return input_names[0] - raise Error("Could not set {}, as model does not have enough inputs.".format(param_name)) - - -def convert_params_lists_to_dicts(input_model, - input_user_shapes: [list, dict], - input_user_data_types: [list, dict], - freeze_placeholder: dict, - unnamed_freeze_placeholders: list): - """ - Convert lists of unnamed params to dicts using input names from input_model. - - :param input_model: openvino.runtime.InputModel - :param input_user_shapes: list of input shapes or dictionary where key is input name, value is input shape from user. - :param input_user_data_types: list of input types or dictionary where key is input name, value is input type from user. - :param freeze_placeholder: dictionary where key is input name, value is input value from user. - :param unnamed_freeze_placeholders: list of unnamed input values from user. - - :return: (input_user_shapes_dict, input_user_data_types_dict, freeze_placeholder), where - input_user_shapes_dict - dictionary where key is input name, value is shape from user; - input_user_data_types_dict - dictionary where key is input name, value is type from user; - freeze_placeholder - dictionary where key is input name, value is input value from user; - """ - from openvino.runtime import PartialShape - model_inputs = input_model.get_inputs() - input_user_data_types_dict = {} - input_user_shapes_dict = {} - - # input_user_shapes is list only if unnamed inputs were used - if isinstance(input_user_shapes, list): - - # this cycle adds each unnamed shape to dictionary using name from model_inputs - for idx, shape in enumerate(input_user_shapes): - assert isinstance(shape, PartialShape), "Got incorrect format of input shapes {}.".format(type(shape)) - - inp_name = find_first_unused_input(model_inputs, freeze_placeholder, input_user_shapes_dict, "shape") - input_user_shapes_dict[inp_name] = shape - else: - input_user_shapes_dict = input_user_shapes - - # input_user_data_types is list only if unnamed inputs were used - if isinstance(input_user_data_types, list): - from openvino.runtime import Type - - if input_user_shapes_dict is None: - input_user_shapes_dict = {} - - # this cycle adds each unnamed type to dictionary using name from model_inputs - for idx, node_type in enumerate(input_user_data_types): - assert isinstance(node_type, (type, np.dtype, Type)), "Got incorrect format of input types. " \ - "Expected numpy type or openvino.runtime.Type, " \ - "got {}.".format(type(node_type)) - - inp_name = find_first_unused_input(model_inputs, freeze_placeholder, input_user_data_types_dict, "type") - input_user_data_types_dict[inp_name] = node_type - # FE postprocessing expects input_user_shapes_dict to always have shapes for corresponding types. - # If shape is not set it is expected to have None shape in input_user_shapes_dict dictionary. - if inp_name not in input_user_shapes_dict: - input_user_shapes_dict[inp_name] = None - else: - input_user_data_types_dict = input_user_data_types - - # unnamed_freeze_placeholders is always list, it is not empty only if unnamed inputs were used. - for value in unnamed_freeze_placeholders: - assert isinstance(value, list), "Got incorrect format of input values. 
" \ - "Expected list, " \ - "got {}.".format(type(value)) - inp_name = find_first_unused_input(model_inputs, freeze_placeholder, {}, "input value") - freeze_placeholder[inp_name] = value - - return input_user_shapes_dict, input_user_data_types_dict, freeze_placeholder - - -def fe_user_data_repack( - input_model: InputModel, - input_user_shapes: [None, list, dict, np.array], - input_user_data_types: dict, - outputs: list, - freeze_placeholder: dict, - framework: str, -): - """ - :param input_model: Input Model to operate on - :param input_user_shapes: data structure representing user input cutting request - :param input_user_data_types: dictionary with input nodes and its data types - :param outputs: list of node names to treat as outputs - :param freeze_placeholder: dictionary with placeholder names as keys and freezing value as values - :return: restructured input, output and freeze placeholder dictionaries or None values - """ - _input_shapes, _freeze_placeholder = fe_input_user_data_repack( - input_model, - input_user_shapes, - freeze_placeholder, - framework, - input_user_data_types=input_user_data_types, - ) - _outputs = fe_output_user_data_repack(input_model, outputs, framework) - - return _input_shapes, _outputs, _freeze_placeholder diff --git a/tools/mo/openvino/tools/mo/moc_frontend/layout_utils.py b/tools/mo/openvino/tools/mo/moc_frontend/layout_utils.py deleted file mode 100644 index 0dd0e4d820efba..00000000000000 --- a/tools/mo/openvino/tools/mo/moc_frontend/layout_utils.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from typing import Callable - -from openvino.runtime import PartialShape # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def update_layout_to_dict(inputs: list, layout: [list, dict], get_names_func: Callable): - """ - The function prepares layout values in the dictionary with items of the format: - { node_name : {'source_layout': 'NHWC', 'target_layout': 'NCHW'} } - """ - if isinstance(layout, dict): - if '' in layout: - input_names = [list(get_names_func(cur_input))[0] for cur_input in inputs] - if len(input_names) > 1: - raise Error('Layout without name can be specified for models with only one input, ' - 'but provided model has {} inputs: \'{}\'. ' - 'Please specify explicitly input/output name for "layout" option' - .format(len(input_names), input_names)) - layout = { - input_names[0]: { - 'source_layout': layout[''].get('source_layout'), - 'target_layout': layout[''].get('target_layout') - } - } - return layout - if isinstance(layout, list): - if len(layout) != len(inputs): - raise Error('Numbers of inputs and layout values do not match. ' + refer_to_faq_msg(61)) - layout_dict = {} - for idx, cur_input in enumerate(inputs): - names_list = list(get_names_func(cur_input)) - assert len(names_list) > 0, "No names for input" - node_name = names_list[0] - layout_dict.update( - { - node_name: layout[idx] - } - ) - return layout_dict - raise Error("Unknown layout type. Expected dict, list. Got {}".format(type(layout))) - - -def get_dimension_index_by_label(input_shape: PartialShape, input_names: list, layout_dict: [dict], - dimension_label: str, default_dim: int): - """ - The function returns index of the dimension pointed in the layout - and a flag indicating if the index is chosen by default. - For example, the index for 'D' dimension in "NHWDC" layout is 3. 
- """ - if input_shape.rank.is_static and input_shape.rank.get_length() == 0: - # in case a scalar, batch dimension is not defined - return None, False - - # search for the corresponding layout - for name, layout_value in layout_dict.items(): - if name in input_names: - layout = layout_value.get('source_layout', None) - if layout is None: - return default_dim, True - from openvino.runtime import Layout # pylint: disable=no-name-in-module,import-error - layout_parsed = Layout(layout) - if layout_parsed.has_name(dimension_label): - return layout_parsed.get_index_by_name(dimension_label), False - else: - # if the layout is specified and the required dimension label is not found, the batch is unknown - return None, False - - return default_dim, True diff --git a/tools/mo/openvino/tools/mo/moc_frontend/moc_emit_ir.py b/tools/mo/openvino/tools/mo/moc_frontend/moc_emit_ir.py deleted file mode 100644 index 51b9b727c7a04b..00000000000000 --- a/tools/mo/openvino/tools/mo/moc_frontend/moc_emit_ir.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse - -from openvino.runtime import Model # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.utils.cli_parser import parse_transform -from openvino.tools.mo.back.preprocessing import apply_preprocessing - - -def moc_emit_ir(ngraph_function: Model, argv: argparse.Namespace): - - # Apply preprocessing (mean/scale/reverse_channels/convert_layout/etc) - apply_preprocessing(ov_function=ngraph_function, argv=argv) - - # Apply transformations - from openvino.tools.mo.back.offline_transformations import apply_user_transformations, \ - apply_moc_legacy_transformations, apply_fused_names_cleanup - - from openvino._offline_transformations import apply_moc_transformations # pylint: disable=import-error,no-name-in-module - apply_moc_transformations(ngraph_function, cf=argv.static_shape, smart_reshape=True) - - from openvino._offline_transformations import compress_quantize_weights_transformation # pylint: disable=no-name-in-module,import-error - compress_quantize_weights_transformation(ngraph_function) - - if argv.framework == "onnx": - # set OldApi map in IR to be executed via OV API 1.x and for parity with legacy MO - params_with_custom_types = [] if argv.placeholder_data_types is None \ - else list(argv.placeholder_data_types.keys()) - apply_moc_legacy_transformations(ngraph_function, params_with_custom_types) - - apply_user_transformations(ngraph_function, parse_transform(argv.transform)) - - if argv.compress_to_fp16: - from openvino.tools.mo.back.offline_transformations import compress_model - compress_model(ngraph_function) - - apply_fused_names_cleanup(ngraph_function) - - del argv.feManager - return ngraph_function diff --git a/tools/mo/openvino/tools/mo/moc_frontend/paddle_frontend_utils.py b/tools/mo/openvino/tools/mo/moc_frontend/paddle_frontend_utils.py deleted file mode 100644 index def49590632e9c..00000000000000 --- a/tools/mo/openvino/tools/mo/moc_frontend/paddle_frontend_utils.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import sys -import tempfile - - -class paddle_frontend_converter: - def __init__(self, model, inputs=None, outputs=None): - self.model = model - self.inputs = inputs - self.outputs = outputs - self.tmp = None - self.model_name = None - self.pdmodel = None - self.pdiparams = None - self.pdiparams_info = None - self.is_generated = False - - def 
destroy(self): - # close tmp file - if isinstance(self.tmp, tempfile._TemporaryFileWrapper): - self.tmp.close() - - # remove the *.pdmodel - if os.path.exists(self.pdmodel): - os.remove(self.pdmodel) - - # remove the *.pdiparams - if os.path.exists(self.pdiparams): - os.remove(self.pdiparams) - - # remove the *.pdiparams.info - if os.path.exists(self.pdiparams_info): - os.remove(self.pdiparams_info) - - def convert_paddle_to_pdmodel(self): - ''' - There are three paddle model categories: - - High Level API: is a wrapper for dynamic or static model, use `self.save` to serialize - - Dynamic Model: use `paddle.jit.save` to serialize - - Static Model: use `paddle.static.save_inference_model` to serialize - ''' - try: - self.tmp = tempfile.NamedTemporaryFile(delete=True) - self.model_name = self.tmp.name - self.pdmodel = "{}.pdmodel".format(self.model_name) - self.pdiparams = "{}.pdiparams".format(self.model_name) - self.pdiparams_info = "{}.pdiparams.info".format(self.model_name) - - import paddle # pylint: disable=import-error - if isinstance(self.model, paddle.hapi.model.Model): - self.model.save(self.model_name, False) - else: - if self.inputs is None: - raise RuntimeError( - "Saving inference model needs 'inputs' before saving. Please specify 'example_input'" - ) - if isinstance(self.model, paddle.fluid.dygraph.layers.Layer): - with paddle.fluid.framework._dygraph_guard(None): - paddle.jit.save(self.model, self.model_name, input_spec=self.inputs, output_spec=self.outputs) - elif isinstance(self.model, paddle.fluid.executor.Executor): - if self.outputs is None: - raise RuntimeError( - "Model is static. Saving inference model needs 'outputs' before saving. Please specify 'example_output' for this model" - ) - paddle.static.save_inference_model(self.model_name, self.inputs, self.outputs, self.model) - else: - raise RuntimeError( - "Conversion just support paddle.hapi.model.Model, paddle.fluid.dygraph.layers.Layer and paddle.fluid.executor.Executor" - ) - - if not os.path.exists(self.pdmodel): - print("Failed generating paddle inference format model") - sys.exit(1) - - self.is_generated = True - return self.pdmodel - finally: - # close tmp file - if isinstance(self.tmp, tempfile._TemporaryFileWrapper): - self.tmp.close() \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/moc_frontend/pipeline.py b/tools/mo/openvino/tools/mo/moc_frontend/pipeline.py deleted file mode 100644 index 49ab0770043f5e..00000000000000 --- a/tools/mo/openvino/tools/mo/moc_frontend/pipeline.py +++ /dev/null @@ -1,320 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import logging as log -import sys -from copy import copy -from typing import List - -import numpy as np -import os - -from openvino.frontend import FrontEnd, InputModel, NotImplementedFailure, \ - Place # pylint: disable=no-name-in-module,import-error -from openvino.runtime import PartialShape, Type # pylint: disable=no-name-in-module,import-error -from openvino.runtime.utils.types import get_element_type, \ - get_numpy_ctype # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.moc_frontend.analysis import json_model_analysis_dump -from openvino.tools.mo.moc_frontend.extractor import fe_user_data_repack, convert_params_lists_to_dicts, fe_output_user_data_repack -from openvino.tools.mo.moc_frontend.layout_utils import update_layout_to_dict, get_dimension_index_by_label -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.type_utils 
import np_map_cast -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.middle.passes.infer import validate_batch_in_shape - - -def get_enabled_and_disabled_transforms(): - """ - :return: tuple of lists with force enabled and disabled id of transformations. - """ - disabled_transforms = os.environ['MO_DISABLED_TRANSFORMS'] if 'MO_DISABLED_TRANSFORMS' in os.environ else '' - enabled_transforms = os.environ['MO_ENABLED_TRANSFORMS'] if 'MO_ENABLED_TRANSFORMS' in os.environ else '' - - assert isinstance(enabled_transforms, str) - assert isinstance(disabled_transforms, str) - - disabled_transforms = disabled_transforms.split(',') - enabled_transforms = enabled_transforms.split(',') - - return enabled_transforms, disabled_transforms - - -def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd): - """ - Load input model and convert it to nGraph function - :param: argv: parsed command line arguments - :param: moc_front_end: Loaded Frontend for converting input model - :return: converted nGraph function ready for serialization - """ - input_checkpoint = getattr(argv, 'input_checkpoint', None) - share_weights = getattr(argv, 'share_weights', True) - if argv.input_model and input_checkpoint: - # frozen format with v1 checkpoints - input_model = moc_front_end.load([argv.input_model, argv.input_checkpoint], share_weights) - elif argv.input_model: - input_model = moc_front_end.load(argv.input_model, share_weights) - elif argv.saved_model_dir: - if argv.saved_model_tags: - input_model = moc_front_end.load([argv.saved_model_dir, argv.saved_model_tags], share_weights) - else: - input_model = moc_front_end.load(argv.saved_model_dir, share_weights) - elif argv.input_meta_graph: - input_model = moc_front_end.load(argv.input_meta_graph, share_weights) - if argv.output: - # Simulate original behavior with freezing model - # While freezing we do a cutting of model, to keep similar behavior we - # need to simulate similar behavior with natively supported model - outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name()) - input_model.override_all_outputs([x['node'] for x in outputs]) - - argv.placeholder_shapes, argv.placeholder_data_types, argv.freeze_placeholder_with_value = convert_params_lists_to_dicts( - input_model, argv.placeholder_shapes, argv.placeholder_data_types, - argv.freeze_placeholder_with_value, argv.unnamed_freeze_placeholder_with_value) - - user_shapes, outputs, freeze_placeholder = fe_user_data_repack( - input_model, argv.placeholder_shapes, argv.placeholder_data_types, - argv.output, argv.freeze_placeholder_with_value, moc_front_end.get_name()) - - def check_places_are_same(places_original: List[Place], places_new: List[Place]): - """ - Check if set of new places is same as original or not. - :param places_original: List[Place] Original model places - :param places_new: List[Place] New list of places - :return: True if new list of places is same as original - """ - return len(places_original) == len(places_new) and len( - [item for item in places_original if any( - [item.is_equal(item2['node']) for item2 in places_new])]) == len(places_original) - - def add_names_to_tensors(model: InputModel, places: List[Place]): - """ - Adds additional names to some model input tensors. This helper should be used - when a model modification is going to happen. 
- :param model The input model loaded by a given frontend - :param places An object containing Places and names that will be used for model modification - """ - for new_input in places: - if 'input_name' not in new_input: - continue - try: - model.add_name_for_tensor(new_input['node'], new_input['input_name']) - except NotImplementedFailure as e: - # some frontends might not implement this method - log.warning('Could not add an additional name to a tensor pointed to by \'{}\'. Details: {}'.format( - new_input['input_name'], str(e))) - - enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms() - if 'ANALYSIS_JSON_PRINT' in enabled_transforms: - # NOTE that model analysis is performed before applying user's settings (inputs's shapes etc.) - framework_model = moc_front_end.decode(input_model) - json_model_analysis_dump(framework_model) - # a model is not processed further in json analysis mode - sys.exit(0) - - model_inputs = input_model.get_inputs() - inputs_equal = True - if user_shapes: - inputs_equal = check_places_are_same(model_inputs, user_shapes) - - outputs_equal = True - if outputs: - outputs_equal = check_places_are_same(input_model.get_outputs(), outputs) - log.debug('Inputs are same: {}, outputs are same: {}'.format( - inputs_equal, outputs_equal)) - - def create_target_input_shapes(new_input_places): - if isinstance(new_input_places, list) and len(new_input_places) > 1 \ - and isinstance(new_input_places[0], tuple): - return new_input_places - new_input_place_names = [x.get_names()[0] for x in new_input_places] - shapes = [shape for shape in argv.placeholder_shapes.values()] - return dict(zip(new_input_place_names, shapes)) - - if not inputs_equal and not outputs_equal: - log.debug('Using extract subgraph') - new_input_places = [x['node'] for x in user_shapes] - new_output_places = [x['node'] for x in outputs] - add_names_to_tensors(input_model, user_shapes) - input_model.extract_subgraph(new_input_places, new_output_places) - # invalidation of existing Place objects could have happened in the operation above - if user_shapes: - placeholder_shapes = create_target_input_shapes(new_input_places) - new_output_places_name = [x.get_names()[0] for x in new_output_places] - - user_shapes, outputs, _ = fe_user_data_repack( - input_model, placeholder_shapes, argv.placeholder_data_types, - new_output_places_name, argv.freeze_placeholder_with_value, moc_front_end.get_name()) - elif not inputs_equal: - log.debug('Using override_all_inputs') - add_names_to_tensors(input_model, user_shapes) - new_input_places = [x['node'] for x in user_shapes] - input_model.override_all_inputs(new_input_places) - # invalidation of existing Place objects could have happened in the operation above - if user_shapes: - placeholder_shapes = create_target_input_shapes(new_input_places) - - user_shapes, outputs, _ = fe_user_data_repack( - input_model, placeholder_shapes, argv.placeholder_data_types, - argv.output, argv.freeze_placeholder_with_value, moc_front_end.get_name()) - elif not outputs_equal: - log.debug('Using override_all_outputs') - add_names_to_tensors(input_model, user_shapes) - new_output_places = [x['node'] for x in outputs] - input_model.override_all_outputs(new_output_places) - # invalidation of existing Place objects could have happened in the operation above - if user_shapes: - model_inputs = input_model.get_inputs() - - if user_shapes: - for user_shape in user_shapes: - if user_shape.get('shape') is not None: - input_model.set_partial_shape( - user_shape['node'], 
user_shape['shape']) - if user_shape.get('data_type') is not None: - data_type = get_element_type(user_shape['data_type']) - log.debug('Set data type: {}'.format(data_type)) - input_model.set_element_type(user_shape['node'], data_type) - - if freeze_placeholder: - for name, value in freeze_placeholder.items(): - node = None - # look for the certain place in user_shapes - for node_cur in user_shapes: - if node_cur.get('input_name') == name or node_cur.get('input_name') == name + ":0": - node = node_cur - break - if node is None: - raise Error("Please check correctness of the command-line. " - "Place (operation or tensor) with name {} is not found.".format(name)) - place = node.get('node') - - if node.get('data_type'): - dtype = node['data_type'] - ov_type = Type(dtype) - else: - # we need to detect type of Placeholder - try: - ov_type = input_model.get_element_type(place) - except NotImplementedFailure: - raise Error("Please specify type for value freezing {} node explicitly " - "because the frontend does not support automatic type detection.".format(name)) - # in case of cutting graph (or using custom inputs) and unspecified or dynamic type, - # the default type is fp32 - if ov_type == Type.undefined or ov_type == Type.dynamic: - ov_type = Type.f32 - dtype = get_numpy_ctype(ov_type) - - input_model.set_element_type(place, ov_type) - # prepare and cast value to dtype - if isinstance(value, list): - casted_list = list() - for v in mo_array(value): - casted_list.append(np_map_cast[dtype](v)) - value = mo_array(casted_list, dtype=dtype) - else: - value = np_map_cast[dtype](value) - value = np.array(value, dtype=dtype) - - ov_shape = input_model.get_partial_shape(place) - if node.get('shape'): - # set user defined shape - ov_shape = PartialShape(node['shape']) - input_model.set_partial_shape(place, ov_shape) - elif ov_shape.is_dynamic: - # in case of dynamic shape (dynamic rank or dynamic dimension) - # deduce it based on the value shape and set it - ov_shape = PartialShape(value.shape) - input_model.set_partial_shape(place, ov_shape) - - input_model.set_tensor_value(place, value) - - def shape_to_array(shape: PartialShape): - return [shape.get_dimension(i) for i in range(shape.rank.get_length())] - - # obtain layout for all inputs - layout_values = {} - if 'layout_values' in argv and argv.layout_values: - layout_values = update_layout_to_dict(model_inputs, argv.layout_values, - lambda input_place: input_place.get_names()) - - deferred_batch_names = [] - # set batch size for inputs with a static rank - # for all other inputs, set it after shape deduction is performed during model conversion - if argv.batch is not None and argv.batch > 0: - log.debug('Setting batch size to {}'.format(argv.batch)) - frozen_input_names = list(freeze_placeholder.keys()) if freeze_placeholder else [] - for place in model_inputs: - input_partial_shape = input_model.get_partial_shape(place) - input_names = place.get_names() - joined_name = ' '.join(place.get_names()) - assert len(input_names) > 0, "One input place has no names" - - # if this input is frozen, there is no need to set the batch - is_frozen_input = len([name for name in input_names if name in frozen_input_names]) > 0 - if is_frozen_input: - # skip the frozen input - continue - - if not input_partial_shape.rank.is_static: - # found input with dynamic rank, so have to repeat the batch setting after the model conversion - deferred_batch_names += input_names - continue - - batch_dim, is_default_index = get_dimension_index_by_label(input_partial_shape, - 
place.get_names(), layout_values, 'N', 0) - if batch_dim is None: - # skip because no batch dimension exists in the input - continue - - if is_default_index: - # if the batch index is chosen by default, we need to ensure that its size equals -1, 0 or 1 - validate_batch_in_shape(shape_to_array(input_partial_shape), joined_name) - - assert batch_dim < input_partial_shape.rank.get_length(), \ - "Incorrect layout is specified for {}:" \ - " index of the batch dimension is out of range.".format(input_names[0]) - - new_partial_shape = copy(input_partial_shape) - new_partial_shape[batch_dim] = argv.batch - - log.debug('Input: {}, Old shape: {}, New shape: {}'.format( - joined_name, input_partial_shape, new_partial_shape)) - input_model.set_partial_shape(place, new_partial_shape) - - ov_model = moc_front_end.convert(input_model) - - if argv.batch is not None and argv.batch > 0 and len(deferred_batch_names) > 0: - # Frontend convert method can include reverse infer functionality that can deduce undefined input shapes - # so try to repeat batch setting again - reshape_dict = {} - log.debug('Deferred batch setting to size {}'.format(argv.batch)) - is_batch_clarified = False - for model_input in ov_model.inputs: - input_name = model_input.any_name - input_partial_shape = model_input.get_partial_shape() - if input_name in deferred_batch_names and input_partial_shape.rank.is_static: - # update input shape with the specified batch for input that originally has dynamic rank - batch_dim, is_default_index = get_dimension_index_by_label(input_partial_shape, - model_input.get_names(), - layout_values, 'N', 0) - if batch_dim is None: - continue - - if is_default_index: - # if the batch index is chosen by default, we need to ensure that its size equals -1, 0 or 1 - validate_batch_in_shape(shape_to_array(input_partial_shape), input_name) - - assert batch_dim < input_partial_shape.rank.get_length(), \ - "Incorrect layout is specified for {}: " \ - "index of the batch dimension is out of range.".format(input_name) - input_partial_shape[batch_dim] = argv.batch - is_batch_clarified = True - - reshape_dict.update({input_name: input_partial_shape}) - - if is_batch_clarified: - # call reshape only if batch dimension for one of the input is clarified - ov_model.reshape(reshape_dict) - - return ov_model diff --git a/tools/mo/openvino/tools/mo/moc_frontend/pytorch_frontend_utils.py b/tools/mo/openvino/tools/mo/moc_frontend/pytorch_frontend_utils.py deleted file mode 100644 index f2f7096f1b7033..00000000000000 --- a/tools/mo/openvino/tools/mo/moc_frontend/pytorch_frontend_utils.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import sys - -import numpy as np -# pylint: disable=no-name-in-module,import-error -from openvino.runtime import Tensor, PartialShape - -from openvino.tools.mo.utils.error import Error - - -def get_pytorch_decoder(model, input_shape, example_inputs, args): - try: - from openvino.frontend.pytorch.ts_decoder import TorchScriptPythonDecoder - except Exception as e: - log.error("PyTorch frontend loading failed") - raise e - if 'nncf' in sys.modules: - is_good_version = True - try: - from nncf.torch.nncf_network import NNCFNetwork - - if isinstance(model, NNCFNetwork): - from packaging import version - if version.parse(sys.modules['nncf'].__version__) < version.parse("2.6"): - is_good_version = False - except: - pass - if not is_good_version: - raise RuntimeError( - "NNCF models produced by nncf<2.6 are not 
supported directly. Please upgrade nncf or export to ONNX first.") - inputs = prepare_torch_inputs(example_inputs) - if not isinstance(model, TorchScriptPythonDecoder): - decoder = TorchScriptPythonDecoder(model, example_input=inputs, shared_memory=args.get("share_weights", True)) - else: - decoder = model - args['input_model'] = decoder - args["framework"] = "pytorch" - args["example_input"] = inputs - - return args - - -def update_list_or_dict(container, name, idx, value): - if isinstance(container, dict): - if name is None: - name = list(container)[idx] - container[name] = value - return - if idx == len(container): - container.append(value) - elif idx > len(container): - raise Error(f"Wrong {idx}") - else: - container[idx] = value - return - - -def get_value_from_list_or_dict(container, name, idx): - if isinstance(container, dict): - if name is None: - if idx < len(container): - name = list(container)[idx] - return None - return container.get(name) - if idx < len(container): - return container[idx] - return None - - -def extract_input_info_from_example(args, inputs): - try: - from openvino.frontend.pytorch.utils import pt_to_ov_type_map # pylint: disable=no-name-in-module,import-error - except Exception as e: - log.error("PyTorch frontend loading failed") - raise e - example_inputs = args.example_input - data_types = args.placeholder_data_types or {} - input_shapes = args.placeholder_shapes or {} - is_dict_input = isinstance(example_inputs, dict) - list_inputs = list(example_inputs.values()) if is_dict_input else example_inputs - input_names = None - if not isinstance(example_inputs, (list, tuple, dict)): - list_inputs = [list_inputs] - if args.input_model._input_is_list: - list_inputs[0] = list_inputs[0].unsqueeze(0) - if args.input_model._input_signature is not None and not is_dict_input: - input_names = args.input_model._input_signature[1:] if args.input_model._input_signature[0] == "self" else args.input_model._input_signature - if not is_dict_input: - example_inputs = dict(zip(input_names, list_inputs)) - is_dict_input = True - elif is_dict_input: - input_names = list(example_inputs) - if not data_types and input_names is None: - data_types = [] - if not input_shapes and input_names is None: - input_shapes = [] - if inputs: - for input_id, input_info in enumerate(inputs): - input_name = input_info.name - if is_dict_input and input_name in example_inputs: - example_input = example_inputs[input_name] - else: - example_input = list_inputs[input_id] - if is_dict_input and input_name is None: - input_name = input_names[input_id] - dtype = getattr(example_input, "dtype", type(example_input)) - example_dtype = pt_to_ov_type_map.get(str(dtype)) - user_dtype = get_value_from_list_or_dict(data_types, input_name, input_id) - if user_dtype is not None and example_dtype is not None and example_dtype.to_dtype() != user_dtype: - raise Error(f"Defined input type {user_dtype} is not equal to provided example_input type {example_dtype.to_dtype()}") - - data_rank = getattr(example_input, "ndim", 0) - user_input_shape = get_value_from_list_or_dict(input_shapes, input_name, input_id) - if user_input_shape.rank.is_static and user_input_shape.rank.get_length() != data_rank: - raise Error( - f"Requested input shape {user_input_shape.rank.get_length()} rank" - f" is not equal to provided example_input rank {data_rank}") - - input_shape = user_input_shape if user_input_shape is not None else PartialShape([-1] * data_rank) - update_list_or_dict(data_types, input_name, input_id, example_dtype.to_dtype() if 
example_dtype is not None else None) - update_list_or_dict(input_shapes, input_name, input_id, input_shape) - else: - for input_id, example_input in enumerate(list_inputs): - dtype = getattr(example_input, "dtype", type(example_input)) - ov_dtype = pt_to_ov_type_map.get(str(dtype)) - data_rank = getattr(example_input, "ndim", 0) - input_shape = PartialShape([-1] * data_rank) - input_name = input_names[input_id] if input_names else None - update_list_or_dict(input_shapes, input_name, input_id, input_shape) - update_list_or_dict(data_types, input_name, input_id, ov_dtype.to_dtype() if ov_dtype is not None else None) - - args.placeholder_data_types = data_types - args.placeholder_shapes = input_shapes - if not args.input and input_names: - args.input_list = input_names - args.input = ",".join(input_names) - -# pylint: disable=no-member -def to_torch_tensor(tensor): - import torch # pylint: disable=import-error - if isinstance(tensor, torch.Tensor): - return tensor - if isinstance(tensor, np.ndarray): - return torch.tensor(tensor) - if isinstance(tensor, Tensor): - return torch.tensor(tensor.data) - if isinstance(tensor, (float, int, bool)): - return tensor - if isinstance(tensor, (tuple, list)): - # TODO: Function to_torch_tensor should be renamed as it handles not only a tensor - return tuple(to_torch_tensor(x) for x in tensor) - if isinstance(tensor, dict) and all(isinstance(k, str) for k in tensor.keys()): - return dict((k, to_torch_tensor(x)) for k, x in tensor.items()) - else: - raise Error("Unexpected type of example_input. Supported types torch.Tensor, np.array or ov.Tensor. " - "Got {}".format(type(tensor))) - - -def prepare_torch_inputs(example_inputs): - import torch - inputs = None - if example_inputs is not None: - inputs = example_inputs - if isinstance(inputs, list): - inputs = [to_torch_tensor(x) for x in inputs] - elif isinstance(inputs, tuple): - inputs = [to_torch_tensor(x) for x in inputs] - inputs = tuple(inputs) - elif isinstance(inputs, dict): - for name, tensor in inputs.items(): - assert isinstance(name, str), "Expected dictionary where keys are input names of string type and" \ - " values are tensors. Got key of type {}".format(type(name)) - inputs[name] = to_torch_tensor(tensor) - else: - inputs = to_torch_tensor(inputs) - else: - # No example_input were provided, decoder will use scripting - return None - return inputs diff --git a/tools/mo/openvino/tools/mo/moc_frontend/shape_utils.py b/tools/mo/openvino/tools/mo/moc_frontend/shape_utils.py deleted file mode 100644 index efe0702499d592..00000000000000 --- a/tools/mo/openvino/tools/mo/moc_frontend/shape_utils.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -from openvino.runtime import PartialShape, Dimension -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.cli_parser import get_placeholder_shapes, split_shapes - - -def get_static_shape(shape: [PartialShape, list, tuple], dynamic_value=None): - # Current function returns list with static dimensions with following logic. - # For dynamic dimensions return lower boundaries if they are set, otherwise - # return upper boundaries if they are set. If dimension is fully dynamic then raise error. 
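The bound-selection rule spelled out in the comment above can be tried in isolation. A minimal sketch using openvino Dimension objects (the same accessors the code below relies on), with None standing in for dynamic_value:

    from openvino.runtime import Dimension

    def pick_static_dim(dim, dynamic_value=None):
        if dim.is_static or dim.get_min_length() > 0:
            return dim.get_min_length()   # a known lower bound wins
        if dim.get_max_length() != -1:
            return dim.get_max_length()   # otherwise fall back to the upper bound
        return dynamic_value              # fully dynamic dimension

    # Dimension(1) is static, Dimension(3, 224) is bounded, Dimension() is fully dynamic
    print([pick_static_dim(d) for d in (Dimension(1), Dimension(3, 224), Dimension())])  # [1, 3, None]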
- shape_list = [] - for idx, dim in enumerate(shape): - if isinstance(dim, int): - if dim == -1: - shape_list.append(dynamic_value) - continue - shape_list.append(dim) - elif isinstance(dim, np.int64): - if dim == np.int64(-1): - shape_list.append(dynamic_value) - continue - shape_list.append(dim) - elif isinstance(dim, tuple): - # tuple where (min_length, max_length), the format which uses MO cli parser - assert len(dim) == 2, "Unknown dimension type {}".format(dim) - if dim[0] > 0: - shape_list.append(dim[0]) - elif dim[1] < np.iinfo(np.int64).max: - shape_list.append(dim[1]) - else: - shape_list.append(dynamic_value) - continue - elif isinstance(dim, Dimension): - if dim.is_static or dim.get_min_length() > 0: - shape_list.append(dim.get_min_length()) - elif dim.get_max_length() != -1: - shape_list.append(dim.get_max_length()) - else: - shape_list.append(dynamic_value) - continue - else: - raise Error("Unknown dimension type {}".format(dim)) - - return tuple(shape_list) - - -def get_dynamic_dims(shape: [PartialShape, list, tuple]): - dynamic_dims = [] - for idx, dim in enumerate(shape): - if isinstance(dim, int): - if dim == -1: - dynamic_dims.append(idx) - if isinstance(dim, np.int64): - if dim == np.int64(-1): - dynamic_dims.append(idx) - elif isinstance(dim, tuple): - dynamic_dims.append(idx) - elif isinstance(dim, Dimension): - if dim.get_min_length() == 0 and dim.get_max_length() == -1: - dynamic_dims.append(idx) - - return dynamic_dims - - -def parse_input_shapes(argv): - input_shapes = None - if 'input_shape' in argv and argv['input_shape'] is not None: - shapes = argv['input_shape'] - if isinstance(shapes, str): - shapes = ["[{}]".format(x) for x in split_shapes(shapes)] - if isinstance(shapes, list) or isinstance(shapes, tuple): - input_shapes = [] - is_single_shape = False - for shape in shapes: - if isinstance(shape, str): - _, shape_tuple, _ = get_placeholder_shapes(argv_input=None, argv_input_shape=shape) - input_shapes.append(shape_tuple) - if is_single_shape: - raise Error("Incorrect format of shape.") - elif isinstance(shape, int) or isinstance(shape, np.int64) or isinstance(shape, Dimension): - is_single_shape = True - input_shapes.append(shape) - else: - input_shapes.append(shape) - if is_single_shape: - return [input_shapes] - else: - return input_shapes - elif isinstance(shapes, PartialShape): - return [shapes] - else: - try: - import torch - if isinstance(shapes, torch.Size): - return [shapes] - except ImportError: - raise Error("Unknown type of input shape {}.".format(type(shapes))) - - return input_shapes \ No newline at end of file diff --git a/tools/mo/openvino/tools/mo/ops/BN.py b/tools/mo/openvino/tools/mo/ops/BN.py deleted file mode 100644 index 450586c543cd44..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/BN.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class BN(Op): - """ - BN operation comes from caffe and will be replaced by BNToScaleShift FrontReplacer. 
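parse_input_shapes above accepts, among other forms, a single string carrying several bracketed shapes. A simplified sketch of that splitting (not the full MO parser, which also handles dynamic dimensions):

    import re

    def parse_shapes_string(text):
        # "[1,3,224,224],[1,10]" -> [[1, 3, 224, 224], [1, 10]]
        return [[int(d) for d in group.split(',') if d]
                for group in re.findall(r'\[([^\]]*)\]', text)]

    print(parse_shapes_string("[1,3,224,224],[1,10]"))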
- """ - op = 'BN' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'in_ports_count': 5, - 'out_ports_count': 1, - 'infer': None - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/BatchNormInference.py b/tools/mo/openvino/tools/mo/ops/BatchNormInference.py deleted file mode 100644 index a770ce46b528f7..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/BatchNormInference.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class BatchNormInference(Op): - """ - BatchNormInference will be replaced by BNToScaleShift FrontReplacer for Caffe or convert_batch_norm - function for other frameworks - """ - op = 'batchNormInference' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'in_ports_count': 5, - 'out_ports_count': 1, - 'infer': self.infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - }, attrs) - - @staticmethod - def infer(node): - node.out_port(0).data.set_shape(node.in_port(0).data.get_shape()) diff --git a/tools/mo/openvino/tools/mo/ops/BlockLSTM.py b/tools/mo/openvino/tools/mo/ops/BlockLSTM.py deleted file mode 100644 index 285d6e49d9721d..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/BlockLSTM.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins, dynamic_dimension, shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class BlockLSTM(Op): - op = 'BlockLSTM' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'infer': self.infer, - 'type': None, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def infer(node: Node): - """ - MO input edges: | Description: - ------------------------------------------------- - 0 | x: The sequence input to the LSTM, shape (timelen, batch_size, num_inputs) - 1 | w: The weight matrix - 2 | b: The bias vector - 3 | h_prev: Previous/initial hidden state - 4 | cs_prev: Value of the initial cell state - - MO output edges: | Description: - 0 | hs: Output data / output hidden states concatenated over the whole time sequence - 1 | cs: Output cell states concatenated over the whole time sequence - """ - - node_name = node.soft_get('name', node.id) - connected_in_ports = [port_idx for port_idx, port in node.in_ports().items() if not port.disconnected()] - connected_out_ports = [port_idx for port_idx, port in node.out_ports().items() if not port.disconnected()] - assert len(connected_in_ports) >= 5, "Internal Model Optimizer Error or unsupported BlockLSTM node {}. " \ - "MO expects five inputs for BlockLSTM".format(node_name) - assert len(connected_out_ports) <= 2, "Internal Model Optimizer Error or unsupported BlockLSTM node {}. 
" \ - "MO expects at most two outputs for BlockLSTM".format(node_name) - - x_shape = node.in_port(0).data.get_shape() - w_shape = node.in_port(1).data.get_shape() - b_shape = node.in_port(2).data.get_shape() - - time_len = dynamic_dimension - batch_size = dynamic_dimension - if len(x_shape) > 2: - time_len = x_shape[0] - batch_size = x_shape[1] - - hidden_size_output = dynamic_dimension - if len(b_shape) > 0 and b_shape[0] is not dynamic_dimension: - hidden_size_output = b_shape[0] // 4 - elif len(w_shape) > 1 and w_shape[1] is not dynamic_dimension: - hidden_size_output = w_shape[1] // 4 - - # mark-up inputs for LSTMRNNSequenceToTensorIterator transformation - mark_input_bins(node) - - x_output_shape = shape_array([time_len, batch_size, hidden_size_output]) - if node.is_out_port_connected(0): - node.out_port(0).data.set_shape(x_output_shape) - - # at this point cell states are in aggregated form from all time steps - # after that the middle transformation BlockLSTMtoLSTMSequence should normalize it to last step cell state - if node.is_out_port_connected(1): - node.out_port(1).data.set_shape(x_output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/Cast.py b/tools/mo/openvino/tools/mo/ops/Cast.py deleted file mode 100644 index 2ea210bffe167e..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/Cast.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_precision, convert_blob, \ - np_data_type_to_destination_type, packed_I4, packed_U4 -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class Cast(Op): - op = 'Cast' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'type': 'Convert', - 'version': 'opset1', - 'infer': self.infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - 'type_infer': self.type_infer, - 'dst_type': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return [('destination_type', lambda node: np_data_type_to_destination_type(node.dst_type))] - - @staticmethod - def type_infer(node: Node): - assert node.has_valid( - 'dst_type'), 'Destination type of "Cast" operation should be extracted earlier' - node.out_port(0).set_data_type(node.dst_type) - - @staticmethod - def helper_value_propagation(node_name, value, dst_type): - new_blob, finite_match_count, zero_match_count = convert_blob( - value, dst_type) - - if finite_match_count: - log.error("{} elements of {} were clipped to infinity while converting an input blob for node '{}' to {}." - " ".format(finite_match_count, new_blob.size, node_name, dst_type) + refer_to_faq_msg(76)) - if zero_match_count: - log.warning("{} elements of {} were clipped to zero while converting an input blob for node '{}' to {}." - " ".format(zero_match_count, new_blob.size, node_name, dst_type) + refer_to_faq_msg(77)) - return new_blob - - @staticmethod - def custom_type_casting_and_packing(node: Node, value, dst_type): - """ - Custom types are not supported by numpy but we still need to write it to the .bin file in a compact way. 
- To do so we prepare bit representation of int4/uint4 values and store them in a numpy friendly data type. - We pack int4/uint4 values into uint8 type (two int4/uint4 numbers fit in uint8). - If the number of elements in the blob is odd we pad them with zero value to be able to fit the bit sequence - into the uint8 array. - Example: we need to represent 5 elements of int4 dtype - we would pad them to 6 element with the last element as zero and we would pack them into 3 uint8 values - """ - assert dst_type in [packed_U4, packed_I4] - # TODO: Remove this comment when it's clear that we can fix it easily - # raise Exception("Packing of u4/i4 data is no longer supported in mo because it is now incompatible with the new " - # "order of the halfs of a byte that was introduced in OpenVINO runtime recently. Use ovc " - # "command line tool or openvino.convert_model python function instead.") - - minimum_regular_dtype = np.uint8 if dst_type == packed_U4 else np.int8 - # initial casing from the source type to the numpy-friendly type which could absorb all the values of dst_type - casted_to_regular_type = Cast.helper_value_propagation( - node.soft_get('name', node.id), value, minimum_regular_dtype) - - # packing the values - data_shape = node.out_port(0).data.get_shape() - assert data_shape is not None - data_size = np.prod(data_shape) - - num_bits = 4 - assert num_bits < 8 and 8 % num_bits == 0, "Packing algorithm for the data types stored in 1, 2 or 4 bits" - num_values_fitting_into_uint8 = 8 // num_bits - pad = (-data_size) % num_values_fitting_into_uint8 - - flattened = casted_to_regular_type.flatten() - padded = np.concatenate((flattened, np.zeros([pad], dtype=minimum_regular_dtype))) - assert np.prod(padded.shape) % num_values_fitting_into_uint8 == 0 - - bit_order_little = (padded[:, None] & ( - 1 << np.arange(num_bits)) > 0).astype(np.uint8) - bit_order_big_flattened = bit_order_little.flatten() - # u1 still has reversed bit order: - packed = np.packbits(bit_order_big_flattened, - bitorder='little' if num_bits > 1 else 'big') - - node.out_node(0)['force_shape'] = data_shape.copy() - node.out_node(0)['force_type'] = np_data_type_to_precision(dst_type) - node.out_port(0).data.set_value(packed) - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - dst_type = node.soft_get('dst_type', None) - - assert dst_type is not None, \ - 'Destination type of "Cast" operation should be extracted earlier, but it`s not for node: ' + node_name - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None - node.out_port(0).data.set_shape(input_shape) - - value = node.in_port(0).data.get_value() - if value is None or node.has_and_set('stop_value_propagation'): - return - - if dst_type in [packed_U4, packed_I4]: # custom types conversion - Cast.custom_type_casting_and_packing(node, value, dst_type) - else: - node.out_port(0).data.set_value( - Cast.helper_value_propagation(node_name, value, dst_type)) diff --git a/tools/mo/openvino/tools/mo/ops/ClipByValueTF.py b/tools/mo/openvino/tools/mo/ops/ClipByValueTF.py deleted file mode 100644 index c983cb54bb23bd..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/ClipByValueTF.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class ClibByValueTF(Op): - """ - The ClipByValue from TF which will be replaced with a front transformation. 
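The u4/i4 packing scheme described in Cast.custom_type_casting_and_packing above can be illustrated with plain numpy: two 4-bit values share one uint8, and an odd element count is zero-padded first. Which half of the byte is filled first is exactly the detail the in-code note says changed in the runtime, so this sketch simply picks low-nibble-first for illustration:

    import numpy as np

    def pack_uint4(values):
        # values: integers in [0, 15]; pad to an even count, then put the
        # even-indexed element into the low nibble and its neighbour into
        # the high nibble of each output byte.
        v = np.asarray(values, dtype=np.uint8).flatten()
        if v.size % 2:
            v = np.concatenate([v, np.zeros(1, dtype=np.uint8)])
        return (v[0::2] | (v[1::2] << 4)).astype(np.uint8)

    print(pack_uint4([1, 2, 3, 4, 5]))  # 5 values -> 3 bytes: [33 67  5]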
- """ - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': 'ClipByValueTF', - 'out_ports_count': 1, - 'in_ports_count': 3, - 'infer': None - } - super().__init__(graph, mandatory_props, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/Complex.py b/tools/mo/openvino/tools/mo/ops/Complex.py deleted file mode 100644 index ec4c293ba8e076..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/Complex.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class Complex(Op): - op = 'Complex' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': Complex.infer - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [] - - @staticmethod - def infer(node: Node): - real_shape = node.in_port(0).data.get_shape() - imag_shape = node.in_port(1).data.get_shape() - if real_shape is None or imag_shape is None: - return - - assert np.array_equal(real_shape, imag_shape), \ - "Shapes of real and imaginary parts must be the same. Got: {} as real part shape " \ - "and {} as imaginary part shape for Node {} with op {}." \ - "".format(real_shape, imag_shape, node.soft_get("name", node.id), node.op) - - output_shape = np.ma.append(real_shape, 2) - node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/ConvertLike.py b/tools/mo/openvino/tools/mo/ops/ConvertLike.py deleted file mode 100644 index 93b35e4d89d47d..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/ConvertLike.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class ConvertLike(Op): - op = 'ConvertLike' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - - 'infer': copy_shape_infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - 'type_infer': self.type_infer, - - 'in_ports_count': 2, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def type_infer(node: Node): - assert node.is_in_port_connected(1), 'The second input is not connected for a node {}.' 
\ - ''.format(node.soft_get('name'), node.id) - node.out_port(0).set_data_type(node.in_port(1).get_data_type()) diff --git a/tools/mo/openvino/tools/mo/ops/DetectionOutput.py b/tools/mo/openvino/tools/mo/ops/DetectionOutput.py deleted file mode 100644 index afe00f8126d643..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/DetectionOutput.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, compatible_dims, \ - undefined_shape_of_rank, set_input_shapes -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class DetectionOutput(Op): - op = 'DetectionOutput' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset8', - 'in_ports_count': 3, - 'out_ports_count': 1, - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - 'input_width': 1, - 'input_height': 1, - 'normalized': True, - 'share_location': True, - 'clip_after_nms': False, - 'clip_before_nms': False, - 'decrease_label_id': False, - 'variance_encoded_in_target': False, - 'type_infer': self.type_infer, - }, attrs) - - def supported_attrs(self): - supported_attrs = [ - 'background_label_id', - ('clip_after_nms', lambda node: bool_to_str(node, 'clip_after_nms')), - ('clip_before_nms', lambda node: bool_to_str(node, 'clip_before_nms')), - 'code_type', - 'confidence_threshold', - ('decrease_label_id', lambda node: bool_to_str(node, 'decrease_label_id')), - 'input_height', - 'input_width', - 'keep_top_k', - 'nms_threshold', - ('normalized', lambda node: bool_to_str(node, 'normalized')), - ('share_location', lambda node: bool_to_str(node, 'share_location')), - 'top_k', - ('variance_encoded_in_target', lambda node: bool_to_str(node, 'variance_encoded_in_target')), - 'objectness_score', - ] - opset = self.get_opset() - if opset == 'opset1': - supported_attrs += ['num_classes'] - return supported_attrs - - @staticmethod - def type_infer(node: Node): - node.out_port(0).set_data_type(np.float32) - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - loc_shape = node.in_port(0).data.get_shape() - conf_shape = node.in_port(1).data.get_shape() - prior_boxes_shape = node.in_port(2).data.get_shape() - - if loc_shape is None or conf_shape is None or prior_boxes_shape is None: - raise Error('Shapes for the Detection Output node "{}" are not defined'.format(node_name)) - - prior_size = 4 - if node.has('normalized') and not node.normalized: - prior_size = 5 - - if is_fully_defined(prior_boxes_shape[-1]) and prior_boxes_shape[-1] % prior_size != 0: - raise Error('Amount of confidences "{}" is not divisible by {} for node "{}"' - ''.format(prior_boxes_shape[-1], prior_size, node_name)) - - num_priors = prior_boxes_shape[-1] // prior_size - if not node.has_valid('keep_top_k') or node.keep_top_k == -1: - node['keep_top_k'] = num_priors - - num_classes = conf_shape[-1] // num_priors - num_loc_classes = num_classes - if node.has_and_set('share_location') and node.share_location: - num_loc_classes = 1 - - if not compatible_dims(num_priors * num_loc_classes * 4, loc_shape[-1]): - raise Error('Locations and prior boxes shapes mismatch: "{}" vs "{}" for node "{}"' - ''.format(loc_shape, prior_boxes_shape, 
node_name)) - - if not node.variance_encoded_in_target and not compatible_dims(prior_boxes_shape[-2], 2): - raise Error('The "-2" dimension of the prior boxes must be 2 but it is "{}" for node "{}".' - ''.format(prior_boxes_shape[-2], node_name)) - - if is_fully_defined(conf_shape[-1]) and is_fully_defined(num_priors) and conf_shape[-1] % num_priors != 0: - raise Error('Amount of confidences "{}" is not divisible by amount of priors "{}" for node "{}".' - ''.format(conf_shape[-1], num_priors, node_name)) - - node.out_port(0).data.set_shape([1, 1, conf_shape[0] * node.keep_top_k, 7]) - - # the line below is needed for the TF framework so the MO will not change the layout - node.graph.node[node.out_node(0).id]['nchw_layout'] = True - - @staticmethod - def reverse_infer(node): - num_in_ports = len(node.in_ports()) - assert num_in_ports in [3, 6], 'incorrect number of input ports for DetectionOutput node {}'.format(node.soft_get('name', node.id)) - if num_in_ports == 3: - set_input_shapes(node, - undefined_shape_of_rank(2), - undefined_shape_of_rank(2), - undefined_shape_of_rank(3)) - elif num_in_ports == 6: - set_input_shapes(node, - undefined_shape_of_rank(2), - undefined_shape_of_rank(2), - undefined_shape_of_rank(3), - undefined_shape_of_rank(2), - undefined_shape_of_rank(2)) diff --git a/tools/mo/openvino/tools/mo/ops/Enter.py b/tools/mo/openvino/tools/mo/ops/Enter.py deleted file mode 100644 index e53a9dedba8531..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/Enter.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class Enter(Op): - op = "Enter" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'in_ports_count': 1, - 'infer': Enter.enter_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def enter_infer(node: Node): - output_shape = node.in_port(0).data.get_shape() - output_value = node.in_port(0).data.get_value() - - for _, out_node in node.graph.out_edges(node.id): - node.graph.node[out_node]['shape'] = shape_array(output_shape) - node.graph.node[out_node]['value'] = None if output_value is None else output_value.copy() diff --git a/tools/mo/openvino/tools/mo/ops/Exit.py b/tools/mo/openvino/tools/mo/ops/Exit.py deleted file mode 100644 index b6b8f749c4a1b3..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/Exit.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class Exit(Op): - op = "Exit" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': __class__.op, - 'infer': Exit.exit_infer, - 'in_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def exit_infer(node: Node): - output_shape = node.in_port(0).data.get_shape() - output_value = node.in_port(0).data.get_value() - - for port in node.out_ports(): - if not node.out_port(port).disconnected(): - node.out_port(port).data.set_shape(output_shape) - if output_value is not None: - node.out_port(port).data.set_value(output_value) diff --git a/tools/mo/openvino/tools/mo/ops/ExtractImagePatches.py b/tools/mo/openvino/tools/mo/ops/ExtractImagePatches.py deleted 
file mode 100644 index 71d434d5fbbd59..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/ExtractImagePatches.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.layout import shape_for_layout, get_batch_dim, get_features_dim -from openvino.tools.mo.front.common.partial_infer.utils import tf_window_op_pad_infer, shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class ExtractImagePatches(Op): - op = "ExtractImagePatches" - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - assert 'spatial_dims' in attrs, \ - 'ExtractImagePatches operation should have `spatial_dims` parameter set during creation' - - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset3', - 'infer': self.infer, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - def backend_attrs(self): - return [ - ('sizes', lambda node: ','.join(map(str, node['sizes'][node.spatial_dims]))), - ('strides', lambda node: ','.join(map(str, node['strides'][node.spatial_dims]))), - ('rates', lambda node: ','.join(map(str, node['rates'][node.spatial_dims]))), - 'auto_pad', - ] - - @staticmethod - def infer(node: Node): - assert [port.idx for port in node.in_ports().values() if not port.disconnected()] == [0], \ - 'Wrong input nodes number for node {} with type ExtractImagePatches'.format(node.soft_get('name', node.id)) - input_shape = node.in_port(0).data.get_shape() - name = node.soft_get('name', node.id) - assert input_shape is not None, 'Input shape is not set for node {} with type ExtractImagePatches'.format(name) - - assert len(input_shape) == 4, 'ExtractImagePatches operation supports only 4D tensors' - - layout = node.graph.graph['layout'] - N = input_shape[get_batch_dim(layout, 4)] - C = input_shape[get_features_dim(layout, 4)] - - size_spatial = shape_array(node.sizes)[node.spatial_dims] - - input_spatial_shape = input_shape[node.spatial_dims] - stride_spatial_shape = node.strides[node.spatial_dims] - - size_extent = node.rates[node.spatial_dims] * (size_spatial - 1) + 1 - - pad_spatial_shape, output_spatial_shape = tf_window_op_pad_infer(input_spatial_shape, - size_extent, - stride_spatial_shape, - node.auto_pad, - False) - - out_shape = shape_for_layout(layout, - batch=N, - features=C * np.prod(size_spatial), - height=output_spatial_shape[0], - width=output_spatial_shape[1]) - - node.out_port(0).data.set_shape(out_shape) diff --git a/tools/mo/openvino/tools/mo/ops/GRU.py b/tools/mo/openvino/tools/mo/ops/GRU.py deleted file mode 100644 index a031aa755ded79..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/GRU.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.RNN import rnn_infer, RNN -from openvino.tools.mo.ops.op import Op - - -class GRU(Op): - op = 'GRU' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': 'RNNSequence', # should be never emitted to IR; for debugging purposes - 'op': __class__.op, - 'blobs_wrb': False, - 'has_num_directions': False, - 'direction': 'forward', - 'infer': __class__.infer, - 'reverse_infer': RNN.reverse_infer, - 'multiplier': 3, - 'multilayers': False, - 'gate_order': mo_array([0, 1, 2]), # TODO: change it 
later - 'normalized': False, - - 'activation_alpha': None, - 'activation_beta': None, - 'activations': None, - 'clip': None, - 'linear_before_reset': None, - 'in_ports_count': 6, - 'out_ports_count': 2, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def supported_attrs(): - return [ - 'hidden_size', # number of the elements in hidden cell size - 'direction', # one of 'forward', 'reverse', or 'bidirectional' - 'axis', - - 'activation_alpha', - 'activation_beta', - 'activations', - 'clip', - 'linear_before_reset', - ] - - def backend_attrs(self): - return [ - 'hidden_size', # number of the elements in hidden cell size - 'direction', # one of 'forward', 'reverse', or 'bidirectional' - 'axis', - - ('activations', lambda node: ','.join(node['activations']) if node.has_and_set('activations') else None), - ('activations_alpha', lambda node: ','.join(map(str, node['activations_alpha'])) - if node.has_and_set('activations_alpha') else None), - ('activations_beta', lambda node: ','.join(map(str, node['activations_beta'])) - if node.has_and_set('activations_beta') else None), - 'clip', - 'linear_before_reset', - ] - - @staticmethod - def infer(node: Node): - assert len(node.in_nodes()) >= 3 # X, W and R - assert len(node.in_nodes()) <= 5 - assert len(node.out_nodes()) <= 2 - - rnn_infer(node, [1]) diff --git a/tools/mo/openvino/tools/mo/ops/GRUBlockCell.py b/tools/mo/openvino/tools/mo/ops/GRUBlockCell.py deleted file mode 100644 index a93449c2e1375f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/GRUBlockCell.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class GRUBlockCell(Op): - op = 'GRUBlockCell' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'infer': None, - 'in_ports_count': 6, - 'out_ports_count': 4, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/GRUCell.py b/tools/mo/openvino/tools/mo/ops/GRUCell.py deleted file mode 100644 index 17a9459a353f1d..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/GRUCell.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class GRUCell(Op): - """ A single GRU cell (without a loop). 
- - 2 inputs: - - [0, required] input data (2D), - - [1, required] initial hidden state (2D), - - 2 blobs: - - [2, required] cell FC weights - - [3, required] cell FC biases - - 1 outputs: - - [required] output data / resulting hidden state (2D) - """ - op = 'GRUCell' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': __class__.op, - 'op': __class__.op, - 'infer': __class__.infer, - 'in_ports_count': 4, - 'out_ports_count': 1, - 'version': 'opset3', - 'wr_input_id': 2, - 'gates_count': 3, - 'linear_before_reset': False, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'hidden_size', # number of the elements in hidden cell size - 'activations', - 'activation_alpha', - 'activation_beta', - 'clip', - 'linear_before_reset', - ] - - def backend_attrs(self): - return [ - 'hidden_size', # number of the elements in hidden cell size - ('activations', lambda node: ','.join(node['activations']) if node.has_and_set('activations') else None), - ('activations_alpha', lambda node: ','.join(map(str, node['activations_alpha'])) - if node.has_and_set('activations_alpha') else None), - ('activations_beta', lambda node: ','.join(map(str, node['activations_beta'])) - if node.has_and_set('activations_beta') else None), - 'clip', - ('linear_before_reset', lambda node: bool_to_str(node, 'linear_before_reset')), - ] - - @staticmethod - def infer(node: Node): - assert len(node.out_nodes()) in [1, 2] - - hidden_shape = node.in_port(1).data.get_shape().copy() - - mark_input_bins(node, start_port=2) - node.out_port(0).data.set_shape(hidden_shape) - - hidden_size = hidden_shape[1] - if node.has_valid('hidden_size'): - if node.hidden_size != hidden_size: - raise Error("Input shape {} for hidden size doesn't match pre-defined hidden_size in node {}".format( - node.in_node(1).shape, node.soft_get('name'))) - else: - node['hidden_size'] = hidden_size diff --git a/tools/mo/openvino/tools/mo/ops/GatherTree.py b/tools/mo/openvino/tools/mo/ops/GatherTree.py deleted file mode 100644 index f4a932f31cec6d..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/GatherTree.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class GatherTree(Op): - op = 'GatherTree' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - 'infer': copy_shape_infer, - 'in_ports_count': 4, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [] diff --git a/tools/mo/openvino/tools/mo/ops/If.py b/tools/mo/openvino/tools/mo/ops/If.py deleted file mode 100644 index 510db285de3b11..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/If.py +++ /dev/null @@ -1,354 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, is_fully_defined, dynamic_dimension_value, unmask_shape -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.ops.op import Op - - -class If(Op): - """ - If operation is an operation which has an input with condition which defines what 
sub-graph "then" or "else" to be - executed. - """ - op = 'If' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - base_attrs = { - 'type': self.op, - 'op': self.op, - 'then_graph': None, # an Graph object with a "then" body sub-graph (condition is True) - 'else_graph': None, # an Graph object with a "else" body sub-graph (condition is False) - 'sub_graphs': ['then_graph', 'else_graph'], # built-in attribute with all sub-graphs - 'version': 'opset8', - 'infer': self.infer, - 'type_infer': self.type_infer, - } - base_attrs.update(attrs) - super().__init__(graph, base_attrs, attrs) - - def port_map_attrs(self): - return [ - 'external_port_id', - 'internal_layer_id' - ] - - @staticmethod - def connect_body_input(if_node: Node, condition: bool, if_input_port_idx: int, body_parameter: Node): - """ - Update the specified body parameter and connect it with If input - - :param if_node: the If node - :param condition: the boolean defining a condition (then/else) graph to add connect the body - :param if_input_port_idx: the input port index to connect - :param body_parameter: the body parameter node to connect - :return: None - """ - assert if_node.soft_get('op') == 'If' - assert body_parameter.soft_get('op') == 'Parameter' - sub_graph = if_node.then_graph if condition else if_node.else_graph - assert body_parameter.id in sub_graph - body_parameter['input_id'] = if_input_port_idx - - @staticmethod - def connect_body_output(if_node: Node, condition: bool, if_output_port_idx: int, internal_result: Node): - """ - Update the specified output port and connect it with If output - - :param if_node: the If node - :param condition: the boolean defining a condition (then/else) graph to add connect the body - :param if_output_port_idx: the output port index to connect - :param internal_result: the body Result node to connect - :return: None - """ - assert if_node.soft_get('op') == 'If' - assert internal_result.soft_get('op') == 'Result' - sub_graph = if_node.then_graph if condition else if_node.else_graph - assert internal_result.id in sub_graph - internal_result['output_id'] = if_output_port_idx - - @staticmethod - def update_body_parameters_type(if_node: Node, condition: bool): - """ - Update the data type for If body Parameter nodes based on data type of the outer graph nodes producing data - for them. - - :param if_node: The If node - :param condition: the boolean defining a condition (then/else) graph - :return: None - """ - assert if_node.soft_get('type') == 'If' - - subgraph = if_node.then_graph if condition else if_node.else_graph - for node in subgraph.get_op_nodes(): - if node.has('input_id'): - assert node.soft_get('type') == 'Parameter' - input_port_id = node['input_id'] - input_type = if_node.in_port(input_port_id).get_data_type() - node.data_type = input_type - log.debug('Updated data type for the body node with name "{}" with value {}' - .format(node.name, node.data_type)) - - @staticmethod - def update_body_parameters_shape(if_node: Node, condition: bool): - """ - Update shape for If body parameters. 
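The linkage set up by connect_body_input/connect_body_output above is what the update_body_parameters_* helpers rely on: each body Parameter carries an 'input_id' that points at an If input port and copies that port's shape and data type. A toy, framework-free sketch of the idea (plain dicts stand in for graph nodes; the names are illustrative only):

    def propagate_to_body(body_parameters, if_input_shapes, if_input_types):
        # body_parameters: dicts with an 'input_id' key, as tagged by connect_body_input
        for param in body_parameters:
            port = param['input_id']
            param['shape'] = list(if_input_shapes[port])
            param['data_type'] = if_input_types[port]
        return body_parameters

    params = [{'input_id': 0}, {'input_id': 1}]
    print(propagate_to_body(params, {0: [1, 3, 224, 224], 1: [1]}, {0: 'f32', 1: 'boolean'}))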
- - :param if_node: The If node - :param condition: the boolean defining a condition (then/else) graph to add connect the body - :return: None - """ - subgraph = if_node.then_graph if condition else if_node.else_graph - for node in subgraph.get_op_nodes(): - if node.has('input_id'): - assert node.soft_get('type') == 'Parameter' - input_port_id = node['input_id'] - input_shape = if_node.in_port(input_port_id).data.get_shape() - if node.soft_get('shape', None) is None: - node['shape'] = None - node.shape = input_shape.copy() - log.debug('Updated shape for the body node with name "{}" with value {}' - .format(node.soft_get('name', node.soft_get('id')), node.shape)) - - @staticmethod - def results_mapping_and_finding_fake_outputs(output_nodes_in_subgraph, branch_name, outputs_mapping): - """ - This method checked result nodes in subgraph and set map between output from If operation and internal subgraph - result. Also This method return True if internal graph has fake results. - - :param output_nodes_in_subgraph: Result node with attribute 'output_id' - :param branch_name: name of subgraph - :param outputs_mapping: map between If operation output ID and subgraph results - - :return: True if all results of subgraph are empty tensors - """ - graph_contain_fake_outputs = True - - for output_node in output_nodes_in_subgraph: - assert output_node.soft_get('type') == 'Result' - port_id = output_node['output_id'] - assert port_id in outputs_mapping.keys(), 'Incorrect mapping then_graph outputs with {0} outputs! ' \ - 'Can\'t find port with ID {1} in If operation.' \ - .format(output_node.name, port_id) - outputs_mapping[port_id][branch_name] = output_node - out_node_shape = output_node.in_port(0).data.get_shape() - graph_contain_fake_outputs = graph_contain_fake_outputs and np.any(unmask_shape(out_node_shape) == 0) - return graph_contain_fake_outputs - - @staticmethod - def update_if_output_ports_shape(if_node: Node): - """ - Update shape and values for If output ports. - - :param if_node: The If node to update output ports and shapes - :return: None - """ - node_name = if_node.soft_get('name', if_node.id) - - then_outputs = [node for node in if_node.then_graph.get_op_nodes() if node.has('output_id')] - else_outputs = [node for node in if_node.else_graph.get_op_nodes() if node.has('output_id')] - outputs_mapping = {} - outputs_number = len(if_node.out_ports()) - - if outputs_number == 0 and len(if_node.out_ports(control_flow=True)) != 0: - # Some models have if with control flow outputs. - # These shape inference for such ifs - # TODO: need to rethink and redo support for control flow edges in if operation - for node in if_node.out_nodes(control_flow=True).values(): - node.shape = int64_array([]) - return - - for port_id in if_node.out_ports().keys(): - outputs_mapping[port_id] = {} - - # variables then_contains_fake_outputs/else_contains_fake_outputs contains True value - # if all outputs from then_body/else_body have shape [0]. It means then_body/else_body does not return data - # and further shape_inference for this branch is not possible. - # TODO: exclude support fake_outputs from this code when we will support shape_inference with empty tensors - - then_contains_fake_outputs = \ - If.results_mapping_and_finding_fake_outputs(then_outputs, 'then_graph', outputs_mapping) - else_contains_fake_outputs = \ - If.results_mapping_and_finding_fake_outputs(else_outputs, 'else_graph', outputs_mapping) - - # use_then_shape is True when else_body or when both bodies do not return data. 
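When the condition value is not known at conversion time and both branches produce real data, the output shape is the per-dimension "intersection" described below: a dimension is kept only if both branches agree on a fully defined value, otherwise it becomes dynamic. A minimal sketch with -1 standing in for dynamic_dimension_value:

    def merge_branch_shapes(then_shape, else_shape, dynamic=-1):
        assert len(then_shape) == len(else_shape), "branch outputs must have equal ranks"
        return [d1 if d1 == d2 and d1 != dynamic else dynamic
                for d1, d2 in zip(then_shape, else_shape)]

    print(merge_branch_shapes([1, 3, 224, 224], [1, 3, -1, 224]))  # -> [1, 3, -1, 224]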
If use_then_shape is True If's - # outputs will have the same shapes as then_body results - use_then_shape = else_contains_fake_outputs or not then_contains_fake_outputs - - cond_value = if_node.in_port(0).data.get_value() - - for port_id in outputs_mapping: - then_else_nodes = outputs_mapping[port_id] - assert 'then_graph' in then_else_nodes.keys(), 'then_graph does not connect with If.out_port[{0}] ' \ - 'in {1} node!'.format(port_id, node_name) - assert 'else_graph' in then_else_nodes.keys(), 'else_graph does not connect with If.out_port[{0}] ' \ - 'in {1} node!'.format(port_id, node_name) - - then_shape = then_else_nodes['then_graph'].in_port(0).data.get_shape() - then_value = then_else_nodes['then_graph'].in_port(0).data.get_value() - else_shape = then_else_nodes['else_graph'].in_port(0).data.get_shape() - else_value = then_else_nodes['else_graph'].in_port(0).data.get_value() - - if is_fully_defined(cond_value): - if cond_value.item() is True: - if then_value is not None: - if_node.out_port(port_id).data.set_value(then_value) - else: - if_node.out_port(port_id).data.set_shape(then_shape) - else: - if else_value is not None: - if_node.out_port(port_id).data.set_value(else_value) - else: - if_node.out_port(port_id).data.set_shape(else_shape) - else: - if then_contains_fake_outputs ^ else_contains_fake_outputs: - # if exactly one of the outputs is fake then use another one - if_node.out_port(port_id).data.set_shape(then_shape if use_then_shape else else_shape) - else: - # find "intersection" which is equal to the dimension value if corresponding dimensions are equal - # and dynamic otherwise - assert len(then_shape) == len(else_shape), 'Ranks of "then" and "else" output tensors are ' \ - 'different for node {} for port {}'.format(node_name, - port_id) - output_shape = [d1 if is_fully_defined(d1) and is_fully_defined(d2) and d1 == d2 else - dynamic_dimension_value for d1, d2 in zip(then_shape, else_shape)] - if_node.out_port(port_id).data.set_shape(output_shape) - - - @staticmethod - def update_if_output_ports_type(if_node: Node): - """ - Update types for If output ports. - - :param if_node: The If node to update output ports and types - :return: None - """ - then_outputs = [node for node in if_node.then_graph.get_op_nodes() if node.has('output_id')] - else_outputs = [node for node in if_node.else_graph.get_op_nodes() if node.has('output_id')] - outputs_mapping = {} - outputs_number = len(if_node.out_ports()) - assert outputs_number == len(then_outputs), 'Incorrect number outputs in then_graph of If with ' \ - 'name {0}! then_graph must has {1} outputs' \ - .format(if_node.name, outputs_number) - assert outputs_number == len(else_outputs), 'Incorrect number outputs in else_graph of If with ' \ - 'name {0}! else_graph must has {1} outputs' \ - .format(if_node.name, outputs_number) - for port_id in if_node.out_ports().keys(): - outputs_mapping[port_id] = {} - port_ids = outputs_mapping.keys() - for then_output_node in then_outputs: - assert then_output_node.soft_get('type') == 'Result' - port_id = then_output_node['output_id'] - assert port_id in port_ids, 'Incorrect mapping then_graph outputs with {0} outputs! ' \ - 'Can\'t find port with ID {1} in If operation.' \ - .format(then_output_node.name, port_id) - outputs_mapping[port_id]['then_graph'] = then_output_node - - for else_output_node in else_outputs: - assert else_output_node.soft_get('type') == 'Result' - port_id = else_output_node['output_id'] - assert port_id in port_ids, 'Incorrect mapping then_graph outputs with {0} outputs! 
' \ - 'Can\'t find port with ID {1} in If operation.' \ - .format(else_output_node.name, port_id) - outputs_mapping[port_id]['else_graph'] = else_output_node - - for port_id in outputs_mapping: - then_else_nodes = outputs_mapping[port_id] - assert 'then_graph' in then_else_nodes.keys(), 'then_graph does not connect with If.out_port[{0}] ' \ - 'in {1} node!'.format(port_id, if_node.name) - assert 'else_graph' in then_else_nodes.keys(), 'else_graph does not connect with If.out_port[{0}] ' \ - 'in {1} node!'.format(port_id, if_node.name) - then_type = then_else_nodes['then_graph'].in_port(0).get_data_type() - else_type = then_else_nodes['else_graph'].in_port(0).get_data_type() - assert then_type == else_type, 'Cannot get type for if.out_port[{0}]! ' \ - 'Types in then_graph and else_graph are not equal!'.format(port_id) - if_node.out_port(port_id).set_data_type(then_type) - - @staticmethod - def re_numerate_internal_id_and_get_if_id(if_node): - """ - This method is called before IR generation. This method sets internal_layer_id. - - :param if_node: The If node where is necessary to set internal_layer_id in bodies. - :return: if_node - """ - then_graph_nodes = if_node.then_graph.nodes() - for node in if_node.then_graph.get_op_nodes(): - then_graph_nodes[node.id]['internal_layer_id'] = node.id - else_graph_nodes = if_node.else_graph.nodes() - for node in if_node.else_graph.get_op_nodes(): - else_graph_nodes[node.id]['internal_layer_id'] = node.id - return if_node.node - - def substitute_ie_attrs(self, new_attrs: dict): - """ - Replace standard list of attribute in layer/data by attributes - delivered by backend_attrs - """ - - port_map_attrs = self.port_map_attrs() - new_attrs.update({ - 'IE': [( - 'layer', - [('id', lambda node: self.re_numerate_internal_id_and_get_if_id(node)), 'name', 'type', 'version'], - [ - '@ports', - ('then_port_map', [], [ - ('@list', lambda node: self.generate_port_map(node, True, 'in'), - ('input', port_map_attrs, [])), - ('@list', lambda node: self.generate_port_map(node, True, 'out'), - ('output', port_map_attrs, [])), - ]), - ('else_port_map', [], [ - ('@list', lambda node: self.generate_port_map(node, False, 'in'), - ('input', port_map_attrs, [])), - ('@list', lambda node: self.generate_port_map(node, False, 'out'), - ('output', port_map_attrs, [])), - ]), - ('then_body', [], [('@network', 'then_graph')]), - ('else_body', [], [('@network', 'else_graph')]), - ])] - }) - - @staticmethod - def generate_port_map(if_node: Node, condition: bool, dir: str): - """ - Extract port_map attributes from if_node and its subgraphs attributes. 
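        For orientation, the structure produced here is just a list of small dicts pairing an If port
        with a body layer. A minimal standalone sketch, using plain dicts as hypothetical stand-ins for
        MO Node objects (names below are illustrative, not taken from this file):

            def build_port_map(body_nodes, key):
                # body_nodes: dicts describing body Parameter/Result nodes of one branch
                return [{'external_port_id': n[key], 'internal_layer_id': n['internal_layer_id']}
                        for n in body_nodes if key in n]

            build_port_map([{'input_id': 1, 'internal_layer_id': 7}], 'input_id')
            # -> [{'external_port_id': 1, 'internal_layer_id': 7}]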
- - :param if_node: The If node - :param condition: the boolean defining a condition (then/else) graph - :param dir: the str value defining type (for inputs or for putputs) of port_map - :return: port_map -> list of dictionaries with to values(external_port_id or internal_layer_id) - """ - port_map = [] - subgraph = if_node.then_graph if condition else if_node.else_graph - name_of_connection = 'input_id' if dir == 'in' else 'output_id' - - for internal_node in subgraph.get_op_nodes(): - if internal_node.has(name_of_connection): - port_map.append({'external_port_id': internal_node[name_of_connection], - 'internal_layer_id': internal_node['internal_layer_id']}) - - return port_map - - @staticmethod - def infer(if_node: Node): - If.update_body_parameters_shape(if_node, True) - If.update_body_parameters_shape(if_node, False) - partial_infer(if_node.then_graph) - partial_infer(if_node.else_graph) - If.update_if_output_ports_shape(if_node) - - @staticmethod - def type_infer(if_node: Node): - from openvino.tools.mo.middle.passes.infer import type_infer - If.update_body_parameters_type(if_node, True) - If.update_body_parameters_type(if_node, False) - type_infer(if_node.then_graph) - type_infer(if_node.else_graph) - If.update_if_output_ports_type(if_node) diff --git a/tools/mo/openvino/tools/mo/ops/LSTM.py b/tools/mo/openvino/tools/mo/ops/LSTM.py deleted file mode 100644 index a135ceda832545..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/LSTM.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.RNN import rnn_infer, RNN -from openvino.tools.mo.ops.op import Op - - -class LSTM(Op): - op = 'LSTM' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': 'RNNSequence', # should be never emitted to IR; for debugging purposes - 'op': self.op, - 'blobs_wrb': False, # input blobs have three separate components W, R and B like in ONNX/LSTM - 'has_num_directions': False, # if True, output shape has 4 dimensions; 3D otherwise - 'direction': 'forward', - 'infer': self.infer, - 'reverse_infer': RNN.reverse_infer, - 'multiplier': 4, - 'gate_order': None, - 'normalized': False, - 'multilayers': False, - 'format': None, # format type of input blobs for different frameworks (onnx, tf), - - 'activation_alpha': None, - 'activation_beta': None, - 'activations': None, - 'clip': None, - 'input_forget': None, - 'in_ports_count': 7, - 'out_ports_count': 3, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def supported_attrs(): - return [ - 'hidden_size', # number of the elements in hidden cell size - 'direction', # one of 'forward', 'reverse', or 'bidirectional' - 'axis', - - 'activation_alpha', - 'activation_beta', - 'activations', - 'clip', - # 'input_forget', # Not supported yet - ] - - def backend_attrs(self): - return [ - 'hidden_size', # number of the elements in hidden cell size - 'direction', # one of 'forward', 'reverse', or 'bidirectional' - 'axis', - - ('activations', lambda node: ','.join(node['activations']) if node.has_and_set('activations') else None), - ('activations_alpha', lambda node: ','.join(map(str, node['activations_alpha'])) - if node.has_and_set('activations_alpha') else None), - ('activations_beta', lambda node: ','.join(map(str, node['activations_beta'])) - if node.has_and_set('activations_beta') else None), - 'clip', - # 'input_forget', # Not supported yet - ] - - @staticmethod - def 
infer(node: Node): - # there are limitations coming from ONNX LSTM definition and normalization rules - assert len(node.in_nodes()) >= 3 # X, W and R - assert len(node.in_nodes()) <= 7 - assert len(node.out_nodes()) <= 3 - - rnn_infer(node, [1, 2]) diff --git a/tools/mo/openvino/tools/mo/ops/LookupTableInsert.py b/tools/mo/openvino/tools/mo/ops/LookupTableInsert.py deleted file mode 100644 index fc7f41e7661216..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/LookupTableInsert.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class LookupTableInsert(Op): - """ - This operation has only output control flow edges and no output data edges in some models. - And for these cases implementation of the shape inference is needed since the shape inference is executed - before control flow edges resolving. This operation has non-tensor output so the output shape is empty. - """ - enabled = False - op = 'LookupTableInsert' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'infer': self.infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - assert len(connected_in_ports) == 3, \ - "Incorrect number of inputs for {} node".format(node_name) - - # check shapes of input tensors - keys_shape = node.in_port(1).data.get_shape() - values_shape = node.in_port(2).data.get_shape() - assert np.array_equal(keys_shape, values_shape), \ - 'Shapes of tensors with keys and values must be equal for {} node'.format(node_name) - - # set output shape that must be empty - # since output is not a tensor - node.out_port(0).data.set_shape([]) diff --git a/tools/mo/openvino/tools/mo/ops/MatMul.py b/tools/mo/openvino/tools/mo/ops/MatMul.py deleted file mode 100644 index 43c2c297afb169..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/MatMul.py +++ /dev/null @@ -1,247 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import assign_dims_to_weights, compatible_dims, compatible_shapes, \ - shape_array, is_fully_defined, shape_delete, shape_insert -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.type_utils import override_data_type_of_constant - - -class MatMul(Op): - """ - Operation is specified at docs/ops/matrix/MatMul_1.md - """ - op = 'MatMul' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'transpose_a': False, - 'transpose_b': False, - 'infer': self.infer, - 'type_infer': self.type_infer, - 'in_ports_count': 2, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - ('transpose_a', lambda node: bool_to_str(node, 'transpose_a')), - ('transpose_b', lambda node: bool_to_str(node, 'transpose_b')), - ] - - @staticmethod - def shape_alignment(node: Node): - """ - Specification of MatMul operation allows inputs to be 
aligned together before matrix multiplication. - Current method raises an error if input shapes are not valid at any step of alignment process - :return: aligned copies of both input shapes - """ - node_name = node.soft_get('name', str(node.id)) - input_shapes = [node.in_port(i).data.get_shape() for i in range(2)] - transpose_a = node.has_and_set('transpose_a') - transpose_b = node.has_and_set('transpose_b') - - transformed_shapes = [] - for i, shape in enumerate(input_shapes): - input_shape = shape.copy() - # prerequisites check - assert input_shape is not None, "MatMul has shape=`None` for {} input of `{}` node".format(i, node_name) - assert input_shape.ndim == 1, "MatMul doesn't support scalar inputs. {} input of `{}` node has shape {}" \ - "".format(i, node_name, input_shape) - assert input_shape.size >= 1, "MatMul doesn't support inputs with rank lower than 1. {} input of `{}` " \ - "node has shape {}".format(i, node_name, input_shape) - rank = input_shape.size - # shape alignment - if rank != 1 and ((i == 0 and transpose_a) or (i == 1 and transpose_b)): - input_shape[-2], input_shape[-1] = input_shape[-1], input_shape[-2] - if rank == 1: - input_shape = shape_insert(input_shape, int(i == 1), 1) - - max_shape_length = max(input_shapes[0].size, input_shapes[1].size) - input_shape = shape_insert(input_shape, 0, [1] * (max_shape_length - input_shape.size)) - transformed_shapes.append(input_shape) - - A_shape = shape_array(transformed_shapes[0]) - B_shape = shape_array(transformed_shapes[1]) - - assert A_shape.size == B_shape.size, \ - "Shapes were not aligned by length for MatMul `{}`. Shapes: `{}`".format(node_name, transformed_shapes) - - # batch broadcasting - batch_len = A_shape.size - 2 - for i in range(batch_len): - if A_shape[i] != B_shape[i]: - if A_shape[i] == 1: - A_shape[i] = B_shape[i] - if B_shape[i] == 1: - B_shape[i] = A_shape[i] - - assert compatible_shapes(A_shape[:-2], B_shape[:-2]), \ - "MatMul input shapes are incorrect. BATCH_DIMs are not equal. Node: {}. Aligned shapes: {}" \ - "".format(node_name, transformed_shapes) - - return A_shape, B_shape - - @staticmethod - def value_propagation(node: Node): - """ - This function performs a value propagation for MatMul layer. - :param node: MatMul layer - :return: None - """ - a_value = node.in_port(0).get_source().data.get_value() - b_value = node.in_port(1).get_source().data.get_value() - if is_fully_defined(a_value) and is_fully_defined(b_value): - if node.transpose_a: - a_value = transpose(a_value) - if node.transpose_b: - b_value = transpose(b_value) - # np.matmul does not work correctly with masked arrays, so need explicitly convert inputs to regular arrays - if isinstance(a_value, np.ma.masked_array): - a_value = a_value.filled() - if isinstance(b_value, np.ma.masked_array): - b_value = b_value.filled() - node.out_port(0).data.set_value(np.matmul(a_value, b_value)) - - @staticmethod - def infer(node: Node): - """ - Performs shape inference of MatMul node as operation doc-string says - Raises on any shape inconsistency - """ - name = node.soft_get('name', str(node.id)) - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_in_ports) == 2 and 0 in connected_in_ports and 1 in connected_in_ports, \ - "MatMul should have 2 connected input ports, but it doesn't for node: `{}`. 
Ports: {}" \ - "".format(name, connected_in_ports) - - log.debug('MatMul `{}` input shapes: {}'.format(name, [node.in_port(i).data.get_shape() for i in range(2)])) - A_shape, B_shape = MatMul.shape_alignment(node) - log.debug('MatMul `{}` aligned input shapes: {}'.format(name, [A_shape, B_shape])) - - assert compatible_dims(A_shape[-1], B_shape[-2]), \ - "MatMul input shapes are incorrect. COL_INDEX_DIMs are not equal. Node: {}. Shapes: {}" \ - "".format(name, [A_shape, B_shape]) - - output_shape = np.ma.concatenate((A_shape[:-1], B_shape[-1:])) - - if node.in_port(0).data.get_shape().size == 1: - assert compatible_dims(output_shape[-2], 1) - output_shape = shape_delete(output_shape, -2) - if node.in_port(1).data.get_shape().size == 1: - assert compatible_dims(output_shape[-1], 1) - output_shape = shape_delete(output_shape, -1) - - node.out_port(0).data.set_shape(output_shape) - - in_ch = 0 if not node.transpose_b else 1 - out_ch = 1 if not node.transpose_b else 0 - assign_dims_to_weights(node.in_node(1), None, in_ch, out_ch, node.in_port(1).data.get_shape().size) - MatMul.value_propagation(node) - - @staticmethod - def type_infer(node): - override_data_type_of_constant(node) - node.out_port(0).set_data_type(node.in_port(0).get_data_type()) - - -def transpose(value): - num_of_dims = value.ndim - if num_of_dims == 1: - return value - else: - return np.transpose(value, [*range(0, num_of_dims - 2), num_of_dims - 1, num_of_dims - 2]) - - -# MatMul-like operation from frameworks -class GemmONNX(Op): - """ - Represents Gemm operation from ONNX - - Missing `type` and `infer` attributes on purpose - node should be decomposed on front phase - and should never be inferred or translated to IR as is - """ - op = 'Gemm' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'transpose_a': False, - 'transpose_b': False, - 'alpha': 1, - 'beta': 1, - 'broadcast_c': True, - 'in_ports_count': 3, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - -class FullyConnected(Op): - # TODO: remove `infer`, `type` and supported_attrs after op removal from IR Spec - op = 'FullyConnected' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'infer': self.infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return [ - 'out-size', - ] - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_in_ports) >= 2 and 0 in connected_in_ports and 1 in connected_in_ports, \ - 'FullyConnected should have 2 connected input ports, but it doesn\'t for node: `{}`. Ports: {}' \ - ''.format(name, connected_in_ports) - - assert node.has_valid('out-size') - input_shape = node.in_port(0).data.get_shape() - weights_shape = node.in_port(1).data.get_shape() - assert input_shape is not None and weights_shape is not None, \ - 'Incorrect FullyConnected input shapes. Node: {}. 
Shapes: {}'.format(name, [input_shape, weights_shape]) - assert weights_shape.size == 2 - out_size = node.soft_get('out-size') - assert compatible_dims(weights_shape[0], out_size), \ - 'weights_shape={}, out-size={}'.format(weights_shape, out_size) - - if 2 in connected_in_ports: - bias_value = node.in_port(2).data.get_value() - bias_shape = node.in_port(2).data.get_shape() - assert bias_shape is not None, 'Shape was not inferred for biases of FullyConnected {}'.format(name) - assert bias_value is not None, 'Value was not inferred for biases of FullyConnected {}'.format(name) - assert compatible_shapes(bias_shape, [out_size]) or compatible_shapes(bias_shape, [1, out_size]), \ - 'Incorrect FullyConnected bias shape `{}` for node {}. `out-size`={}'.format(bias_shape, node, out_size) - - node.out_port(0).data.set_shape([*input_shape[:-1], out_size]) - - -# MatMul-like operations for IR V6 -class Gemm(MatMul): - """ - Represents GEMM operation that is acceptable to appear in v6 IRs - Inherits MatMul semantic to be re-inferred in back phase and to be successfully translated to IR (v6) - """ - op = 'GEMM' - enabled = False diff --git a/tools/mo/openvino/tools/mo/ops/NextIteration.py b/tools/mo/openvino/tools/mo/ops/NextIteration.py deleted file mode 100644 index 64ff627c702108..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/NextIteration.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class NextIteration(Op): - op = "NextIteration" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': __class__.op, - 'in_ports_count': 1, - 'infer': NextIteration.enter_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def enter_infer(node: Node): - output_shape = node.in_node(0).shape - output_value = node.in_node(0).value - for _, out_node in node.graph.out_edges(node.id): - node.graph.node[out_node]['shape'] = mo_array(output_shape) - node.graph.node[out_node]['value'] = None if output_value is None else mo_array(output_value) diff --git a/tools/mo/openvino/tools/mo/ops/ONNXResize10.py b/tools/mo/openvino/tools/mo/ops/ONNXResize10.py deleted file mode 100644 index f9d98e668387fc..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/ONNXResize10.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class ONNXResize10(Op): - op = 'ONNXResize10' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'in_ports_count': 2, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/ONNXResize11.py b/tools/mo/openvino/tools/mo/ops/ONNXResize11.py deleted file mode 100644 index 3990dd867fc851..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/ONNXResize11.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class ONNXResize11Op(Op): - op = 'ONNXResize11' - - def __init__(self, 
graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'out_ports_count': 1, - 'infer': ONNXResize11Op.onnx_resize_infer - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'coordinate_transformation_mode', - 'cube_coeff', - 'exclude_outside', - 'extrapolation_value', - 'mode', - 'nearest_mode' - ] - - @staticmethod - def onnx_resize_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - if input_shape is None: - return - - assert (node.is_in_port_connected(0) and (node.is_in_port_connected(2) or node.is_in_port_connected(3))), \ - "One of the scales or sizes inputs must be connected to Node {} with op {}." \ - "".format(node.soft_get("name", node.id), node.op) - - assert node.coordinate_transformation_mode != 'tf_crop_and_resize', \ - 'Mode tf_crop_and_resize is not supported for op {} with name {}'.format(node.op, - node.soft_get("name", node.id)) - - if not node.is_in_port_connected(3): - # i.e. input 'sizes' is not given - input2_value = node.in_port(2).data.get_value() - assert input2_value is not None, \ - "Node {} with op {} has no value in input port 2".format(node.soft_get('name', node.id), node.op) - scale = mo_array(input2_value) - output_shape = np.floor(input_shape * scale + 1.0e-6).astype(np.int64) - else: - # i.e. input 'sizes' is given - sizes = node.in_port(3).data.get_value() - assert sizes is not None, \ - "Node {} with op {} has no value in input port 3".format(node.soft_get("name", node.id), node.op) - output_shape = input_shape.copy() - spatial_dimension_indices = range(2, len(input_shape)) - output_shape[spatial_dimension_indices] = sizes[2:] - - node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/RNN.py b/tools/mo/openvino/tools/mo/ops/RNN.py deleted file mode 100644 index ea73368cc1fa10..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/RNN.py +++ /dev/null @@ -1,242 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins, shape_insert, dynamic_dimension, \ - shape_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Node, Graph, add_opoutput, Error -from openvino.tools.mo.ops.op import Op - - -class RNN(Op): - op = 'RNN' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': 'RNNSequence', # should be never emitted to IR; for debugging purposes - 'op': self.op, - 'blobs_wrb': False, - 'has_num_directions': False, - 'direction': 'forward', - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - 'multiplier': 1, - 'gate_order': mo_array([0]), # Only one gate in this cell - 'normalized': False, - - 'activation_alpha': None, - 'activation_beta': None, - 'activations': None, - 'clip': None, - 'in_ports_count': 6, - 'out_ports_count': 2, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def supported_attrs(): - return [ - 'hidden_size', # number of the elements in hidden cell size - 'direction', # one of 'forward', 'reverse', or 'bidirectional' - 'axis', - - # Additional attributes - 'activation_alpha', - 'activation_beta', - 'activations', - 'clip', - ] - - def backend_attrs(self): - return [ - 'hidden_size', # number of the elements in hidden cell size - 'direction', # one of 'forward', 'reverse', or 'bidirectional' - 'axis', - - # Additional attributes - 
'activation_alpha', - 'activation_beta', - ('activations', lambda node: ','.join(node.activations) if node.activations is not None else None), - 'clip', - ] - - @staticmethod - def infer(node: Node): - assert len(node.in_nodes()) >= 3 # X, W and R - assert len(node.in_nodes()) <= 5 - assert len(node.out_nodes()) <= 2 - - rnn_infer(node, [1]) - - @staticmethod - def reverse_infer(node: Node): - if node.in_port(0).data.get_shape() is not None: - return - - input_size = get_rnn_input_size(node) - batch_size, seq_len = get_rnn_batch_size_and_seq_len(node) - # ONNX has the same input layout - input_shape = shape_array([seq_len, batch_size, input_size]) - if node.format == 'tf': - input_shape = shape_array([batch_size, seq_len, input_size]) - - node.in_port(0).data.set_shape(input_shape) - - -def rnn_infer(node: Node, out_ports=None): - """ - General infer function for RNN, GRU, LSTM layers. - Assume that 0-port input of node is input data for recurrent layer and node have attrs: - hidden_size, - """ - if out_ports is None: - out_ports = [] - - # 1. Necessary checks (from ONNX specification) - assert node.batch_dim <= 1 - assert node.sequence_dim <= 1 - assert node.batch_dim != node.sequence_dim - assert node.direction in ['forward', 'reverse', 'bidirectional'] - - if node.blobs_wrb: - mark_input_bins(node, ['W', 'R', 'B']) - else: - mark_input_bins(node) - - # 2. Output shape calculations - input_shape = node.in_node(0).shape - assert len(input_shape) == 3 - - # Reshape input nodes - for port in [2, 3]: - if port in node.in_nodes() and len(node.in_node(port).in_nodes()) > 0 and \ - 'zero_shapes' in node.in_node(port).in_node(): - for i in node.in_node(port).in_node().zero_shapes: - if node.in_node(port).shape[i] != input_shape[i]: - node.in_node(port).value = np.repeat(node.in_node(port).value, input_shape[i], axis=i) - node.in_node(port).shape[i] = input_shape[i] - - out_shape = [input_shape[node.sequence_dim], input_shape[node.batch_dim], node.hidden_size] - - if node.batch_dim == 0: - out_shape = [input_shape[node.batch_dim], input_shape[node.sequence_dim], node.hidden_size] - - num_directions = 2 if node.direction in ['bidirectional'] else 1 - if node.has_num_directions: - # ONNX-like, insert extra dimension to output shape for num_directions - out_shape = shape_insert(out_shape, 1, np.int64(num_directions)) - - # 0 output is required creating it if doesn't exist - if 0 not in node.out_nodes(): - data_node = Op._create_data_node( - node.graph, - name=node.node + '/ExtraOutput/{}'.format(0), - attrs={'executable': True} - ) - if 0 not in node.out_ports(): - node.add_output_port(0) - node.graph.add_edge(node.id, data_node.id, key=0, out=0) - add_opoutput(node.graph, data_node.id, 0, False) - node.out_port(0).data.set_shape(out_shape) - - # 3. 
Extra outputs for hidden/cell states shape calculations (optional) - state_size = [input_shape[node.batch_dim], node.hidden_size] - if node.has_num_directions: - state_size = shape_insert(state_size, 0, num_directions) - - if node.multilayers: - # For multilayer case state sizes from every layer will be concatenated by last axis - num_layers = node.num_layers - state_size[-1] *= num_layers - - for i in out_ports: - # If node hasn't consumers for hidden/cells state -> create them - if i not in node.out_nodes(): - data_node = Op._create_data_node( - node.graph, - name=node.node + '/ExtraOutput/' + str(i), - attrs={'executable': True} - ) - if i not in node.out_ports(): - node.add_output_port(i) - node.graph.add_edge(node.id, data_node.id, key=0, out=i) - add_opoutput(node.graph, data_node.id, 0, False) - else: - data_node = node.out_node(i) - data_node.shape = shape_array(state_size) - - -def get_rnn_batch_size_and_seq_len(node: Node): - """ - Gets batch_size and sequence_length from RNN constant inputs - and output shapes retrieved during reverse_infer - - :param node: - :return: - """ - node_name = node.soft_get('name', node.id) - out_shape = node.out_port(0).data.get_shape() - batch_size = dynamic_dimension - seq_len = dynamic_dimension - in_port_with_initial_states = 3 # initial hidden size values is framework dependent - - if out_shape is not None: - # note that op is not in opset state but in the state of the original framework - if node.batch_dim == 1: - seq_len = out_shape[0] - - if node.format == 'onnx': - assert len(out_shape) == 4, 'incorrect out_shape rank for node {}'.format(node_name) - # even for ONNX in extractor 'batch_dim': 1 (front/onnx/lstm_ext.py:26) despite the fact that - # out_shape = [seq_len, num_directions, batch_size, hidden_size] - batch_size = out_shape[2] - in_port_with_initial_states = 5 - elif node.format == 'tf': - log.error('reverse infer for TensorFlow RNN operation {} is not implemented yet'.format(node_name), - extra={'is_warning': True}) - else: - raise Error('Incorrect framework name') - elif node.batch_dim == 0: - # out_shape = [batch_size, num_directions, seq_len, hidden_size] - batch_size = out_shape[0] - seq_len = out_shape[2] - in_port_with_initial_states = 3 - else: - raise Error('incorrect batch_dim for node {}'.format(node_name)) - - if batch_size is dynamic_dimension: - if node.is_in_port_connected(in_port_with_initial_states): - initial_hidden_state_size = node.in_port(in_port_with_initial_states).data.get_shape() - if initial_hidden_state_size is not None: - batch_size = initial_hidden_state_size[1] - - if seq_len is dynamic_dimension and node.format == 'onnx': - # ONNX can store seq_len in optional input - if node.is_in_port_connected(4): - seq_len_val = node.in_port(4).data.get_value() - if seq_len_val is not None: - seq_len = seq_len.item() - - return [batch_size, seq_len] - - -def get_rnn_input_size(node: Node): - node_name = node.soft_get('name', node.id) - assert node.is_in_port_connected(1), 'weights input is not connected' - - if node.format == 'onnx': - # ONNX weights on input 1 contain only W part, R, and B are connected separately - # weights_shape = `[num_directions, 4 * hidden_size, input_size]` - weights_size = node.in_port(1).data.get_shape() - assert len(weights_size) == 3, 'incorrect weights ranks for ONNX {} node {}'.format(node.op, node_name) - input_size = weights_size[2] - return input_size - elif node.format == 'tf': - log.error('reverse infer for TensorFlow RNN operation {} is not implemented yet'.format(node_name), - 
extra={'is_warning': True}) - else: - raise Error('Incorrect framework name') diff --git a/tools/mo/openvino/tools/mo/ops/RNNCell.py b/tools/mo/openvino/tools/mo/ops/RNNCell.py deleted file mode 100644 index 0f48644eb4039e..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/RNNCell.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class RNNCell(Op): - """ A single RNN cell (without a loop). - - 2 inputs: - - [0, required] input data (2D), - - [1, required] initial hidden state (2D), - - 2 blobs: - - [2, required] cell FC weights - - [3, required] cell FC biases - - 1 outputs: - - [required] output data / resulting hidden state (2D) - """ - op = 'RNNCell' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'infer': self.infer, - 'in_ports_count': 4, - 'out_ports_count': 1, - 'version': 'opset3', - 'wr_input_id': 2, - 'gates_count': 1 - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'hidden_size', # number of the elements in hidden cell size - 'activations', - 'activation_alpha', - 'activation_beta', - 'clip', - ] - - def backend_attrs(self): - return [ - 'hidden_size', # number of the elements in hidden cell size - ('activations', lambda node: ','.join(node.activations) if node.activations is not None else None), - 'activation_alpha', - 'activation_beta', - 'clip', - ] - - @staticmethod - def infer(node: Node): - assert len(node.out_nodes()) in [1, 2] - - hidden_shape = node.in_node(1).shape.copy() - - mark_input_bins(node, start_port=2) - node.out_node(0).shape = hidden_shape - - hidden_size = hidden_shape[1] - if node.has_valid('hidden_size'): - if node.hidden_size != hidden_size: - raise Error("Input shape {} for hidden size doesn't match pre-defined hidden_size in node {}".format( - node.in_node(1).shape, node.soft_get('name'))) - else: - node['hidden_size'] = hidden_size diff --git a/tools/mo/openvino/tools/mo/ops/ReduceOps.py b/tools/mo/openvino/tools/mo/ops/ReduceOps.py deleted file mode 100644 index 569d50fce953ac..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/ReduceOps.py +++ /dev/null @@ -1,181 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, is_fully_defined -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op - -reduce_map = { - 'ReduceSum': np.sum, - 'ReduceProd': np.prod, - 'ReduceL1': lambda x, axis, keepdims: np.sum(a=np.absolute(x), axis=axis, keepdims=keepdims), - 'ReduceL2': lambda x, axis, keepdims: np.sqrt(np.sum(a=np.square(x), axis=axis, keepdims=keepdims)), - 'ReduceMax': np.max, - 'ReduceMin': np.min, - 'ReduceMean': np.mean, - 'ReduceAnd': np.all, - 'ReduceLogicalAnd': np.all, - 'ReduceLogicalOr': np.any, -} - - -def reduce_helper(func: callable, x: np.array, axis: tuple, keepdims: bool): - """ - Performs the reduction of input data tensor "x" over axis "axis" with function "func" and optionally removes reduced - dimensions (if "keepdims" is False). 
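    A quick numerical illustration of how the reduce_map callables above are applied; plain NumPy on
    made-up data, not code from this file:

        import numpy as np

        reduce_l2 = lambda x, axis, keepdims: np.sqrt(np.sum(np.square(x), axis=axis, keepdims=keepdims))
        x = np.array([[3.0, 4.0], [6.0, 8.0]])
        reduce_l2(x, axis=(1,), keepdims=False)   # -> array([ 5., 10.])
        reduce_l2(x, axis=(1,), keepdims=True)    # reduced axis kept: result has shape (2, 1)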
If the input tensor has dynamic values, all elements of the result tensor - are changed to be dynamic. - - :param func: numpy reduce function - :param x: the data to perform reduction on - :param axis: the axis for reduction - :param keepdims: flag specifying whether keep reduce dimensions or not - :return: the result tensor - """ - result = func(x, axis=axis, keepdims=keepdims) - # we need to handle this case specially to avoid problems with deepcopy method with MaskedConstant converted to - # masked_array - see issue https://github.com/numpy/numpy/issues/21022 - if isinstance(result, np.ma.core.MaskedConstant): - return np.ma.masked_array(data=-1, mask=True, dtype=result.dtype) - if is_fully_defined(x): - return result - else: - return np.ma.masked_array(result, mask=np.ones(result.shape, dtype=bool)) - - -def reduce_infer(node: Node): - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - assert len(connected_in_ports) == 2, \ - "{} node `{}` should have 2 input ports, where 0-input is data input and 1-input represent " \ - "`reduction_indices`".format(node.op, node.id) - - in_data = node.in_port(0).data - in_shape = in_data.get_shape() - axis = node.in_port(1).data.get_value() - - # If the axis is None then reduce over all the dimensions of the input tensor - if axis.size == 1 and axis.item() is None: - axis = int64_array(list(range(len(in_shape)))) - node.in_port(1).data.set_value(axis) - - assert in_shape is not None, "Can not infer {} node `{}`: shape of 0-input unknown".format(node.op, node.id) - - axis = axis.copy() - if axis.size == 1: - axis = int64_array([axis.item()]) - - in_value = in_data.get_value() - - if in_value is not None: - value = reduce_helper(reduce_map[node.op], in_value.copy(), axis=tuple(axis), keepdims=node.keep_dims) - node.out_port(0).data.set_value(value) - else: - used_dims = np.zeros(len(in_shape), dtype=bool) - output_shape = in_shape.copy() - - for dim in axis: - used_dims[dim] = True - output_shape[dim] = 1 - - # In case if keep dims == False, we should remove all 1 dims that was used in reduction - if not node.keep_dims: - output_shape = output_shape[np.invert(used_dims)] - - node.out_port(0).data.set_shape(output_shape) - - # if the operation changes the rank of the output tensor then it is necessary to insert Permute if the input is 4D - # or 5D - if not node.keep_dims: - node['reinterp_shape'] = True - - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'axis') - - -class ReduceOp(Op): - enabled = False - op = None - op_type = None - version = 'opset1' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op_type, - 'version': self.version, - 'infer': reduce_infer, - 'keep_dims': 0, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'force_precision_in_ports': { - 1: 'int64'}, - }, attrs) - assert isinstance(self.attrs['keep_dims'], int) or isinstance(self.attrs['keep_dims'], bool) - self.attrs['keep_dims'] = bool(self.attrs['keep_dims']) - - def supported_attrs(self): - return [ - ('keep_dims', lambda node: bool_to_str(node, 'keep_dims')), - ] - - -class ReduceSum(ReduceOp): - enabled = True - op = 'ReduceSum' - op_type = 'ReduceSum' - - -class ReduceProd(ReduceOp): - op = 'ReduceProd' - op_type = 'ReduceProd' - enabled = True - - -class ReduceMin(ReduceOp): - op = 'ReduceMin' - op_type = 'ReduceMin' - enabled = True - - -class ReduceMax(ReduceOp): - op = 'ReduceMax' - op_type = 'ReduceMax' - enabled = True - - -class 
ReduceMean(ReduceOp): - op = 'ReduceMean' - op_type = 'ReduceMean' - enabled = True - - -class ReduceL1(ReduceOp): - op = 'ReduceL1' - op_type = 'ReduceL1' - version = 'opset4' - - -class ReduceL2(ReduceOp): - op = 'ReduceL2' - op_type = 'ReduceL2' - version = 'opset4' - - -class ReduceAnd(ReduceOp): - op = 'ReduceAnd' - op_type = 'ReduceLogicalAnd' - enabled = True - - -class ReduceLogicalAnd(ReduceOp): - op = 'ReduceLogicalAnd' - op_type = 'ReduceLogicalAnd' - enabled = True - - -class ReduceLogicalOr(ReduceOp): - op = 'ReduceLogicalOr' - op_type = 'ReduceLogicalOr' - enabled = True diff --git a/tools/mo/openvino/tools/mo/ops/Reverse.py b/tools/mo/openvino/tools/mo/ops/Reverse.py deleted file mode 100644 index 9b4af1157b451f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/Reverse.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class Reverse(Op): - op = 'Reverse' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'axis': None, - 'op': self.op, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': self.infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def infer(node): - input_shape = node.in_port(0).data.get_shape() - input_value = node.in_port(0).data.get_value() - assert input_shape is not None - if not node.has_valid('axis'): - assert 1 in node.in_nodes() - assert node.in_node(1).has_valid('value') - assert node.in_node(1).value.size == 1 - - node['axis'] = node.in_node(1).value.item() - node.in_port(1).disconnect() - - assert node.has_valid('axis') - - assert len(node.out_nodes()) == 1 - if input_value is not None: - node.out_port(0).data.set_value(np.flip(input_value, node.axis)) - else: - node.out_port(0).data.set_shape(input_shape) diff --git a/tools/mo/openvino/tools/mo/ops/TFFFT.py b/tools/mo/openvino/tools/mo/ops/TFFFT.py deleted file mode 100644 index d0f10aa5c0b038..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/TFFFT.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class TFFFT(Op): - """ - This operation is intended to read TF operations FFT, FFT2D, FFT3D, IFFT, IFFT2D, IFFT3D, RFFT, RFFT2D, RFFT3D, - IRFFT, IRFFT2D, IRFFT3D. The operation TFFFT has two attributes: an integer attribute num_of_dimensions and - a string attribute fft_kind. - - If an operation is used to read FFT, FFT2D, or FFT3D, then the attribute 'fft_kind' is 'DFT'. - If an operation is used to read IFFT, IFFT2D, or IFFT3D, then the attribute 'fft_kind' is 'IDFT'. - If an operation is used to read RFFT, RFFT2D, or RFFT3D, then the attribute 'fft_kind' is 'RDFT'. - If an operation is used to read IRFFT, IRFFT2D, or IRFFT3D, then the attribute 'fft_kind' is 'IRDFT'. - - The attribute 'num_of_dimensions' is equal to number of transformed axes, i.e. 1 for FFT, IFFT, RFFT, and IRFFT; - 2 for FFT2D, IFFT2D, RFFT2D, and IRFFT2D; 3 for FFT3D, IFFT3D, RFFT3D, and IRFFT3D. 
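    Condensed into a lookup table (the table name is hypothetical; it only restates the rules above):

        TF_FFT_ATTRS = {
            'FFT':   ('DFT', 1),   'FFT2D':   ('DFT', 2),   'FFT3D':   ('DFT', 3),
            'IFFT':  ('IDFT', 1),  'IFFT2D':  ('IDFT', 2),  'IFFT3D':  ('IDFT', 3),
            'RFFT':  ('RDFT', 1),  'RFFT2D':  ('RDFT', 2),  'RFFT3D':  ('RDFT', 3),
            'IRFFT': ('IRDFT', 1), 'IRFFT2D': ('IRDFT', 2), 'IRFFT3D': ('IRDFT', 3),
        }
        fft_kind, num_of_dimensions = TF_FFT_ATTRS['RFFT2D']   # -> ('RDFT', 2)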
- - The transformation TFFFTToDFT converts the operation TFFFT into MO operation according to the following rules: - 1) FFT, FFT2D, FFT3D are converted into DFT; - 2) IFFT, IFFT2D, IFFT3D are converted into IDFT; - 3) RFFT, RFFT2D, RFFT3D are converted into RDFT; - 4) IRFFT, IRFFT2D, IRFFT3D are converted into IRDFT. - """ - op = 'TFFFT' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'out_ports_count': 1, - 'in_ports_count': 1, - } - assert 'fft_kind' in attrs, 'Attribute fft_kind is not given for the operation TFFFT.' - assert 'num_of_dimensions' in attrs, 'Attribute num_of_dimensions is not given for the operation TFFFT.' - super().__init__(graph, mandatory_props, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/TFResize.py b/tools/mo/openvino/tools/mo/ops/TFResize.py deleted file mode 100644 index 7fa4ab4600d6fd..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/TFResize.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class TFResize(Op): - op = 'TFResize' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'out_ports_count': 1, - 'in_ports_count': 2, - 'infer': TFResize.tf_resize_infer - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def tf_resize_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - if input_shape is None: - return - - attrs_msg = "If half_pixel_centers attribute of the node {} with op {} is True, " \ - "the attribute align_corners must be False" - node_name = node.soft_get('name', node.id) - assert not node.half_pixel_centers or (node.half_pixel_centers and not node.align_corners), \ - attrs_msg.format(node_name, node.op) - - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - assert len(connected_in_ports) == 2, \ - "Node {} with op {} number of inputs must be equal to 2.".format(node_name, node.op) - - new_sizes_value = node.in_port(1).data.get_value() - assert new_sizes_value is not None, "Node {} with op {} has no value in input port 1".format(node_name, node.op) - - input_rank = len(input_shape) - assert input_rank == 4, \ - "Resized input data of the node {} with op {} must be 4D tensor".format(node_name, node.op) - - len_msg = "Op {} with name {} supports only resize with respect to height and width dimension simultaneously" - assert len(new_sizes_value) == 2, len_msg.format(node_name, node.op) - - output_shape = input_shape.copy() - - output_shape[1] = new_sizes_value[0] - output_shape[2] = new_sizes_value[1] - - node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/TensorArray.py b/tools/mo/openvino/tools/mo/ops/TensorArray.py deleted file mode 100644 index c0d5c531dbf758..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/TensorArray.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class TensorArray(Op): - op = "TensorArrayV3" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'infer': 
TensorArray.array_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def array_infer(node: Node): - size = node.in_node(0) - assert size.value is not None - - # 0 port: handle - if 0 in node.out_nodes().keys(): - if node.has_valid('element_shape'): - element_shape = node['element_shape'] - else: - element_shape = None - - out_node = node.out_node(0).id - output_value = node.out_node(0).id - node.graph.node[out_node]['value'] = mo_array(output_value) - - output_shape = node.graph.node[out_node]['value'].shape - node.graph.node[out_node]['shape'] = shape_array(output_shape) - - node.graph.node[out_node]['element_shape'] = shape_array(element_shape) - node.graph.node[out_node]['size'] = size.value - # 1 port flow - if 1 in node.out_nodes().keys(): - output_value = None - - out_node = node.out_node(1).id - node.graph.node[out_node]['value'] = None if output_value is None else mo_array(output_value) - node.graph.node[out_node]['shape'] = shape_array(output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/TensorArrayGather.py b/tools/mo/openvino/tools/mo/ops/TensorArrayGather.py deleted file mode 100644 index b62960d694f785..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/TensorArrayGather.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.utils import symm_match_shapes - - -class TensorArrayGather(Op): - op = "TensorArrayGatherV3" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'infer': TensorArrayGather.array_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def array_infer(node: Node): - assert len(node.in_nodes()) == 3 - - handle = node.in_node(0) - - ta_node = Node(node.graph, str(handle.value)) - - if ta_node.has_valid('element_shape') and ta_node.element_shape is not None and len(ta_node.element_shape) > 0: - assert symm_match_shapes(ta_node['element_shape'], node.element_shape) - else: - ta_node['element_shape'] = node.element_shape - data_shape = ta_node['element_shape'] - - assert ta_node.has_valid('size') - size = ta_node['size'] - - output_shape = [size] + [data_shape[i] for i in range(len(data_shape))] - - for _, out_node in node.graph.out_edges(node.id): - node.graph.node[out_node]['shape'] = shape_array(output_shape) - node.graph.node[out_node]['value'] = None diff --git a/tools/mo/openvino/tools/mo/ops/TensorArrayRead.py b/tools/mo/openvino/tools/mo/ops/TensorArrayRead.py deleted file mode 100644 index 3275c2921252f7..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/TensorArrayRead.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class TensorArrayReader(Op): - op = "TensorArrayReadV3" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'infer': TensorArrayReader.array_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def array_infer(node: Node): - assert len(node.in_nodes()) == 3 - - handle = node.in_node(0) - - ta_node = Node(node.graph, str(handle.value)) 
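        # A hypothetical example of the pattern shared by these TensorArray* infer functions:
        # the TensorArrayV3 node is resolved through the flattened handle value, and its
        # recorded 'element_shape'/'size' then drive the output shape, e.g. (made-up values):
        #     element_shape, size = [3, 4], 10
        #     gather_shape = [size] + element_shape   # TensorArrayGatherV3 -> [10, 3, 4]
        #     read_shape   = element_shape            # TensorArrayReadV3   -> [3, 4]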
- assert ta_node.has_valid('element_shape') - - for _, out_node in node.graph.out_edges(node.id): - node.graph.node[out_node]['shape'] = shape_array(ta_node['element_shape']) - node.graph.node[out_node]['value'] = None diff --git a/tools/mo/openvino/tools/mo/ops/TensorArrayScatter.py b/tools/mo/openvino/tools/mo/ops/TensorArrayScatter.py deleted file mode 100644 index 0294f5bee38a47..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/TensorArrayScatter.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.utils import match_shapes - - -class TensorArrayScatter(Op): - op = "TensorArrayScatterV3" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'infer': TensorArrayScatter.array_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def array_infer(node: Node): - handle = node.in_node(0) - value = node.in_node(2) - flow_in = node.in_node(3) - - ta_node = Node(node.graph, str(handle.value)) - if ta_node.has_valid('element_shape') and len(ta_node.element_shape) > 0: - assert match_shapes(ta_node['element_shape'], value.shape[1:]), \ - 'Shapes are not compatible: {} and {}'.format(ta_node['element_shape'], value.shape[1:]) - else: - ta_node['element_shape'] = value.shape[1:] - - # Assign element_shape anyway, because the original element_shape can contain -1 - ta_node['element_shape'] = value.shape[1:] - - output_value = flow_in.value - for _, out_node in node.graph.out_edges(node.id): - node.graph.node[out_node]['shape'] = shape_array(flow_in.shape) - node.graph.node[out_node]['value'] = None if output_value is None else mo_array(output_value) diff --git a/tools/mo/openvino/tools/mo/ops/TensorArraySize.py b/tools/mo/openvino/tools/mo/ops/TensorArraySize.py deleted file mode 100644 index d0322999f0c9ac..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/TensorArraySize.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class TensorArraySize(Op): - op = "TensorArraySizeV3" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'infer': TensorArraySize.array_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def array_infer(node: Node): - assert len(node.in_nodes()) == 2 - - handle = node.in_node(0) - - ta_node = Node(node.graph, str(handle.value)) - assert ta_node.has_valid('size') - - output_value = mo_array(ta_node['size']) - - for _, out_node in node.graph.out_edges(node.id): - node.graph.node[out_node]['shape'] = shape_array(output_value.shape) - node.graph.node[out_node]['value'] = output_value.copy() diff --git a/tools/mo/openvino/tools/mo/ops/TensorArrayWrite.py b/tools/mo/openvino/tools/mo/ops/TensorArrayWrite.py deleted file mode 100644 index 516412572c04ff..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/TensorArrayWrite.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 
(C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.utils import match_shapes - - -class TensorArrayWriter(Op): - op = "TensorArrayWriteV3" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'infer': TensorArrayWriter.array_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def array_infer(node: Node): - assert len(node.in_nodes()) == 4 - - handle = node.in_node(0) - index = node.in_node(1) - value = node.in_node(2) - flow_in = node.in_node(3) - - value_shape = value.shape - - ta_node = Node(node.graph, str(handle.value)) - if ta_node.has_valid('element_shape') and len(ta_node.element_shape) > 0: - assert match_shapes(ta_node['element_shape'], value.shape), \ - 'Shapes are not compatible: {} and {}'.format(ta_node['element_shape'], value.shape) - ta_node['element_shape'] = value_shape - - output_value = flow_in.value - - for _, out_node in node.graph.out_edges(node.id): - node.graph.node[out_node]['shape'] = shape_array(flow_in.shape) - node.graph.node[out_node]['value'] = None if output_value is None else output_value.copy() diff --git a/tools/mo/openvino/tools/mo/ops/TensorIterator_ops.py b/tools/mo/openvino/tools/mo/ops/TensorIterator_ops.py deleted file mode 100644 index 786419f178eb02..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/TensorIterator_ops.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -# TODO: check all supported attributes in this file -class TensorIteratorInput(Op): - op = "TensorIteratorInput" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': __class__.op, - 'axis': None, - 'start': None, - 'end': None, - 'stride': None, - 'part_size': None, - 'in_ports_count': 3, - 'out_ports_count': 1, - 'infer': TensorIteratorInput.input_infer, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return ['external_port_id', 'internal_layer_id', 'internal_port_id', 'axis', 'start', 'stride', 'part_size'] - - @staticmethod - def input_infer(node: Node): - pass - - -class TensorIteratorOutput(Op): - op = "TensorIteratorOutput" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': __class__.op, - 'axis': None, - 'start': None, - 'end': None, - 'stride': None, - 'part_size': None, - 'in_ports_count': 3, - 'out_ports_count': 1, - 'infer': TensorIteratorOutput.input_infer, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return ['external_port_id', 'internal_layer_id', 'internal_port_id', 'axis', 'start', 'stride', 'part_size'] - - @staticmethod - def input_infer(node: Node): - pass - - -class TensorIteratorCondition(Op): - op = "TensorIteratorCondition" - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': __class__.op, - 'in_ports_count': 2, - 'out_ports_count': 2, - 'infer': TensorIteratorCondition.input_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def input_infer(node: Node): - pass - - -class TensorIteratorBackEdge(Op): - op = 'TensorIteratorBackEdge' - - def __init__(self, graph: Graph, attrs: dict): - 
mandatory_props = { - 'op': __class__.op, - 'in_ports_count': 3, - 'out_ports_count': 1, - 'infer': TensorIteratorBackEdge.input_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def input_infer(node: Node): - pass diff --git a/tools/mo/openvino/tools/mo/ops/__init__.py b/tools/mo/openvino/tools/mo/ops/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/ops/activation.py b/tools/mo/openvino/tools/mo/ops/activation.py deleted file mode 100644 index 586f5307a3d65a..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/activation.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class Activation(Op): - op = 'Activation' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': __class__.op, - 'op': __class__.op, - 'infer': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return ['operation'] - - def backend_attrs(self): - return [('type', 'operation'), 'alpha'] # operation --> type diff --git a/tools/mo/openvino/tools/mo/ops/activation_ops.py b/tools/mo/openvino/tools/mo/ops/activation_ops.py deleted file mode 100644 index 133e02b744b68d..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/activation_ops.py +++ /dev/null @@ -1,302 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer -from openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.clamp import AttributedClamp -from openvino.tools.mo.ops.op import Op - -activation_ops = ['Sigmoid', 'Tanh', 'ReLU6', 'Exp', 'Elu', 'LogicalNot', 'Floor', 'Ceiling'] - - -class Activation(Op): - enabled = False - operation = None - op = None - version = 'opset1' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'operation': self.operation, - 'version': self.version, - 'infer': self.infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - @classmethod - def infer(cls, node: Node): - return eltwise_infer(node, node.operation) - - -class Sigmoid(Activation): - op = 'Sigmoid' - operation = staticmethod(lambda x: 1 / (1 + np.ma.exp(-x))) - - -class Sin(Activation): - op = 'Sin' - operation = staticmethod(lambda x: np.ma.sin(x)) - - -class Sinh(Activation): - op = 'Sinh' - operation = staticmethod(lambda x: np.ma.sinh(x)) - - -class Asin(Activation): - op = 'Asin' - operation = staticmethod(lambda x: np.ma.arcsin(x)) - - -class Asinh(Activation): - op = 'Asinh' - version = 'opset4' - operation = staticmethod(lambda x: np.arcsinh(x)) - - -class Cos(Activation): - op = 'Cos' - operation = staticmethod(lambda x: np.ma.cos(x)) - - -class Cosh(Activation): - op = 'Cosh' - operation = staticmethod(lambda x: np.ma.cosh(x)) - - -class Acos(Activation): - op = 'Acos' - operation = staticmethod(lambda x: np.ma.arccos(x)) - - -class Acosh(Activation): - op = 'Acosh' - version = 
'opset4' - operation = staticmethod(lambda x: np.ma.arccosh(x)) - - -class Tan(Activation): - op = 'Tan' - operation = staticmethod(lambda x: np.ma.tan(x)) - - -class Tanh(Activation): - op = 'Tanh' - operation = staticmethod(lambda x: np.ma.tanh(x)) - - -class Atan(Activation): - op = 'Atan' - operation = staticmethod(lambda x: np.ma.arctan(x)) - - -class Atanh(Activation): - op = 'Atanh' - version = 'opset4' - operation = staticmethod(lambda x: np.ma.arctanh(x)) - - -class ReLU6(AttributedClamp): - def __init__(self, graph: Graph, attrs: dict): - relu6_attrs = {'min': 0, 'max': 6} - relu6_attrs.update(attrs) - super().__init__(graph, relu6_attrs) - - -class Exp(Activation): - op = 'Exp' - operation = staticmethod(lambda x: np.ma.exp(x)) - - -class ReLU(Activation): - op = 'ReLU' - operation = staticmethod(lambda x: np.ma.maximum(0, x)) - - -class Erf(Activation): - op = 'Erf' - operation = None - - -class Floor(Activation): - op = 'Floor' - operation = staticmethod(lambda x: x if np.issubdtype(x.dtype, np.integer) else np.ma.floor(x)) - - -class Ceiling(Activation): - op = 'Ceiling' - operation = staticmethod(lambda x: np.ma.ceil(x)) - - -class Abs(Activation): - op = 'Abs' - operation = staticmethod(lambda x: np.ma.abs(x)) - - -class Sign(Activation): - op = 'Sign' - operation = staticmethod(lambda x: np.sign(x)) - - -class Elu(Activation): - op = 'Elu' - - def __init__(self, graph: Graph, attrs): - elu_attrs = {'alpha': 1.0} - elu_attrs.update(attrs) - super().__init__(graph, elu_attrs) - - @staticmethod - def elu(values: np.ndarray, alpha: float): - values = values.astype(float) - for index, x in np.ndenumerate(values): - if x < 0: - values[index] = alpha * (np.ma.exp(x) - 1) - return values - - @classmethod - def infer(cls, node: Node): - return eltwise_infer(node, lambda x, alpha: Elu.elu(x, alpha), alpha=node.alpha) - - def backend_attrs(self): - return ['alpha'] - - -class ThresholdedRelu(Activation): - # The operation will be decomposed to primitive operations - op = 'ThresholdedRelu' - - def __init__(self, graph: Graph, attrs): - trelu_attrs = {'alpha': 1.0, 'type': None} - trelu_attrs.update(attrs) - super().__init__(graph, trelu_attrs) - - @staticmethod - def thresholded_relu(values: np.ndarray, alpha: float): - values = values.astype(float) - for index, x in np.ndenumerate(values): - values[index] = values[index] * (x > alpha) - return values - - @classmethod - def infer(cls, node: Node): - return eltwise_infer(node, lambda x, alpha: ThresholdedRelu.thresholded_relu(x, alpha), alpha=node.alpha) - - -class LeakyReLU(Op): - op = 'LeakyReLU' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'infer': self.infer, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def leaky_relu(values: np.ndarray, negative_slope: float): - for index, x in np.ndenumerate(values): - if x < 0: - values[index] = negative_slope * x - return values - - @staticmethod - def infer(node: Node): - return eltwise_infer(node, lambda x, negative_slope: LeakyReLU.leaky_relu(x, negative_slope), - negative_slope=node.negative_slope) - - def supported_attrs(self): - return ['negative_slope'] - - -class LogicalNot(Activation): - op = 'LogicalNot' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - not_attrs = {'type_infer': self.type_infer} - not_attrs.update(attrs) - super().__init__(graph, not_attrs) - - operation = staticmethod(lambda x: np.ma.logical_not(x)) - - @staticmethod - def type_infer(node: 
Node): - node.out_port(0).set_data_type(bool) - - -class Log(Activation): - op = 'Log' - operation = staticmethod(lambda x: np.ma.log(x)) - - -class SoftPlus(Activation): - op = 'SoftPlus' - version = 'opset4' - operation = staticmethod(lambda x: np.ma.log(np.ma.exp(x) + 1.0)) - - -class Mish(Activation): - op = 'Mish' - version = 'opset4' - operation = staticmethod(lambda x: x * np.ma.tanh(np.ma.log(np.ma.exp(x) + 1.0))) - - -class HSwish(Activation): - op = 'HSwish' - version = 'opset4' - operation = staticmethod(lambda x: x * np.ma.minimum(np.ma.maximum(x + 3.0, 0.0), 6.0) / 6.0) - - -class HSigmoid(Activation): - op = 'HSigmoid' - version = 'opset5' - operation = staticmethod(lambda x: np.ma.minimum(np.ma.maximum(x + 3.0, 0.0), 6.0) / 6.0) - - -class Swish(Op): - op = 'Swish' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'type': self.op, - 'version': 'opset4', - - 'infer': self.infer, - - 'in_ports_count': 2, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - node.out_port(0).data.set_shape(node.in_port(0).data.get_shape()) - - beta = 1.0 - if node.is_in_port_connected(1): - beta = node.in_port(1).data.get_value() - if beta is not None: - assert beta.ndim == 0, 'The "beta" value for node {} must be a scalar'.format(node_name) - beta = beta.item() - - input_value = node.in_port(0).data.get_value() - if input_value is not None and beta is not None: - node.out_port(0).data.set_value(input_value / (1.0 + np.exp(-input_value * beta))) - - -class SoftSign(Activation): - op = "SoftSign" - version = "opset9" - operation = staticmethod(lambda x: x / (np.ma.abs(x) + 1)) diff --git a/tools/mo/openvino/tools/mo/ops/adaptive_avg_pooling.py b/tools/mo/openvino/tools/mo/ops/adaptive_avg_pooling.py deleted file mode 100644 index ceead9e5edf07e..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/adaptive_avg_pooling.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.pooling import Pooling - - -class AdaptiveAvgPooling(Op): - ''' - Non-reshape-able op. 
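
Each activation class above only supplies a masked-NumPy `operation` lambda; `eltwise_infer` then applies it elementwise during value propagation. A minimal plain-NumPy restatement of a few of those formulas, for reference only (the deleted code uses `np.ma` so masked/dynamic values propagate; that is omitted here):

import numpy as np

# Plain-NumPy versions of a few 'operation' lambdas listed above.
softplus = lambda x: np.log(np.exp(x) + 1.0)
hswish = lambda x: x * np.minimum(np.maximum(x + 3.0, 0.0), 6.0) / 6.0
swish = lambda x, beta=1.0: x / (1.0 + np.exp(-x * beta))

x = np.array([-2.0, 0.0, 2.0])
print(softplus(x))   # approx. [0.127 0.693 2.127]
print(hswish(x))     # approx. [-0.333 0.    1.667]
print(swish(x))      # approx. [-0.238 0.    1.762]
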
- ''' - enabled = False - op = 'AdaptiveAvgPooling' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'infer': __class__.infer, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - @classmethod - def infer(cls, node: Node): - input_shape = node.in_node(0).shape - input_h = input_shape[2] - input_w = input_shape[3] - output_h = node.output_size[0] - output_w = node.output_size[1] - - stride_h = input_h // output_h - stride_w = input_w // output_w - kernel_h = input_h - (output_h - 1) * stride_h - kernel_w = input_w - (output_w - 1) * stride_w - - data = { - 'window': int64_array([1, 1, kernel_h, kernel_w]), - 'stride': int64_array([1, 1, stride_h, stride_w]), - 'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]), - 'pad_spatial_shape': int64_array([[0, 0], [0, 0]]), - 'pool_method': 'avg', - 'exclude_pad': False, - 'output_spatial_shape': None, - 'spatial_dims': None, - 'channel_dims': int64_array([1]), - 'batch_dims': int64_array([0]), - 'layout': 'NCHW', - 'rounding_type': 'floor', - 'pooling_convention': 'valid' - } - - # update the attributes of the node - Pooling.update_node_stat(node, data) - Pooling.infer(node) diff --git a/tools/mo/openvino/tools/mo/ops/argmax.py b/tools/mo/openvino/tools/mo/ops/argmax.py deleted file mode 100644 index 6bb00b0109334f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/argmax.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op, PermuteAttrs - - -def arg_ops_infer(node: Node): - shape = node.in_port(0).data.get_shape() - node_name = node.soft_get('name', node.id) - assert shape is not None, "Input shape for the node {} is None".format(node_name) - - # there are two inputs in TensorFlow. 
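
The AdaptiveAvgPooling.infer above turns the requested output size into a regular average pooling: stride = input // output and kernel = input - (output - 1) * stride, which is then handed to Pooling.update_node_stat. A standalone check of that arithmetic with made-up sizes:

def adaptive_avg_pool_params(input_hw, output_hw):
    # Reproduces the stride/kernel derivation used by the deleted infer().
    (in_h, in_w), (out_h, out_w) = input_hw, output_hw
    stride_h, stride_w = in_h // out_h, in_w // out_w
    kernel_h = in_h - (out_h - 1) * stride_h
    kernel_w = in_w - (out_w - 1) * stride_w
    return (kernel_h, kernel_w), (stride_h, stride_w)

# A 7x7 feature map pooled adaptively to 3x3 becomes kernel 3x3, stride 2x2.
print(adaptive_avg_pool_params((7, 7), (3, 3)))  # ((3, 3), (2, 2))
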
The second input is the axis for ArgMax - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - if len(connected_in_ports) == 2: - axis = node.in_port(1).data.get_value() - if axis is None: - log.debug('The second argument to {} is None'.format(node.soft_get('name', node.id))) - return - node.axis = axis - # remove the unnecessary input - node.in_port(1).disconnect() - - num_top_axes = shape.size - if num_top_axes < 3: - num_top_axes = 3 - - out_shape = np.ones(num_top_axes, dtype=np.int64) - - if node.has_valid('axis'): - axis = get_canonical_axis_index(shape, node.axis) - node.axis = axis - out_shape = shape.copy() - out_shape[axis] = node.top_k - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) - else: - out_shape[0] = shape[0] - out_shape[2] = node.top_k - if node.has_and_set('out_max_val'): - out_shape[1] = 2 - - node.out_port(0).data.set_shape(out_shape) - - -class ArgMaxOp(Op): - op = 'ArgMax' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'infer': arg_ops_infer, - 'output_type': np.int64, - 'in_ports_count': 2, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'out_max_val', - 'top_k', - 'axis', - ] diff --git a/tools/mo/openvino/tools/mo/ops/argmin.py b/tools/mo/openvino/tools/mo/ops/argmin.py deleted file mode 100644 index 448856e73f82d0..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/argmin.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.argmax import arg_ops_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class ArgMinOp(Op): - op = 'ArgMin' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'infer': arg_ops_infer, - 'output_type': np.int64, - 'in_ports_count': 2, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'top_k', - 'axis', - ] diff --git a/tools/mo/openvino/tools/mo/ops/assert_op.py b/tools/mo/openvino/tools/mo/ops/assert_op.py deleted file mode 100644 index 7725918dc3fe5d..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/assert_op.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class Assert(Op): - op = 'Assert' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': __class__.op, - 'infer': Assert.assert_infer, - 'cf_infer': Assert.assert_control_flow_infer - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def assert_infer(node: Node): - assert_value = node.in_node(0).value - node.out_node().value = assert_value.copy() - node.out_node().shape = [] - - @staticmethod - def assert_control_flow_infer(node: Node, is_executable: bool, mark_executability: callable): - """ - Infers control flow through assert operation node. 
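
When the axis is known, arg_ops_infer above copies the input shape and replaces that axis with top_k (negative axes are canonicalized first). A small sketch of just that shape rule with placeholder shapes; the Caffe-specific out_max_val branch is left out:

import numpy as np

def argmax_out_shape(input_shape, axis, top_k=1):
    # Same shape rule as the deleted arg_ops_infer() when 'axis' is set.
    axis = axis if axis >= 0 else axis + len(input_shape)  # canonicalize negative axis
    out = np.array(input_shape, dtype=np.int64)
    out[axis] = top_k
    return out

print(argmax_out_shape([1, 3, 224, 224], axis=1))     # [  1   1 224 224]
print(argmax_out_shape([8, 1000], axis=-1, top_k=5))  # [8 5]
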
It marks output data nodes executability according to - executability of current node and assert data value - :param node: Node instance to infer control flow through - :param is_executable: if current node is executable - :param mark_executability: function to mark executability of node - """ - graph = node.graph - assert_value = node.out_node().value - for n in [v for _, v in graph.out_edges(node.id)]: - mark_executability(n, assert_value and is_executable) - diff --git a/tools/mo/openvino/tools/mo/ops/assign.py b/tools/mo/openvino/tools/mo/ops/assign.py deleted file mode 100644 index 3ab82769a27409..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/assign.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op - - -class Assign(Op): - op = 'Assign' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset6', - 'infer': self.infer, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - def backend_attrs(self): - return ['variable_id'] - - @staticmethod - def infer(node: Node): - assert node.has_valid('variable_id'), \ - "There is no required attribute variable_id in Assign op with name " + node.id - node.out_port(0).data.set_shape(node.in_port(0).data.get_shape()) diff --git a/tools/mo/openvino/tools/mo/ops/aten.py b/tools/mo/openvino/tools/mo/ops/aten.py deleted file mode 100644 index 9fbefaa8f96142..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/aten.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class ATen(Op): - op = 'ATen' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': None, - - 'infer': None, - }, attrs) - - def supported_attrs(self): - return ['mode', 'operator'] diff --git a/tools/mo/openvino/tools/mo/ops/axpy.py b/tools/mo/openvino/tools/mo/ops/axpy.py deleted file mode 100644 index 0ff157c16b2ef2..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/axpy.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class AxpyOp(Op): - """ - Empty Op for Axpy layer. 
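
assert_control_flow_infer above marks every successor data node as executable only when both the Assert value and the node itself are executable. A tiny illustration of that rule with a plain networkx DiGraph standing in for MO's Graph (node names are hypothetical):

import networkx as nx

def assert_control_flow_infer(graph, node_id, assert_value, is_executable, mark_executability):
    # Mirror of the deleted method: propagate executability to all successors.
    for _, out_node in graph.out_edges(node_id):
        mark_executability(out_node, bool(assert_value) and is_executable)

g = nx.DiGraph()
g.add_edges_from([('assert', 'data_0'), ('assert', 'data_1')])
state = {}
assert_control_flow_infer(g, 'assert', assert_value=True, is_executable=True,
                          mark_executability=lambda n, v: state.update({n: v}))
print(state)  # {'data_0': True, 'data_1': True}
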
It will be replaced by AxpyToSSandAdd FrontReplacer - """ - op = 'Axpy' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': __class__.op, - 'infer': None - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/binarization.py b/tools/mo/openvino/tools/mo/ops/binarization.py deleted file mode 100644 index 9187756cf46dd5..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/binarization.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class Binarization(Op): - op = 'Binarization' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': __class__.op, - 'infer': None, - 'dst_type': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/box_nms.py b/tools/mo/openvino/tools/mo/ops/box_nms.py deleted file mode 100644 index 7484d4809cd2cb..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/box_nms.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class BoxNms(Op): - """ - It is assumed that there is no equivalent of this op in IE. - """ - op = '_contrib_box_nms' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'coord_start': 2, - 'force_suppress': False, - 'id_index': 0, - 'overlap_thresh': 0.45, - 'score_index': 1, - 'topk': 400, - 'valid_thresh': 0.01, - 'infer': self.infer - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'coord_start', - 'force_suppress', - 'id_index', - 'overlap_thresh', - 'score_index', - 'topk', - 'valid_thresh', - ] - - @staticmethod - def infer(node: Node): - raise Error( - "Operation _contrib_box_nms not not supported. 
" + - "For gluoncv ssd topologies use cmd parameter: '--enable_ssd_gluoncv' " + - refer_to_faq_msg(102)) diff --git a/tools/mo/openvino/tools/mo/ops/broadcast.py b/tools/mo/openvino/tools/mo/ops/broadcast.py deleted file mode 100644 index 5bc1b89d4a9008..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/broadcast.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, shape_array, undefined_shape_of_rank -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.broadcasting import bi_directional_shape_broadcasting, uni_directional_shape_broadcasting, \ - uni_directional_broadcasting, bi_directional_broadcasting, explicit_broadcasting, explicit_shape_broadcasting -from openvino.tools.mo.utils.error import Error - - -class Broadcast(Op): - """ Broadcast tensor to a given shape with optional axis parameter - - Inputs: - [0] - tensor to be broadcasted - [1] - shape to be broadcast to - [2] - optional axes_mapping tensor - """ - - op = 'Broadcast' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset3', - 'mode': 'numpy', - 'in_ports_count': 3, - 'out_ports_count': 1, - 'force_precision_in_ports': - {1: 'int64', - 2: 'int64', - }, - 'infer': self.infer, - }, attrs) - - def supported_attrs(self): - return ['mode'] - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - - input_shape = node.in_port(0).data.get_shape() - input_value = node.in_port(0).data.get_value() - target_shape_shape = node.in_port(1).data.get_shape() - target_shape = node.in_port(1).data.get_value() - assert node.has_and_set('mode'), 'Broadcasting mode is not defined for node "{}"'.format(node_name) - - PermuteInputs().set_input_permutation(node.in_node(1), node, 'output:0', 'shape') - - # Dynamic target shape is possible to infer only if shape of target shape is static - if target_shape is None: - assert len(target_shape_shape) == 1, 'Shape of target_shape must be [1] for node "{}"'.format(node_name) - assert is_fully_defined(target_shape_shape), 'Output shape is not defined for node "{}"'.format(node_name) - new_shape = undefined_shape_of_rank(target_shape_shape.item(0)) - node.out_port(0).data.set_shape(new_shape) - if node.mode == 'explicit': - assert node.is_in_port_connected( - 2), 'Axes mapping must be specified for Broadcast(mode="explicit"). Node: `{}`'.format(node_name) - PermuteInputs().set_input_permutation(node.in_node(2), node, 'output:0', 'axis') - return - - if input_value is not None and not node.has_and_set('stop_value_propagation') and \ - is_fully_defined(target_shape): - if node.mode == 'numpy': - node.out_port(0).data.set_value(uni_directional_broadcasting(input_value, target_shape)) - elif node.mode == 'bidirectional': - node.out_port(0).data.set_value(bi_directional_broadcasting(input_value, target_shape)) - elif node.mode == 'explicit': - axes_mapping = node.in_port(2).data.get_value() - assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \ - 'is not supported. 
Node: `{}`'.format(node_name) - PermuteInputs().set_input_permutation(node.in_node(2), node, 'output:0', 'axis') - axes_mapping = node.in_port(2).data.get_value() - node.out_port(0).data.set_value(explicit_broadcasting(input_value, target_shape, axes_mapping)) - else: - raise Error('The node "{}" has unsupported mode "{}"'.format(node_name, node.mode)) - else: - if node.mode == 'numpy': - node.out_port(0).data.set_shape(uni_directional_shape_broadcasting(input_shape, target_shape)) - elif node.mode == 'bidirectional': - node.out_port(0).data.set_shape(bi_directional_shape_broadcasting(input_shape, target_shape)) - elif node.mode == 'explicit': - axes_mapping = node.in_port(2).data.get_value() - assert axes_mapping is not None, 'Broadcast(mode="explicit") with dynamic axes_mapping input ' \ - 'is not supported. Node: `{}`'.format(node_name) - PermuteInputs().set_input_permutation(node.in_node(2), node, 'output:0', 'axis') - axes_mapping = node.in_port(2).data.get_value() - new_shape, _ = explicit_shape_broadcasting(input_shape, target_shape, axes_mapping) - node.out_port(0).data.set_shape(new_shape) - else: - raise Error('The node "{}" has unsupported mode "{}"'.format(node_name, node.mode)) diff --git a/tools/mo/openvino/tools/mo/ops/bucketize.py b/tools/mo/openvino/tools/mo/ops/bucketize.py deleted file mode 100644 index f7ddc506f64489..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/bucketize.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op - - -class Bucketize(Op): - op = 'Bucketize' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'kind': 'op', - 'type': self.op, - 'op': self.op, - 'version': 'opset3', - - 'type_infer': self.type_infer, - 'infer': self.infer, - - 'in_ports_count': 2, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - version = self.get_opset() - if version == "extension": - return [('with_right_bound', lambda node: bool_to_str(node, 'with_right_bound'))] - else: - return [ - ('with_right_bound', lambda node: bool_to_str(node, 'with_right_bound')), - ('output_type', lambda node: np_data_type_to_destination_type(node.output_type)), - ] - - @staticmethod - def type_infer(node): - # the output is always integer since the layer outputs a bucket index - if node.get_opset() == "extension": - node.out_port(0).set_data_type(np.int32) - else: - assert node.output_type in [np.int64, np.int32], \ - 'Bucketize `output_type` attribute must be int32 or int64, `{}` found' \ - ''.format(np.dtype(node.output_type).name) - node.out_port(0).set_data_type(node.output_type) - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - assert node.with_right_bound is not None, \ - "Attribute \"with_right_bound\" is not defined" - assert len(node.in_nodes()) == 2, \ - "Incorrect number of inputs for {} node".format(node.id) - if node.get_opset() != "extension": - assert node.has_valid('output_type'), \ - '`output_type` attribute is not set for Bucketize node `{}`'.format(node_name) - assert node.output_type in [np.int64, np.int32], \ - 'Bucketize `output_type` attribute 
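
Broadcast.infer above dispatches on three modes: 'numpy' (uni-directional, the target shape is the output shape and the input must be broadcastable to it), 'bidirectional' (the output shape is the mutual NumPy broadcast of both shapes) and 'explicit' (an axes_mapping input pins input axes to target axes). The first two rules follow standard NumPy semantics, illustrated below with placeholder shapes (the MO helpers in utils/broadcasting.py additionally handle dynamic dimensions):

import numpy as np

data = np.zeros((16, 1))

# 'numpy' (uni-directional): the output shape equals the target shape.
print(np.broadcast_to(data, (1, 16, 50)).shape)  # (1, 16, 50)

# 'bidirectional': the output shape is the mutual broadcast of both shapes
# (np.broadcast_shapes requires NumPy >= 1.20).
print(np.broadcast_shapes((16, 1), (1, 50)))     # (16, 50)
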
must be int32 or int64, `{}` found'.format(np.dtype(node.output_type).name) - - output_shape = node.in_port(0).data.get_shape() - node.out_port(0).data.set_shape(output_shape) - - input_value = node.in_port(0).data.get_value() - buckets_value = node.in_port(1).data.get_value() - - # compute if all input is constant - if input_value is not None and buckets_value is not None: - node.out_port(0).data.set_value(mo_array(np.digitize(input_value, buckets_value, right=node.with_right_bound), dtype=node.output_type)) diff --git a/tools/mo/openvino/tools/mo/ops/clamp.py b/tools/mo/openvino/tools/mo/ops/clamp.py deleted file mode 100644 index 56d9a5b348ff06..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/clamp.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class AttributedClamp(Op): - op = 'AttributedClamp' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': 'Clamp', - 'op': self.op, - 'version': 'opset1', - 'infer': self.infer, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return [ - 'max', - 'min' - ] - - @staticmethod - def infer(node): - name = node.soft_get('name', node.id) - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - - assert len(connected_in_ports) == 1 and connected_in_ports[0].idx == 0, \ - 'AttributedClamp should have only one input, but it has {}'.format(len(connected_in_ports)) - assert node.has_valid('max') and node.has_valid('min'), \ - 'Mandatory attributes `max` and `min` were not set for AttributedClamp node: `{}`'.format(name) - assert node.max >= node.min, \ - 'AttributedClamp max=={} is less than min=={} for node `{}`'.format(node.max, node.min, name) - - if node.in_port(0).data.get_value() is not None: - node.out_port(0).data.set_value(np.clip(node.in_port(0).data.get_value(), node['min'], node['max'])) - else: - node.out_port(0).data.set_shape(node.in_port(0).data.get_shape()) - - -class Clamp(Op): - op = 'Clamp' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'infer': self.infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node): - name = node.soft_get('name', node.id) - min_input_connected = node.has_port('in', 1) and not node.in_port(1).disconnected() - max_input_connected = node.has_port('in', 2) and not node.in_port(2).disconnected() - - input_value = node.in_port(0).data.get_value() - min_value = node.in_port(1).data.get_value() if min_input_connected else np.finfo(np.float32).min - max_value = node.in_port(2).data.get_value() if max_input_connected else np.finfo(np.float32).max - - if input_value is not None and min_value is not None and max_value is not None: - assert np.all(max_value >= min_value), \ - 'Clamp max_value=={} is less than min_value=={} for node `{}`'.format(max_value, min_value, name) - node.out_port(0).data.set_value(np.clip(node.in_port(0).data.get_value(), min_value, max_value)) - else: - node.out_port(0).data.set_shape(node.in_port(0).data.get_shape()) diff --git a/tools/mo/openvino/tools/mo/ops/concat.py b/tools/mo/openvino/tools/mo/ops/concat.py deleted file mode 100644 index 9a561b4af77e6f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/concat.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel 
Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.concat import concat_infer -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.op import Op - - -class Concat(Op): - op = 'Concat' - enabled = True - - def __init__(self, graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'axis': 1, - 'infer': concat_infer, - 'reverse_infer': self.reverse_infer, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return ['axis'] - - @staticmethod - def reverse_infer(node: Node): - assert hasattr(node, 'axis') - out_shape = node.out_port(0).data.get_shape() - - if out_shape is None: - return - - out_shape[node.axis] = dynamic_dimension - - for in_port in node.in_ports().values(): - in_shape = in_port.data.get_shape() - if in_shape is None: - in_port.data.set_shape(out_shape) diff --git a/tools/mo/openvino/tools/mo/ops/const.py b/tools/mo/openvino/tools/mo/ops/const.py deleted file mode 100644 index 8c6e3cb1380018..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/const.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np, np_data_type_to_destination_type, \ - precision_to_destination_type -from openvino.tools.mo.ops.op import Op - - -class Const(Op): - """ - Operation producing constant value stored in the attribute 'value' of shape 'shape'. - """ - op = 'Const' - - def __init__(self, graph, attrs: dict = None): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'infer': self.infer, - 'value': None, - 'shape': None, - 'data_type': None, - 'out_ports_count': 1, - 'type_infer': self.type_infer, - }, attrs) - if not isinstance(self.attrs['value'], np.ndarray): - self.attrs['value'] = mo_array(self.attrs['value']) - - self.attrs['shape'] = int64_array(self.attrs['value'].shape) - if 'force_shape' in self.attrs and self.attrs['force_shape'] is not None: - self.attrs['shape'] = int64_array(self.attrs['force_shape']) - - self.attrs['data_type'] = self.attrs['value'].dtype - if 'force_type' in self.attrs and self.attrs['force_type'] is not None: - self.attrs['data_type'] = data_type_str_to_np(self.attrs['force_type']) - - def supported_attrs(self): - return [ - 'offset', - 'size', - ('shape', lambda node: ','.join([str(i) for i in node.shape])), - ('element_type', lambda node: precision_to_destination_type(node.force_type) - if node.has_valid('force_type') else np_data_type_to_destination_type(node.value.dtype)), - ] - - @staticmethod - def type_infer(node): - node.out_port(0).set_data_type(node.value.dtype, override=True) - if node.has_valid('force_type'): - node.out_port(0).set_data_type(node.data_type, override=True) - - @staticmethod - def infer(node): - # no broadcast, copy as-is (tensor or scalar) or apply broadcast depending on value and shape - output_value = node.value if isinstance(node.value, np.ndarray) or len(node.shape) == 0 \ - else np.full(node.shape, node.value) - - node.out_port(0).data.set_value(output_value) diff --git a/tools/mo/openvino/tools/mo/ops/constant_fill.py b/tools/mo/openvino/tools/mo/ops/constant_fill.py deleted file mode 100644 index 
294e1d8bfd7c35..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/constant_fill.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class ConstantFill(Op): - """ Constant blob generation by broadcasting specified value to a given shape. - - It is assumed that there is no equivalent of this op in IE, - so it is usually relevant to constant folding. - """ - op = 'ConstantFill' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'input_as_shape': 1, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'infer': self.infer - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'input_as_shape', - 'fill_value' - ] - - @staticmethod - def infer(node: Node): - assert len(node.in_nodes()) == 1 - assert node.fill_value is not None - assert node.input_as_shape - - shape = node.in_port(0).data.get_value() - assert shape is not None - - if is_fully_defined(shape): - node.out_port(0).data.set_value(np.full(shape, node.fill_value, np.float32)) - else: - node.out_port(0).data.set_shape(shape) diff --git a/tools/mo/openvino/tools/mo/ops/constant_of_shape.py b/tools/mo/openvino/tools/mo/ops/constant_of_shape.py deleted file mode 100644 index 76ced6f78fcb53..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/constant_of_shape.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class ConstantOfShape(Op): - """ Create a tensor of the shape specified in the first input with all values equal to attribute 'value'. 
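
Const.infer and ConstantFill.infer above both reduce to materializing a tensor with np.full once the shape (and, for ConstantFill, the value of its shape input) is fully defined. A minimal standalone version of that folding, with made-up values:

import numpy as np

def constant_fill(shape, fill_value=0.0):
    # Same folding as the deleted ConstantFill.infer when the shape input is fully defined.
    return np.full(shape, fill_value, dtype=np.float32)

print(constant_fill([2, 3], 1.5))
# [[1.5 1.5 1.5]
#  [1.5 1.5 1.5]]
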
- The operation is converted to Broadcast operation - """ - - op = 'ConstantOfShape' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'kind': 'op', - 'type': None, - 'op': __class__.op, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'fill_value': 0, - 'infer': None, - }, attrs) - - def supported_attrs(self): - return ['fill_value'] diff --git a/tools/mo/openvino/tools/mo/ops/convolution.py b/tools/mo/openvino/tools/mo/ops/convolution.py deleted file mode 100644 index 6a75ad1d45b39f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/convolution.py +++ /dev/null @@ -1,310 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mark_input_bins, assign_dims_to_weights, \ - tf_window_op_pad_infer, dynamic_dimension_value, shape_array, is_fully_defined, undefined_shape_of_rank -from openvino.tools.mo.front.onnx.extractors.utils import get_backend_pad -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op, PermuteAttrs -from openvino.tools.mo.pipeline.common import convert_const_node_value_type -from openvino.tools.mo.utils.error import Error - - -class Convolution(Op): - op = 'Convolution' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - 'type_infer': self.type_infer, - 'multiplication_transparent': True, - 'multiplication_transparent_ports': [(0, 0), (1, 0)], - 'in_ports_count': 3, - 'out_ports_count': 1, - }, attrs) - - def backend_attrs(self): - def pad_attribute_helper(node: Node, pad_type: str='begin'): - assert pad_type in ['begin', 'end'] - if not node.has_valid('pad'): - return None - pad = get_backend_pad(node.pad, node.spatial_dims, 0 if pad_type == 'begin' else 1) - if node.has_valid('auto_pad') and node.auto_pad != 'explicit': - pad = [0 for _ in pad] - return ','.join(map(str, pad)) - - return [ - ('auto_pad', lambda node: node.auto_pad if node.has_valid('auto_pad') else 'explicit'), - ('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))), - ('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims]))), - ('pads_begin', lambda node: pad_attribute_helper(node, 'begin')), - ('pads_end', lambda node: pad_attribute_helper(node, 'end')), - - # for Backpropdata operations only - according to spec - ('output_padding', lambda node: ','.join(map(str, node.output_padding[node.spatial_dims])) \ - if node.has_valid('output_padding') and node.type in - ('GroupConvolutionBackpropData', 'ConvolutionBackpropData') else None), - - # for BinaryConvolution only - 'pad_value', - 'mode', - ] - - @staticmethod - def calc_convolution(input_spatial_shape, stride_spatial_shape, pad_spatial_shape, kernel_extent): - """ - Calculates output shape for Convolution. - Verified to be applicable for both Caffe and ONNX. - """ - spatial_val_wo_stride = input_spatial_shape + pad_spatial_shape - kernel_extent - - if np.any(spatial_val_wo_stride < 0): - raise Error("Data after padding has dimension less than window size. 
" + - "Possible reason of error is incorrectly specified model input shape(s).") - - return spatial_val_wo_stride / stride_spatial_shape + 1 - - @staticmethod - def calc_deconvolution(node, input_spatial_shape, pad_spatial_shape, kernel_extent): - """ - Calculates output shape for Deconvolution. - Verified to be applicable for both Caffe and ONNX with explicitly defined pads. - If pads are not specified for ONNX operator, this function is not applicable. - """ - return node.stride[node.spatial_dims] * (input_spatial_shape - 1) + kernel_extent - pad_spatial_shape - - @staticmethod - def infer(node: Node): - """ - Infers shape of convolution node as it is done in ONNX. - It is very similar to one that Caffe does, but slightly different. - We made a complete fork of this function because they are supposed to be - supported differently by different people. - Args: - node: graph convolution node - """ - input_shape = node.in_port(0).data.get_shape() - if input_shape is None: - raise Error('Input data shape is None for node {}'.format(node.soft_get('name', node.id))) - - # bias_term cannot be deduced earlier for frameworks that represent - # convolution weights/biases as regular inputs; so the number of inputs - # is being checked here and restore correct value for bias_term to - # have the rest of the code unchanged. It will be used after we merge - # several infer functions for convolution in different FWs to a single one. - if not node.has_valid('bias_term'): - node['bias_term'] = len(node.in_nodes()) == 3 - - weights_index = node.weights_index if node.has_valid('weights_index') else 1 - # Reshape weights kernel to original shape - # In case of Caffe framework, values for weights have no structured shape like OIHW - # so we have to reshape weights to normal shape - # For this case, Convolution node should have attribute reshape_kernel = True - if node.has_valid('reshape_kernel') and node.reshape_kernel: - if not (node.has_valid('output') and node.has_valid('channel_dims') and node.has_valid( - 'group') and node.has_valid('kernel_spatial')): - log.error('Cannot reshape kernel due to not all required attrs was set to {} node'.format(node.id)) - return - - # since item() unmasks values, result should be masked back - num_in_channels = shape_array(input_shape[node.channel_dims].item()) - - # layout for Convolution weights is OIHW - kernel_shape = shape_array([node.output, num_in_channels / node.group, - *[node.kernel_spatial[i] for i in range(len(node.kernel_spatial))]]) - if node.type == 'Deconvolution': # layout for Deconvolution weights is IOHW - kernel_shape[[0, 1]] = kernel_shape[[1, 0]] - - if is_fully_defined(kernel_shape) and np.prod(kernel_shape) != np.prod(node.in_node(weights_index).value.shape): - log.error("Size of weights {} does not match kernel shape: {}\n" - "".format(np.prod(node.in_node(weights_index).value.shape), kernel_shape) + - " Possible reason is wrong channel number in input shape\n") - raise Error("Cannot reshape weights to kernel shape") - - if not is_fully_defined(kernel_shape): - num_undefined = np.count_nonzero(kernel_shape.mask is True) # pylint: disable=no-member - if num_undefined > 1: - raise Error('Too many undefined dimensions of the kernel shape for node {}. 
Use --input_shape ' - 'command line parameter to specify model input shapes'.format(node.soft_get('name', - node.id))) - kernel_size = np.prod(node.in_node(weights_index).value.shape) - # calculate undefined dimension using fully defined shape of the weights input and known kernel_shape - # dimensions - kernel_shape[np.where(kernel_shape == np.ma.masked)[0][0]] = kernel_size // np.prod(kernel_shape) - - node.in_node(weights_index).shape = shape_array(kernel_shape) - node.in_node(weights_index).value = np.reshape(node.in_node(weights_index).value, kernel_shape) - node.reshape_kernel = False - - # Pass weights shape to node attribute kernel_shape - kernel_shape = node.in_node(weights_index).shape - node['kernel_shape'] = kernel_shape - # Calculate kernel_spatial_idx and spatial_dims if it is not specified - # It is necessary for ONNX dut to convolution can be 1D/2D/3D - if not node.has_valid('kernel_spatial_idx'): - node['kernel_spatial_idx'] = np.delete([x for x in range(len(kernel_shape))], - (node.input_feature_channel, node.output_feature_channel)) - - if not node.has_valid('spatial_dims'): - node['spatial_dims'] = np.delete([x for x in range(len(input_shape))], - (node.channel_dims[0], node.batch_dims[0])) - - node['kernel_spatial'] = kernel_shape[node.kernel_spatial_idx] - - if not node.has_valid('output'): - # restore the number of output feature maps from the second argument that is weights - if node.type in ['Convolution', 'Deconvolution', 'DeformableConvolution', 'BinaryConvolution']: - node['output'] = kernel_shape[node.output_feature_channel] - else: - raise Error( - 'Convolution infer function was called for a node {} with unsupported type {}', - node.soft_get('name'), - node.type - ) - - # Set default values for dilation, strides and pads if not set - if not node.has_valid('dilation'): - node['dilation'] = np.full([len(input_shape)], 1, dtype=np.int64) - if not node.has_valid('stride'): - node['stride'] = np.full([len(input_shape)], 1, dtype=np.int64) - if not node.has_valid('pad'): - node['pad'] = int64_array([[0, 0]] * len(input_shape)) - node['pad_spatial_shape'] = node.pad[node.spatial_dims] - - if not node.has_valid('output_padding'): - node['output_padding'] = np.full([len(input_shape)], 0, dtype=np.int64) - - if node.has_valid('output_padding') and len(input_shape) > len(node['output_padding']): - output_padding = np.zeros(len(input_shape), dtype=np.int64) - for i in range(len(node['output_padding'])): - output_padding[i] = node['output_padding'][i] - node['output_padding'] = output_padding - - input_spatial_shape = input_shape[node.spatial_dims] - stride_spatial_shape = node.stride[node.spatial_dims] - - kernel_extent = node.dilation[node.spatial_dims] * (node.kernel_spatial - 1) + 1 - # TensorFlow always has auto_pad attribute that can be either valid or same_upper - # In ONNX auto_pad attribute is deprecated but appears in some models (could be valid, same_upper or same_lower) - # Caffe do not use auto_pad attribute - if node.has_valid('auto_pad') and node.auto_pad != 'explicit' and not node.has_valid('output_spatial_shape'): - node['pad_spatial_shape'], node['output_spatial_shape'] = tf_window_op_pad_infer(input_spatial_shape, - kernel_extent, - stride_spatial_shape, - node.auto_pad, - node.type == 'Deconvolution') - - pad = np.zeros((len(input_shape), 2), dtype=np.int64) - pad[node.spatial_dims] = node.pad_spatial_shape - node.pad = pad - else: - pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1) - if node.type in ('Convolution', 'BinaryConvolution'): 
- float_spatial = Convolution.calc_convolution(input_spatial_shape, stride_spatial_shape, - pad_spatial_shape, - kernel_extent) - node['output_spatial_shape'] = shape_array(float_spatial) - elif node.type == 'Deconvolution': - # In case of given output_spatial_shape we calculate pads spatial - if node.has_valid('output_spatial_shape'): - if node.has_valid('get_pad'): - node['pad'] = node.get_pad(node, input_shape, kernel_shape) - else: - log.debug('Can\'t calculate paddings due to missing lambda get_pad in {} node'.format(node.id)) - return - else: - output_padding = node.output_padding[node.spatial_dims] if node.has_valid('output_padding') else None - if output_padding is not None and any(output_padding): - pad_spatial_shape -= output_padding - for dim in range(len(pad_spatial_shape)): - node.pad_spatial_shape[dim][1] -= pad_spatial_shape[dim] - - float_spatial = Convolution.calc_deconvolution(node, input_spatial_shape, pad_spatial_shape, - kernel_extent) - node['output_spatial_shape'] = shape_array(float_spatial) - elif node.type == 'DeformableConvolution': - # get the output spatial shape from the second input with offsets - node['output_spatial_shape'] = int64_array([node.in_node(1).shape[2:4]]) - else: - assert 'Unsupported layer type "{}"'.format(node.type) - - # For cases when group attribute wasn't set in extractor we should specify get_group attribute - # this attribute should store lambda node: ... (check tf convolution extractor) - if node.has_valid('get_group'): - node['group'] = node.get_group(node) - output_shape = shape_array([dynamic_dimension_value for _ in range(len(input_shape))]) - output_shape[node.batch_dims] = input_shape[node.batch_dims] # pylint: disable=unsupported-assignment-operation - output_shape[node.spatial_dims] = node.output_spatial_shape # pylint: disable=unsupported-assignment-operation - - # For cases when output attribute wasn't set in extractor we should specify get_output_feature_dim attribute - # this attribute should store lambda node: ... (check tf convolution extractor) - if node.has_valid('get_output_feature_dim'): - node['output'] = node.get_output_feature_dim(node) - output_shape[node.channel_dims] = node.output # pylint: disable=unsupported-assignment-operation - node['output_shape'] = output_shape - - node.out_port(0).data.set_shape(output_shape) - - # bin attribute is used for pre-processing, but it will be deleted in BlobNormalizer transformation - # and the blobs (weights, biases) will be represented as inputs to the node - mark_input_bins(node, start_port=1 if node.type != 'DeformableConvolution' else 2) - assign_dims_to_weights(node.in_node(weights_index), node.kernel_spatial_idx, node.input_feature_channel, - node.output_feature_channel, len(kernel_shape)) - - PermuteAttrs.create_permute_attrs(node, attrs=[('pad', 'input:0'), - ('stride', 'input:0'), - ('dilation', 'input:0'), - ('output_shape', 'input:0'), - ('batch_dims', 'input:0'), - ('channel_dims', 'input:0'), - ('spatial_dims', 'input:0'), - - ('kernel_shape', 'input:{}'.format(weights_index)), - ('kernel_spatial_idx', 'input:{}'.format(weights_index)), - ('input_feature_channel', 'input:{}'.format(weights_index)), - ('output_feature_channel', 'input:{}'.format(weights_index)), - ]) - - # is needed to permute Conv weights from the original TF [H, W, C_IN, C_OUT] into OV [C_OUT, C_IN, H, W] - # but for other nodes in weights subgraph permutations must turned off - # by marking with MarkSubGraphsWithCorrectLayout even if graph layout is NCHW. 
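
For the explicit-padding branch above, the spatial output size follows the rule in calc_convolution: with kernel_extent = dilation * (kernel - 1) + 1, each spatial dimension becomes (input + pads - kernel_extent) / stride + 1. A standalone check with made-up 2D parameters (integer floor division is used here; the deleted helper itself returns the float value):

import numpy as np

def conv_output_spatial(input_spatial, kernel, stride, pads, dilation):
    # pads is given per spatial dim as [[begin, end], ...].
    input_spatial = np.array(input_spatial)
    kernel_extent = np.array(dilation) * (np.array(kernel) - 1) + 1
    pad_total = np.sum(np.array(pads), axis=1)
    return (input_spatial + pad_total - kernel_extent) // np.array(stride) + 1

# 3x3 kernel, stride 1, pad 1 keeps a 32x32 map at 32x32;
# the same kernel with stride 2 and no padding yields 15x15 on a 31x31 map.
print(conv_output_spatial([32, 32], [3, 3], [1, 1], [[1, 1], [1, 1]], [1, 1]))  # [32 32]
print(conv_output_spatial([31, 31], [3, 3], [2, 2], [[0, 0], [0, 0]], [1, 1]))  # [15 15]
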
- PermuteAttrs.set_permutation(node.in_node(weights_index), node, node.soft_get('get_weights_permute', None)) - PermuteInputs().set_input_permutation( - node.in_node(weights_index), node, 'input:{}'.format(weights_index), 'transpose') - - @staticmethod - def reverse_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - if input_shape is None: - shape = None - # TODO FIXME this is ugly solution based on various attributes which may not be set in some cases - for attr in ['dilation', 'stride', 'pad']: - if node.has_valid(attr): - shape = undefined_shape_of_rank(len(node.soft_get(attr))) - break - if shape is not None: - node.in_port(0).data.set_shape(shape) - - @staticmethod - def type_infer(node): - in_type_0 = node.in_port(0).get_data_type() - in_type_1 = node.in_port(1).get_data_type() - in_node_1 = node.in_port(1).get_source().node - # in case of input values data type mismatch we try to change the type of the constant to match the type of - # input at index 0. - if in_type_1 in [np.float16, np.float32, np.float64] and in_type_0 != in_type_1 and in_node_1.op == 'Const': - in_node_1 = node.in_port(1).get_source().node - log.error("Changing Const node '{}' data type from {} to {} for Convolution operation".format( - in_node_1.soft_get('name', in_node_1.id), in_type_1, in_type_0), - extra={'is_warning': True}) - convert_const_node_value_type(in_node_1, in_type_0) - node.out_port(0).set_data_type(node.in_port(0).get_data_type()) diff --git a/tools/mo/openvino/tools/mo/ops/copyop.py b/tools/mo/openvino/tools/mo/ops/copyop.py deleted file mode 100644 index bdf1322826c43f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/copyop.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class CopyOp(Op): - """ - Empty Op for Copy layer. 
It will be replaced by FrontReplacer - """ - op = 'copy' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': __class__.op, - 'infer': None - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/crop.py b/tools/mo/openvino/tools/mo/ops/crop.py deleted file mode 100644 index 2344da7d4f12d4..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/crop.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op, PermuteAttrs -from openvino.tools.mo.utils.error import Error - - -class Crop(Op): - op = 'Crop' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'infer': self.infer, - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) - - def backend_attrs(self): - return [ - ('axis', lambda node: None if not node.has_valid('axis') else ','.join(map(str, node.axis))), - ('offset', lambda node: None if not node.has_valid('offset') else ','.join(map(str, node.offset))), - - ('dim', lambda node: None if not node.has_valid('dim') else ','.join(map(str, node.dim))), - - ('crop_begin', lambda node: None if not node.has_valid('crop_begin') else ','.join(map(str, - node.crop_begin))), - ('crop_end', lambda node: None if not node.has_valid('crop_end') else ','.join(map(str, node.crop_end))), - ] - - @staticmethod - def infer(node: Node): - """ - Crops the shape of the output blob according to input ones be specified params. - Detailed Crop description can be found in IR Catalog specification. - In short: crop layer can be represented in three ways: - 1. Two inputs, where the shape of the second input is crop dim (axis and offset attrs) - 2. One input and dim, axis and offset attributes. - 3. Ont input and axis, crop_begin and crop_end attributes - """ - - input_count = len(node.in_nodes()) - - if input_count == 2: - Crop._two_inputs_infer(node) - elif input_count == 1: - Crop._one_input_infer(node) - else: - log.error('Wrong number of input tensors ({}) in {}'.format(input_count, node.name)) - return - - @staticmethod - def _one_input_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - node_name = node.soft_get('name', node.id) - if input_shape is None: - raise Error('input_shape is none for {} node'.format(node_name)) - - if not node.has_valid('axis'): - raise Error('axis attribute is missing for {} node. 
should be set in crop extractor'.format(node_name)) - - output_shape = input_shape.copy() - if node.has_valid('dim'): - if len(node.dim) != len(node.axis): - raise Error('Number of axis "{}" should match number of dim "{}" for node "{}"' - ''.format(node.axis, node.dim, node_name)) - output_shape[node.axis] = node.dim - elif node.has_valid('crop_begin') and node.has_valid('crop_end'): - if len(node.crop_begin) != len(node.axis) or len(node.crop_end) != len(node.axis): - raise Error('number of crop_begin({})/crop_end({}) should match number of axis "{}" for node "{}"' - ''.format(node.crop_begin, node.crop_end, node.axis, node_name)) - if type(node.axis) in [list, tuple]: - for i in range(len(node.axis)): - output_shape[node.axis[i]] = output_shape[node.axis[i]] - node.crop_begin[i] - node.crop_end[i] - else: - output_shape[node.axis] = output_shape[node.axis] - node.crop_begin - node.crop_end - else: - raise Error('Crop node {} should have either dim or crop_begin and crop_end attributes'.format(node_name)) - - node.out_port(0).data.set_shape(output_shape) - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) - - @staticmethod - def _two_inputs_infer(node: Node): - N = len(node.in_nodes()) - node_name = node.soft_get('name', node.id) - - shapes = [node.in_port(i).data.get_shape() for i in range(N)] - if any(s is None for s in shapes): - raise Error('Not all input shapes were defined for {} node'.format(node_name)) - - if not node.has_valid('axis'): - raise Error('axis attribute is missing for {} node. should be set in crop extractor'.format(node_name)) - - if not node.has_valid('offset'): - raise Error('offset attribute is missing for {} node. should be set in crop extractor'.format(node_name)) - - input_shape = shapes[0].copy() - start_axis = get_canonical_axis_index(input_shape, node.axis) - node.axis = start_axis - - reference_shape = shapes[1].copy() - if node.has_valid('axes'): - # The axes parameter contain shape indexes for second input and show which shape indexes we need to use for - # dim attribute. 
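
Crop._one_input_infer above supports two attribute layouts: either dim (the new size per cropped axis) or crop_begin/crop_end (amounts removed from each end of the axis). A compact standalone sketch of those two shape rules with placeholder values:

def crop_shape(input_shape, axis, dim=None, crop_begin=None, crop_end=None):
    # Mirrors the deleted _one_input_infer: 'dim' wins, otherwise begin/end are subtracted.
    out = list(input_shape)
    for i, a in enumerate(axis):
        out[a] = dim[i] if dim is not None else out[a] - crop_begin[i] - crop_end[i]
    return out

print(crop_shape([1, 3, 227, 227], axis=[2, 3], dim=[224, 224]))
# [1, 3, 224, 224]
print(crop_shape([1, 3, 227, 227], axis=[2, 3], crop_begin=[1, 1], crop_end=[2, 2]))
# [1, 3, 224, 224]
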
- input_dim = node.axes - node.in_port(1).disconnect() - else: - input_dim = list(range(0, input_shape.size)) - - # set new shape to current shape - new_shape = input_shape.copy() - ir_axis = [] - ir_offset = [] - dim = [] - - for i in input_dim: - if i < start_axis: - new_shape[i] = input_shape[i] - continue - - crop_offset = 0 - if len(node.offset) == 1: - crop_offset = node.offset[0] - elif len(node.offset) > 1: - crop_offset = node.offset[i - start_axis] - - if input_shape[i] - crop_offset < reference_shape[i]: - raise Error('The crop for dimension is out of bounds in node {}'.format(node_name)) - - dim.append(reference_shape[i]) - ir_axis.append(i) - ir_offset.append(crop_offset) - new_shape[i] = reference_shape[i] - - node.axis = ir_axis - node.offset = ir_offset - node['dim'] = dim - node.out_port(0).data.set_shape(new_shape) - - if node.in_node(0).has_valid('value') and \ - not getattr(node.graph.graph['cmd_params'], 'enable_ssd_gluoncv', False): - out_value = np.copy(node.in_node(0).value) - - slice_indexes = [] - for s in out_value.shape: - slice_indexes.append(slice(0, s)) - - for axis in input_dim: - slice_indexes[axis] = slice(0, new_shape[axis]) - out_value = out_value[tuple(slice_indexes)] - node.out_port(0).data.set_value(out_value) - - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) diff --git a/tools/mo/openvino/tools/mo/ops/ctc_greedy_decoder.py b/tools/mo/openvino/tools/mo/ops/ctc_greedy_decoder.py deleted file mode 100644 index 6966628aef3a68..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/ctc_greedy_decoder.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, compatible_dims -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class CTCGreedyDecoderOp(Op): - op = 'CTCGreedyDecoder' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - - 'infer': self.infer, - 'reinterp_shape': True, - - 'in_ports_count': 2, - 'out_ports_count': 1, - - 'ctc_merge_repeated': True - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - ('ctc_merge_repeated', lambda node: bool_to_str(node, 'ctc_merge_repeated')) - ] - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - assert len(connected_in_ports) == 2, \ - "Incorrect number of inputs for {} node".format(node_name) - - logits_shape = node.in_port(0).data.get_shape() - sequence_mask_shape = node.in_port(1).data.get_shape() - - # check shapes of input tensors - assert len(logits_shape) == 3, \ - 'Incorrect rank of logits for {} node'.format(node_name) - assert len(sequence_mask_shape) == 2, \ - 'Incorrect rank of sequence length tensor for {} node'.format(node_name) - assert compatible_dims(logits_shape[1], sequence_mask_shape[1]), \ - 'Batch dimensions of input tensors must be the same for {} node'.format(node_name) - assert compatible_dims(logits_shape[0], sequence_mask_shape[0]), \ - 'Time dimensions of input tensors must be the same for {} node'.format(node_name) - - batch_size = logits_shape[1] - time_size = logits_shape[0] - node.out_port(0).data.set_shape([batch_size, time_size, 1, 1]) diff --git 
a/tools/mo/openvino/tools/mo/ops/ctc_greedy_decoder_seq_len.py b/tools/mo/openvino/tools/mo/ops/ctc_greedy_decoder_seq_len.py deleted file mode 100644 index 1d65597a905b1c..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/ctc_greedy_decoder_seq_len.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, compatible_dims -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class CTCGreedyDecoderSeqLenOp(Op): - op = 'CTCGreedyDecoderSeqLen' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset6', - - 'infer': self.infer, - 'type_infer': self.type_infer, - - 'in_ports_count': 3, - 'out_ports_count': 2, - - 'merge_repeated': True, - 'classes_index_type': np.int32, - 'sequence_length_type': np.int32 - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - version = self.get_opset() - if version == 'opset6': - return [('classes_index_type', lambda node: np_data_type_to_destination_type(node.classes_index_type)), - ('sequence_length_type', lambda node: np_data_type_to_destination_type(node.sequence_length_type)), - ('merge_repeated', lambda node: bool_to_str(node, 'merge_repeated'))] - else: - raise Error('Unknown opset version "{}"'.format(version)) - - @staticmethod - def type_infer(node): - opset = node.get_opset() - if opset == 'opset6': - node.out_port(0).set_data_type(node.classes_index_type) - node.out_port(1).set_data_type(node.sequence_length_type) - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - assert len(connected_in_ports) in [2, 3], \ - "Incorrect number of inputs for {} node".format(node_name) - - logits_shape = node.in_port(0).data.get_shape() - sequence_len_shape = node.in_port(1).data.get_shape() - if len(node.in_nodes()) == 3: - blank_index_shape = node.in_port(2).data.get_shape() - assert len(blank_index_shape) == 1, \ - 'Incorrect rank of blank_index for {} node'.format(node_name) - - # check shapes of input tensors - assert len(logits_shape) == 3, \ - 'Incorrect rank of logits for {} node'.format(node_name) - - assert len(sequence_len_shape) == 1, \ - 'Incorrect rank of sequence length tensor for {} node'.format(node_name) - assert compatible_dims(logits_shape[0], sequence_len_shape[0]), \ - 'Batch dimensions of input tensors must be the same for {} node'.format(node_name) - - batch_size = logits_shape[0] - time_size = logits_shape[1] - if node.is_out_port_connected(0): - node.out_port(0).data.set_shape([batch_size, time_size]) - if node.is_out_port_connected(1): - node.out_port(1).data.set_shape([batch_size]) diff --git a/tools/mo/openvino/tools/mo/ops/ctc_loss.py b/tools/mo/openvino/tools/mo/ops/ctc_loss.py deleted file mode 100644 index 2066be6afbe8eb..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/ctc_loss.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, 
compatible_dims -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class CTCLoss(Op): - op = 'CTCLoss' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset4', - - 'type_infer': self.type_infer, - 'infer': self.infer, - - 'in_ports_count': 5, - 'out_ports_count': 1, - - 'preprocess_collapse_repeated': False, - 'ctc_merge_repeated': True, - 'unique': False - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return [('preprocess_collapse_repeated', lambda node: bool_to_str(node, 'preprocess_collapse_repeated')), - ('ctc_merge_repeated', lambda node: bool_to_str(node, 'ctc_merge_repeated')), - ('unique', lambda node: bool_to_str(node, 'unique'))] - - @staticmethod - def type_infer(node): - logits_type = node.in_port(0).get_data_type() - logit_length_type = node.in_port(1).get_data_type() - labels_type = node.in_port(2).get_data_type() - label_length_type = node.in_port(3).get_data_type() - blank_index_type = labels_type - if not node.in_port(4).disconnected(): - blank_index_type = node.in_port(4).get_data_type() - - assert logit_length_type == label_length_type and logit_length_type in [np.int64, np.int32], \ - 'Inputs with logits and labels lengths for node {} must be the same and int32 or int64, {} and {} found'.format( - node.soft_get('name'), logit_length_type, label_length_type) - assert labels_type == blank_index_type and labels_type in [np.int64, np.int32], \ - 'Inputs with labels and blank index for node {} must be the same and int32 or int64, {} and {} found'.format( - node.soft_get('name'), labels_type, blank_index_type) - - node.out_port(0).set_data_type(logits_type) - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - assert len(connected_in_ports) in [4, 5], \ - "Incorrect number of inputs for {} node".format(node_name) - - logits_shape = node.in_port(0).data.get_shape() - logit_length_shape = node.in_port(1).data.get_shape() - labels_shape = node.in_port(2).data.get_shape() - label_length_shape = node.in_port(3).data.get_shape() - blank_index_shape = int64_array([]) - if len(node.in_nodes()) == 5: - blank_index_shape = node.in_port(4).data.get_shape() - - # check shapes of input tensors - assert len(logits_shape) == 3 and len(logit_length_shape) == 1 and len(labels_shape) == 2\ - and len(label_length_shape) == 1 and len(blank_index_shape) == 0, \ - 'Incorrect rank of some input tensor for {} node'.format(node_name) - assert compatible_dims(logits_shape[0], logit_length_shape[0]) and \ - compatible_dims(logits_shape[0], labels_shape[0]) and \ - compatible_dims(logits_shape[0], label_length_shape[0]), \ - 'Batch dimensions of input tensors must be the same for {} node'.format(node_name) - assert compatible_dims(logits_shape[1], labels_shape[1]), \ - 'Time dimensions of input tensors must be the same for {} node'.format(node_name) - - batch_size = logits_shape[0] - node.out_port(0).data.set_shape([batch_size]) diff --git a/tools/mo/openvino/tools/mo/ops/cumsum.py b/tools/mo/openvino/tools/mo/ops/cumsum.py deleted file mode 100644 index 0cd72270a6153e..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/cumsum.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy 
as np - -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -def cumsum(a, axis=None, exclusive=False, reverse=False): - if reverse: - a = np.flip(a, axis) - res = np.cumsum(a, axis=axis) - if exclusive: - res -= a - if reverse: - res = np.flip(res, axis) - return res - - -class CumSum(Op): - enabled = False - op = 'CumSum' - version = 'opset3' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': self.version, - - 'infer': self.infer, - - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return [('exclusive', lambda node: bool_to_str(node, 'exclusive')), - ('reverse', lambda node: bool_to_str(node, 'reverse'))] - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None, 'Input shape is None for node "{}"'.format(node_name) - if not node.in_port(1).disconnected(): - assert len(node.in_port(1).data.get_shape()) == 0, 'Axis is not scalar for node: {}'.format(node_name) - - node.out_port(0).data.set_shape(input_shape.copy()) - - input_value = node.in_port(0).data.get_value() - if input_value is not None: - axis = None if node.in_port(1).disconnected() else node.in_port(1).data.get_value() - reverse = node.reverse if node.has_valid('reverse') else False - exclusive = node.exclusive if node.has_valid('exclusive') else False - node.out_port(0).data.set_value(cumsum(input_value, axis=axis, reverse=reverse, exclusive=exclusive)) diff --git a/tools/mo/openvino/tools/mo/ops/deconvolution.py b/tools/mo/openvino/tools/mo/ops/deconvolution.py deleted file mode 100644 index 5dc917ec993e7d..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/deconvolution.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins, assign_dims_to_weights, tf_window_op_pad_infer, \ - shape_array, compatible_shapes -from openvino.tools.mo.front.onnx.extractors.utils import get_backend_pad -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op, PermuteAttrs - - -class Deconvolution(Op): - op = 'Deconvolution' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'infer': self.infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - }, attrs) - - def backend_attrs(self): - return [ - ('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims]))), - ('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))), - ('pads_begin', - lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0))) if node.has_valid( - 'pad') else None), - ('pads_end', - lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1))) if node.has_valid( - 'pad') else None), - 'auto_pad', - ] - - @staticmethod - def infer(node: Node): - """ - Deconvolution has an input argument that explicitly determines output shape, so in contrast - to the forward Conv2d we shouldn't infer output shape. 
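Editor's note (refers back to the cumsum helper deleted just above): the exclusive/reverse flags can be sanity-checked with plain NumPy. The helper is restated here so the snippet runs on its own; the behaviour is as in the deleted file.

```python
import numpy as np

def cumsum(a, axis=None, exclusive=False, reverse=False):
    if reverse:
        a = np.flip(a, axis)
    res = np.cumsum(a, axis=axis)
    if exclusive:
        res -= a          # shift: element i no longer includes a[i]
    if reverse:
        res = np.flip(res, axis)
    return res

x = np.array([1, 2, 3, 4])
print(cumsum(x))                                # [ 1  3  6 10]
print(cumsum(x, exclusive=True))                # [0 1 3 6]
print(cumsum(x, reverse=True))                  # [10  9  7  4]  (suffix sums)
print(cumsum(x, exclusive=True, reverse=True))  # [9 7 4 0]
```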
We just use this output shape as - an input shape and pass it to our utilities that computes numeric values for padding. - They also deliver output shape that is interpreted here as input shape for convolution. - We need to check that the real input shape and shape inferred by those utility functions match. - """ - output_shape = shape_array(node.in_node(2).value) - output_shape[0] = node.in_port(0).data.get_shape()[0] - kernel_shape = node.in_port(1).data.get_shape() - node['kernel_shape'] = kernel_shape - if output_shape is None or kernel_shape is None or node.spatial_dims is None or node.stride is None: - return - - if not node.has_valid('kernel_spatial_idx'): - node['kernel_spatial_idx'] = np.delete([x for x in range(len(kernel_shape))], - (node.input_feature_channel, node.output_feature_channel)) - - if not node.has_valid('dilation'): - node['dilation'] = np.full([len(output_shape)], 1, dtype=np.int64) - - if node.has_valid('get_group'): - node['group'] = node.get_group(node) - - spatial_dims = node.spatial_dims - output_spatial = shape_array(output_shape[spatial_dims]) - stride_spatial = shape_array(node.stride[spatial_dims]) - node['kernel_spatial'] = shape_array(kernel_shape[node.kernel_spatial_idx]) - node.pad_spatial_shape, input_spatial_for_check = tf_window_op_pad_infer( - output_spatial, node.kernel_spatial, stride_spatial, node.auto_pad) - - assert compatible_shapes(input_spatial_for_check, node.in_node(0).shape[spatial_dims]) - - pad = np.zeros((len(output_shape), 2), dtype=np.int64) - pad[spatial_dims] = node.pad_spatial_shape - node.pad = pad - - node.output = output_shape[node.channel_dims][0] - node.output_shape = output_shape - node.out_port(0).data.set_shape(output_shape) - - mark_input_bins(node, ['weights'], 1) - assign_dims_to_weights(node.in_node(1), node.kernel_spatial_idx, node.input_feature_channel, - node.output_feature_channel, len(kernel_shape)) - - # OK, now we are sure this is a supported Deconvolution layer - node.type = 'Deconvolution' - node.op = 'Deconv2D' - - # Add permute_attrs - PermuteAttrs.create_permute_attrs(node, attrs=[('pad', 'input:0'), - ('stride', 'input:0'), - ('output_shape', 'input:0'), - ('batch_dims', 'input:0'), - ('channel_dims', 'input:0'), - ('spatial_dims', 'input:0'), - - ('kernel_shape', 'input:1'), - ('kernel_spatial_idx', 'input:1'), - ('input_feature_channel', 'input:1'), - ('output_feature_channel', 'input:1'), - ]) - - # is needed to permute Deconv weights from the original TF [H, W, C_OUT, C_IN] into OV [C_IN, C_OUT, H, W] - # but for other nodes in weights subgraph permutations must turned off - # by marking with MarkSubGraphsWithCorrectLayout even if graph layout is NCHW. 
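Editor's note: the consistency check described in the Deconvolution docstring above can be illustrated with a toy 1-D sketch. conv_spatial_size below is a simplified stand-in for tf_window_op_pad_infer (TF-style formulas, no dilation), so treat it as an illustration of the idea rather than the tool's exact utility.

```python
import math

def conv_spatial_size(size, kernel, stride, auto_pad):
    # TF-style spatial size of a *forward* convolution (1D, no dilation)
    if auto_pad == 'valid':
        return math.ceil((size - kernel + 1) / stride)
    return math.ceil(size / stride)  # 'same_upper' / 'same_lower'

# Deconvolution receives its output spatial shape explicitly (third input),
# so the sanity check runs that shape through the forward-conv formula and
# expects to land back on the real input spatial shape.
declared_output_spatial, kernel, stride = 32, 3, 2
expected_input_spatial = conv_spatial_size(declared_output_spatial, kernel, stride, 'same_upper')
print(expected_input_spatial)  # 16 -- must match the Deconvolution data input's spatial dim
```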
- PermuteAttrs.set_permutation(node.in_node(1), node, node.soft_get('get_weights_permute', None)) - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:1', 'transpose') - PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'shape') - - node['force_precision_in_ports'] = {2: 'int64'} diff --git a/tools/mo/openvino/tools/mo/ops/deformable_convolution.py b/tools/mo/openvino/tools/mo/ops/deformable_convolution.py deleted file mode 100644 index 5a7fe12ceb8faf..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/deformable_convolution.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import undefined_shape_of_rank -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.ops.op import Op - - -class DeformableConvolution(Op): - op = 'DeformableConvolution' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': __class__.op, - 'op': __class__.op, - 'version': 'opset8', - 'infer': Convolution.infer, - 'group': 1, - 'deformable_group': 1, - 'multiplication_transparent': True, - 'multiplication_transparent_ports': [(0, 0), (2, 0)], - 'in_ports_count': 3, - 'out_ports_count': 1, - 'bilinear_interpolation_pad': False, - }, attrs) - - def backend_attrs(self): - # the same attributes as in a regular convolution and additional attributes 'deformable_group', 'group' - # and 'bilinear_interpolation_pad' - attrs = Convolution(self.graph, {}).backend_attrs() + ['deformable_group', 'group'] - if self.get_opset() == 'opset8': - attrs.append('bilinear_interpolation_pad') - return attrs - - @staticmethod - def reverse_infer(node: Node): - input_shape_1 = node.in_port(0).data.get_shape() - input_shape_2 = node.in_port(1).data.get_shape() - if input_shape_1 is None: - node.in_port(0).data.set_shape(undefined_shape_of_rank(4)) - if input_shape_2 is None: - node.in_port(1).data.set_shape(undefined_shape_of_rank(4)) diff --git a/tools/mo/openvino/tools/mo/ops/depth_to_space.py b/tools/mo/openvino/tools/mo/ops/depth_to_space.py deleted file mode 100644 index 741c7373927f6b..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/depth_to_space.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.layout import shape_for_layout, get_height_dim, get_batch_dim, get_features_dim, get_width_dim -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, is_fully_defined -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class DepthToSpaceOp(Op): - op = 'DepthToSpace' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - - 'mode': 'blocks_first', - - 'infer': self.infer, - - 'in_ports_count': 1, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return ['mode', 'block_size'] - - @staticmethod - def infer(node: Node): - in_shape = node.in_port(0).data.get_shape() - if in_shape.size != 4: - raise Error('TensorFlow DepthToSpace operation is supported for 4D \'NHWC\' input layout only. 
' - 'Current input shape is \'{}\''.format(in_shape)) - - layout = node.graph.graph['layout'] - - N = in_shape[get_batch_dim(layout, 4)] - H = in_shape[get_height_dim(layout, 4)] - W = in_shape[get_width_dim(layout, 4)] - C = in_shape[get_features_dim(layout, 4)] - - block_size = node['block_size'] - if C is not dynamic_dimension and C % (block_size ** 2): - raise Error('Feature dimensions of input tensor of DepthToSpace operation have to be divisible by square ' - 'of DepthToSpace \'block_size\' parameter. Input tensor shape = {}. Feature dimension = {}. ' - 'block_size = {}'.format(in_shape, C, block_size)) - - out_shape = shape_for_layout(layout, - batch=N, - features=C // (block_size * block_size), - height=H * block_size, - width=W * block_size) - - if is_fully_defined(in_shape) and is_fully_defined(out_shape) and np.prod(in_shape) != np.prod(out_shape): - raise Error('Number of input elements "{}" is not equal to number of output elements "" for node "{}"' - ''.format(in_shape, out_shape, node.soft_get('name', node.id))) - node.out_port(0).data.set_shape(out_shape) diff --git a/tools/mo/openvino/tools/mo/ops/dequantize_linear.py b/tools/mo/openvino/tools/mo/ops/dequantize_linear.py deleted file mode 100644 index b2ae98107ab865..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/dequantize_linear.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class DequantizeLinear(Op): - op = 'DequantizeLinear' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'axis': None, - 'version': None, - 'infer': copy_shape_infer, - 'out_ports_count': 1, - 'in_ports_count': 3, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return ['axis'] diff --git a/tools/mo/openvino/tools/mo/ops/detection_output_onnx.py b/tools/mo/openvino/tools/mo/ops/detection_output_onnx.py deleted file mode 100644 index f4ab98497f6403..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/detection_output_onnx.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, set_input_shapes -from openvino.tools.mo.ops.op import Op - - -class ExperimentalDetectronDetectionOutput(Op): - op = 'ExperimentalDetectronDetectionOutput' - enabled = True - - def __init__(self, graph, attrs): - mandatory_props = dict( - type=self.op, - op=self.op, - version='opset6', - infer=self.infer, - reverse_infer=self.reverse_infer, - type_infer=self.type_infer, - in_ports_count=4, - out_ports_count=3, - ) - - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return [ - ('class_agnostic_box_regression', lambda node: str(bool(node['class_agnostic_box_regression'])).lower()), - 'max_detections_per_image', - 'nms_threshold', - 'num_classes', - 'post_nms_count', - 'score_threshold', - 'max_delta_log_wh', - ('deltas_weights', lambda node: ','.join(map(str, node['deltas_weights'])))] - - @staticmethod - def infer(node): - rois_num = node.max_detections_per_image - # boxes - node.out_port(0).data.set_shape([rois_num, 4]) - # classes, scores, batch indices - # We use range(1, 1 + 
max(node.out_ports().keys())) instead of range(1, 3), because there are incorrectly - # generated models where ExperimentalDetectronDetectionOutput has 4 outputs. - for port_ind in range(1, 1 + max(node.out_ports().keys())): - if not node.out_port(port_ind).disconnected(): - node.out_port(port_ind).data.set_shape([rois_num]) - - @staticmethod - def type_infer(node): - in_data_type = node.in_port(0).get_data_type() - node.out_port(0).set_data_type(in_data_type) - node.out_port(1).set_data_type(np.int32) # the second output contains class indices - node.out_port(2).set_data_type(in_data_type) - if node.is_out_port_connected(3): - node.out_port(3).set_data_type(np.int32) # the fourth output contains batch indices - - @staticmethod - def reverse_infer(node): - set_input_shapes(node, - shape_array([dynamic_dimension_value, 4]), - shape_array([dynamic_dimension_value, node['num_classes'] * 4]), - shape_array([dynamic_dimension_value, node['num_classes']]), - shape_array([1, 3])) diff --git a/tools/mo/openvino/tools/mo/ops/dft.py b/tools/mo/openvino/tools/mo/ops/dft.py deleted file mode 100644 index 5ca6e2eb9326a7..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/dft.py +++ /dev/null @@ -1,276 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class FFTBase(Op): - enabled = False - op = None - version = 'opset7' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'out_ports_count': 1, - 'in_ports_count': 3, - 'version': self.version, - 'infer': self.infer - } - super().__init__(graph, mandatory_props, attrs) - - def infer(self, node: Node): - node_name = node.soft_get(node.name, node.id) - assert len([p for p in node.in_ports().values() if not p.disconnected()]) in [2, 3], \ - '(I)DFT node {} must have 2 or 3 inputs'.format(node_name) - - src_shape = node.in_port(0).data.get_shape() - assert src_shape is not None, 'The input data shape of (I)DFT node {} must not be None'.format(node_name) - assert src_shape[-1] == 2, \ - 'The last dimension of input shape of (I)DFT node {} should be equal to 2'.format(node_name) - - input_rank = len(src_shape) - assert input_rank >= 2, 'The input rank of (I)DFT node {} should be greater or equal to 2'.format(node_name) - - axes = FFTBase.get_axes(node) - assert input_rank >= len(axes) + 1, \ - 'The input rank must be greater than number of (I)DFT node {} axes'.format(node_name) - axes = FFTBase.canonicalize_axes(axes, input_rank) - assert (input_rank - 1) not in axes, '(I)DFT node {} axes cannot contain the last axis'.format(node_name) - assert len(set(axes)) == len(axes), '(I)DFT node {} axes must be unique.'.format(node_name) - - output_shape = src_shape.copy() - if node.is_in_port_connected(2): - signal_size = FFTBase.get_signal_size(node) - signal_size = FFTBase.canonicalize_signal_size(signal_size, axes, src_shape) - output_shape[axes] = signal_size - - node.out_port(0).data.set_shape(output_shape) - - @staticmethod - def canonicalize_axes(axes, input_rank): - """ - FFT operation supports for negative axes to transform. More precisely, according to the FFT operation - specification, axes should be integers from -(r - 1) to (r - 2) inclusively, where r = rank(data). - A negative axis 'a' is interpreted as an axis 'r - 1 + a'. 
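Editor's note: the 'r - 1 + a' rule just stated is easy to see on a concrete case. A tiny standalone helper (name ours) applying it:

```python
def canonicalize_dft_axes(axes, input_rank):
    # negative axis 'a' of a real [n_0, ..., n_{r-1}, 2] tensor maps to 'r - 1 + a'
    return [a + input_rank - 1 if a < 0 else a for a in axes]

# a rank-4 real input ([N, H, W, 2]) behaves as a rank-3 complex tensor:
print(canonicalize_dft_axes([-2, -1], 4))  # [1, 2]
print(canonicalize_dft_axes([0, 1], 4))    # [0, 1]
```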
The reason is the following: real input - tensor of the shape [n_0, ..., n_{r - 1}, 2] is interpreted as a complex tensor with the shape - [n_0, ..., n_{r - 1}]. Hence, we need to 'canonicalize' axes using the formula 'r - 1 + a'. - - :param axes: axes to canonicalize - :param input_rank: input tensor rank - :return: canonicalized axes - """ - result = axes.copy() - for i, axis in enumerate(axes): - if axis < 0: - result[i] = axis + input_rank - 1 - return result - - @staticmethod - def canonicalize_signal_size(signal_size, axes, input_shape): - result = signal_size.copy() - for i, axis in enumerate(axes): - size = signal_size[i] - if size == -1: - result[i] = input_shape[axis] - return result - - @staticmethod - def get_axes(node: Node): - axes = node.in_port(1).get_source().data.get_value() - node_name = node.soft_get('name', node.id) - assert axes is not None, 'The input with axes is not constant for node {}'.format(node_name) - return int64_array(axes) - - @staticmethod - def get_signal_size(node: Node): - src_shape = node.in_port(0).data.get_shape() - assert src_shape is not None - input_rank = len(src_shape) - if node.is_in_port_connected(2): - signal_size = node.in_port(2).get_source().data.get_value() - else: - axes = FFTBase.get_axes(node) - signal_size = [src_shape[: input_rank - 1][a] for a in axes] - - node_name = node.soft_get('name', node.id) - assert signal_size is not None, 'The input with signal_size is not constant for node {}'.format(node_name) - - return int64_array(signal_size) - - -class DFT(FFTBase): - op = 'DFT' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - } - mandatory_props.update(attrs) - super().__init__(graph, mandatory_props) - - -class IDFT(FFTBase): - op = 'IDFT' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - } - mandatory_props.update(attrs) - super().__init__(graph, mandatory_props) - - -class RDFT(Op): - op = 'RDFT' - enabled = False - version = 'opset9' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'out_ports_count': 1, - 'in_ports_count': 3, - 'version': self.version, - 'infer': self.infer, - 'type': self.op, - 'op': self.op, - } - mandatory_props.update(attrs) - super().__init__(graph, mandatory_props) - - def infer(self, node: Node): - node_name = node.soft_get(node.name, node.id) - assert len([p for p in node.in_ports().values() if not p.disconnected()]) in [2, 3], \ - 'RDFT node {} must have 2 or 3 inputs'.format(node_name) - - src_shape = node.in_port(0).data.get_shape() - assert src_shape is not None, 'The input data shape of RDFT node {} must not be None'.format(node_name) - - input_rank = len(src_shape) - assert input_rank >= 1, 'The input rank of RDFT node {} should be greater or equal to 1'.format(node_name) - - axes = RDFT.get_axes(node) - assert input_rank >= len(axes), \ - 'The input rank must be greater than or equal to number of RDFT node {} axes'.format(node_name) - axes = RDFT.canonicalize_axes(axes, input_rank) - assert len(set(axes)) == len(axes), 'RDFT node {} axes must be unique.'.format(node_name) - - output_shape = src_shape.copy() - if node.is_in_port_connected(2): - signal_size = RDFT.get_signal_size(node) - signal_size = RDFT.canonicalize_signal_size(signal_size, axes, src_shape) - output_shape[axes] = signal_size - output_shape[axes[-1]] = output_shape[axes[-1]] // 2 + 1 - output_shape = np.ma.append(output_shape, 2) - - 
node.out_port(0).data.set_shape(output_shape) - - @staticmethod - def canonicalize_axes(axes, input_rank): - """ - RDFT operation supports for negative axes to transform. More precisely, according to the RDFT operation - specification, axes should be integers from -r to (r - 1) inclusively, where r = rank(data). A negative - axis 'a' is interpreted as an axis 'r + a'. Hence, we need to 'canonicalize' axes using the formula 'r + a'. - - :param axes: axes to canonicalize - :param input_rank: input tensor rank - :return: canonicalized axes - """ - result = axes.copy() - for i, axis in enumerate(axes): - if axis < 0: - result[i] = axis + input_rank - return result - - @staticmethod - def canonicalize_signal_size(signal_size, axes, input_shape): - result = signal_size.copy() - for i, axis in enumerate(axes): - size = signal_size[i] - if size == -1: - result[i] = input_shape[axis] - return result - - @staticmethod - def get_axes(node: Node): - axes = node.in_port(1).get_source().data.get_value() - node_name = node.soft_get('name', node.id) - assert axes is not None, 'The input with axes is not constant for node {}'.format(node_name) - return int64_array(axes) - - @staticmethod - def get_signal_size(node: Node): - src_shape = node.in_port(0).data.get_shape() - assert src_shape is not None - input_rank = len(src_shape) - if node.is_in_port_connected(2): - signal_size = node.in_port(2).get_source().data.get_value() - else: - axes = RDFT.get_axes(node) - signal_size = [src_shape[: input_rank][a] for a in axes] - - node_name = node.soft_get('name', node.id) - assert signal_size is not None, 'The input with signal_size is not constant for node {}'.format(node_name) - - return int64_array(signal_size) - - -class IRDFT(FFTBase): - enabled = False - op = 'IRDFT' - version = 'opset9' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'out_ports_count': 1, - 'in_ports_count': 3, - 'version': self.version, - 'infer': self.infer, - 'type': self.op, - 'op': self.op, - } - mandatory_props.update(attrs) - super().__init__(graph, mandatory_props) - - def infer(self, node: Node): - node_name = node.soft_get(node.name, node.id) - assert len([p for p in node.in_ports().values() if not p.disconnected()]) in [2, 3], \ - 'IRDFT node {} must have 2 or 3 inputs'.format(node_name) - - src_shape = node.in_port(0).data.get_shape() - assert src_shape is not None, 'The input data shape of IRDFT node {} must not be None'.format(node_name) - assert src_shape[-1] == 2, \ - 'The last dimension of input shape of IRDFT node {} should be equal to 2'.format(node_name) - - input_rank = len(src_shape) - assert input_rank >= 2, 'The input rank of IRDFT node {} should be greater or equal to 2'.format(node_name) - - axes = FFTBase.get_axes(node) - assert input_rank >= len(axes) + 1, \ - 'The input rank must be greater than number of IRDFT node {} axes'.format(node_name) - axes = FFTBase.canonicalize_axes(axes, input_rank) - assert (input_rank - 1) not in axes, 'IRDFT node {} axes cannot contain the last axis'.format(node_name) - assert len(set(axes)) == len(axes), 'IRDFT node {} axes must be unique.'.format(node_name) - - output_shape = src_shape.copy() - input_rank = len(output_shape) - output_shape = output_shape[0: input_rank - 1] - if node.is_in_port_connected(2): - signal_size = FFTBase.get_signal_size(node) - for i, axis in enumerate(axes): - if signal_size[i] != -1: - output_shape[axis] = signal_size[i] - if signal_size[-1] == -1: - output_shape[axes[-1]] = 2 * (src_shape[axes[-1]] - 1) - else: - 
output_shape[axes[-1]] = 2 * (src_shape[axes[-1]] - 1) - - node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/dropoutmask.py b/tools/mo/openvino/tools/mo/ops/dropoutmask.py deleted file mode 100644 index e6062d62ae3e45..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/dropoutmask.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class DropoutMask(Op): - """ - Operation for dropout proportion, it will be replaced by broadcast constant on front stage - """ - op = 'dropoutmaskcomponent' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'dropout_proportion': None, - 'type': None, # type is None because this operation should not appear in IR - 'infer': None, - 'in_ports_count': 0, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/einsum.py b/tools/mo/openvino/tools/mo/ops/einsum.py deleted file mode 100644 index ef3c58d5286367..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/einsum.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import re - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.broadcasting import bi_directional_shape_broadcasting - - -class Einsum(Op): - op = 'Einsum' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset7', - 'infer': self.infer, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return ['equation'] - - @staticmethod - def is_label_elsewhere(input_subscripts: list, label_to_check: str, excluded_subscript_inds: list) -> bool: - """ - Check if the given label is met in input subscripts excluding ones specified by a list of indices - excluded_subscript_inds - - :param input_subscripts: input subscripts among which to check if the label is met - :param label_to_check: a label to check - :param excluded_subscript_inds: indices of input subscripts to be excluded for this check - :return: True - met, False - otherwise - """ - for ind, input_subscript in enumerate(input_subscripts): - if ind not in excluded_subscript_inds and label_to_check in input_subscript: - return True - return False - - @staticmethod - def parse_equation(node_name: str, equation: str) -> (list, str): - """ - Parse Einsum equation and check that its format is correct to make sure that - all input subscripts consists of only alphabetic letters or alphabetic letters with one ellipsis. - In case of implicit mode the method recovers the right-hand part. 
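Editor's note: the IRDFT shape arithmetic above (for the default case with no signal_size override) boils down to dropping the trailing size-2 real/imag dimension and expanding the last transformed axis to 2 * (n - 1). A minimal sketch with a helper name of our own:

```python
def irdft_output_shape(src_shape, axes):
    assert src_shape[-1] == 2, 'real/imag pairs expected in the last dimension'
    out = list(src_shape[:-1])                      # drop the complex dimension
    out[axes[-1]] = 2 * (src_shape[axes[-1]] - 1)   # restore the real signal length
    return out

# complex spectrum [1, 161, 100, 2] over axes [1, 2] -> real signal [1, 161, 198]
print(irdft_output_shape([1, 161, 100, 2], [1, 2]))  # [1, 161, 198]
```

This is consistent with the RDFT rule above it, where the last transformed axis becomes n // 2 + 1 and a trailing dimension of size 2 is appended.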
- - :param node_name: Einsum node name for which to parse an equation - :param equation: Equation to be parsed and checked - :return: A tuple of a list of input subscripts and output subscript - """ - # normalize equation by removing white-spaces - equation = equation.strip() - - # split equation into the left and right hands - splitted_equation = equation.split('->') - assert len(splitted_equation) <= 2, "Einsum node {} has `equation` of incorrect format".format(node_name) - - # split left-hand side of the equation and check a format of input subscripts - input_subscripts = splitted_equation[0] - input_subscripts_list = input_subscripts.split(',') - - # prepare pattern to check a format of subscripts - subscript_pattern = re.compile("^[a-zA-Z]*(\\.\\.\\.){0,1}[a-zA-Z]*$") - ellipsis_pattern = re.compile("\\.\\.\\.") - - is_ellipsis_met = False - for input_subscript in input_subscripts_list: - assert re.match(subscript_pattern, input_subscript) is not None, \ - "Einsum node {} has `equation` with incorrect input subscript: {}".format(node_name, input_subscript) - is_ellipsis_met = is_ellipsis_met or re.search(ellipsis_pattern, input_subscript) - - if len(splitted_equation) == 2: - output_subscript = splitted_equation[1] - assert re.match(subscript_pattern, output_subscript), \ - "Einsum node {} has `equation` with incorrect output subscript: {}".format(node_name, output_subscript) - # if ellipsis is met, the output subscript must contain it as well - if is_ellipsis_met: - assert re.search(ellipsis_pattern, output_subscript), \ - "The output subscript of Einsum node {} must contain ellipsis".format(node_name) - elif len(splitted_equation) == 1: - # recover output subscript in case implicit mode - output_subscript = "" - for ind, input_subscript in enumerate(input_subscripts_list): - labels = Einsum.extract_subscript_labels(node_name, input_subscript) - for label in labels: - if Einsum.is_label_elsewhere(input_subscripts_list, label, [ind]) is False: - output_subscript += label - output_subscript = ''.join(sorted(list(set(output_subscript) - {'.'}))) - if is_ellipsis_met: - output_subscript = "..." + output_subscript - else: - assert False, "Einsum node {} equation has incorrect format. " \ - "It must be in either explicit or implicit mode.".format(node_name) - - return input_subscripts_list, output_subscript - - @staticmethod - def normalize_equation(node_name: str, equation: str) -> str: - """ - Recover explicit mode of equation. - - :param node_name: Einsum node name for which to recover explicit mode - :param equation: Einsum equation to recover explicit mode - :return: Recovered equation in explicit mode - """ - input_subscripts_list, output_subscript = Einsum.parse_equation(node_name, equation) - return ','.join(input_subscripts_list) + "->" + output_subscript - - @staticmethod - def extract_subscript_labels(node_name: str, subscript: str) -> list: - """ - Extract labels for given subscript. 
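Editor's note: the implicit-mode recovery performed by parse_equation above follows the rule "a label goes to the output iff it does not appear in any other input subscript", with the result sorted alphabetically. A simplified standalone version (no ellipsis handling, names ours) that can be cross-checked against numpy:

```python
import numpy as np

def recover_output_subscript(input_subscripts):
    out = set()
    for ind, subscript in enumerate(input_subscripts):
        others = ''.join(s for i, s in enumerate(input_subscripts) if i != ind)
        out.update(label for label in subscript if label not in others)
    return ''.join(sorted(out))

print(recover_output_subscript(['ab', 'bc']))  # 'ac'  (matrix multiply)
print(recover_output_subscript(['ij', 'ij']))  # ''    (full contraction)

# numpy's implicit mode agrees with the recovered explicit form:
a, b = np.ones((2, 3)), np.ones((3, 4))
assert np.array_equal(np.einsum('ab,bc', a, b), np.einsum('ab,bc->ac', a, b))
```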
Each label can be either alphabetic letter or ellipsis - - :param node_name: Einsum node name - :param subscript: Given subscript - :return: A list of labels - """ - labels = [] - len_subscript = len(subscript) - label_ind = 0 - while label_ind < len_subscript: - if subscript[label_ind].isalpha(): - labels.append(subscript[label_ind]) - label_ind += 1 - elif len_subscript - label_ind > 2 and subscript[label_ind:label_ind + 3] == "...": - labels.append("...") - label_ind += 3 - else: - assert False, "Einsum node {} has `equation` with incorrect subscript: {}".format(node_name, subscript) - return labels - - @staticmethod - def adjust_equation_with_NCHW_layout(node_name: str, equation: str, input_ranks: list, output_rank: int, - input_correct_layout_mask: list, output_correct_layout_mask: bool) -> ( - str, list, bool): - """ - In order to satisfy NCHW layout, subscripts for tensors with rank greater than three must be adjusted by moving labels - of the last dimension to the second position in the subscript. There is an exception for such tensors when - the label is ellipsis and it covers multiple tail dimensions. The method returns equation with adjusted subscripts - to NCHW layout along with a boolean mask to indicate which subscripts are adjusted. - - :param node_name: Einsum node name for which equation is adjusted - :param equation: Equation to be adjusted - :param input_ranks: a list of input ranks - :param output_rank: output rank - :return: adjusted equation, boolean mask for inputs, and boolean flag if output subscript is adjusted - """ - is_inputs_adjusted = [] - input_subscripts, output_subscript = Einsum.parse_equation(node_name, equation) - num_inputs = len(input_ranks) - assert len(input_subscripts) == num_inputs, "The number of inputs must match a number " \ - "of input subscripts" - assert len(input_correct_layout_mask) == num_inputs, "The number of inputs must match a number " \ - "elements in input_correct_layout_mask list" - - # permute labels in input subscripts and mark inputs for which inference in NCHW layout is acceptable - # in case ellipsis covering multiple dimensions in the end, the permutation is impossible - # so the corresponding input must be in the original format (NHWC) - permuted_input_subscripts = [] - for input_ind in range(num_inputs): - input_subscript = input_subscripts[input_ind] - input_rank = input_ranks[input_ind] - labels = Einsum.extract_subscript_labels(node_name, input_subscript) - num_broadcasted_dims = input_rank - len(labels) + 1 - if input_correct_layout_mask[input_ind]: - is_inputs_adjusted.append(True) - elif input_rank > 3 and (labels[-1] != "..." or labels[-1] == "..." and num_broadcasted_dims == 1): - is_inputs_adjusted.append(True) - labels.insert(1, labels[-1]) - del labels[-1] - else: - is_inputs_adjusted.append(False) - permuted_input_subscript = ''.join(labels) - permuted_input_subscripts.append(permuted_input_subscript) - - # perform the same procedure for the output subscript as for the inputs subscripts - labels = Einsum.extract_subscript_labels(node_name, output_subscript) - num_broadcasted_dims = output_rank - len(labels) + 1 - if output_correct_layout_mask: - is_output_adjusted = True - elif output_rank > 3 and (labels[-1] != "..." or labels[-1] == "..." 
and num_broadcasted_dims == 1): - is_output_adjusted = True - labels.insert(1, labels[-1]) - del labels[-1] - else: - is_output_adjusted = False - permuted_output_subscript = ''.join(labels) - - # concatenate the left and right hands of the resulted equation - left_hand = ','.join(permuted_input_subscripts) - right_hand = permuted_output_subscript - permuted_equation = left_hand + "->" + right_hand - return permuted_equation, is_inputs_adjusted, is_output_adjusted - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - num_inputs = len(connected_in_ports) - assert node.has_valid('equation'), "Einsum node {} must contain `equation` attribute".format(node_name) - equation = node.equation - - # parse the equation and extract input and output subscripts - input_subscripts, output_subscript = Einsum.parse_equation(node_name, equation) - - # check that each operand has the corresponding input subscript - assert len(input_subscripts) == num_inputs, "The number of input operands of Einsum node {} " \ - "must match the number of input subscripts " \ - "in `equation`".format(node_name) - - # check compatibility of dimension sizes with the same label and generate a dictionary of shapes for labels - label_to_shape = {} - for input_ind in range(num_inputs): - input_shape = node.in_port(input_ind).data.get_shape() - input_subscript = input_subscripts[input_ind] - labels = Einsum.extract_subscript_labels(node_name, input_subscript) - num_dims = len(input_shape) - num_labels = len(labels) - num_broadcasted_dims = num_dims - num_labels + 1 - dim_ind = 0 - label_ind = 0 - while label_ind < num_labels and dim_ind < num_dims: - label = labels[label_ind] - if label == "...": - sub_shape = input_shape[dim_ind:dim_ind + num_broadcasted_dims] - if label in label_to_shape.keys(): - common_shape = bi_directional_shape_broadcasting(sub_shape, label_to_shape[label]) - assert common_shape is not None, "The dimensions labeled of ellipsis must be broadcastable " \ - "for Einsum node {}".format(node_name) - label_to_shape[label] = common_shape - else: - label_to_shape[label] = sub_shape - dim_ind += num_broadcasted_dims - else: - dim_size = input_shape[dim_ind] - sub_shape = shape_array([dim_size]) - assert label not in label_to_shape.keys() or np.array_equal(label_to_shape[label], sub_shape), \ - "Sizes of dimensions with the same label of Einsum node {} " \ - "must be compatible".format(node_name) - label_to_shape[label] = sub_shape - dim_ind += 1 - label_ind += 1 - - # generate output shape based on the output subscript - output_shape = shape_array([]) - labels = Einsum.extract_subscript_labels(node_name, output_subscript) - for label in labels: - assert label in label_to_shape.keys(), "The label in the output subscript must appear" \ - " in input subscripts in equation {} " \ - "of Einsum node {}".format(equation, node_name) - output_shape = np.ma.concatenate((output_shape, label_to_shape[label])) - - node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/elementwise.py b/tools/mo/openvino/tools/mo/ops/elementwise.py deleted file mode 100644 index 0c9a6f8249a5ae..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/elementwise.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer, 
bias_add_infer, eltwise_reverse_infer -from openvino.tools.mo.front.common.partial_infer.utils import float32_array, reverse_bypass_infer -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.infer import copy_type_infer -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.type_utils import override_data_type_of_constant - - -class Elementwise(Op): - enabled = False - operation = None - op = None - op_type = None - version = 'opset1' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op_type, - 'version': self.version, - 'infer': lambda node: eltwise_infer(node, self.operation), - 'reverse_infer': eltwise_reverse_infer, - 'type_infer': self.type_infer, - 'can_be_bias': True, - 'can_be_fused': True, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'is_eltwise': True, - 'stop_value_propagation': False, - 'auto_broadcast': 'numpy' - }, attrs) - - @staticmethod - def type_infer(node): - override_data_type_of_constant(node) - node.out_port(0).set_data_type(node.in_port(0).get_data_type()) - - def backend_attrs(self): - return ['auto_broadcast'] - - -class UnaryElementwise(Elementwise): - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, {**{ - 'in_ports_count': 1, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - }, **attrs}) - - @staticmethod - def type_infer(node): - copy_type_infer(node) - - def backend_attrs(self): - return [] - - -class Add(Elementwise): - op = 'Add' - op_type = 'Add' - operation = staticmethod(lambda a, b: a + b) - - -class BiasAdd(Add): - op_type = 'BiasAdd' - - def __init__(self, graph: Graph, attrs: dict): - attrs.update({'infer': lambda node: bias_add_infer(node, self.operation)}) - super().__init__(graph, attrs) - - -class Sub(Elementwise): - op = 'Sub' - op_type = 'Subtract' - operation = staticmethod(lambda a, b: a - b) - - -class Mul(Elementwise): - op = 'Mul' - op_type = 'Multiply' - operation = staticmethod(lambda a, b: a * b) - - -def both_types_are_integer(a, b): - return np.issubdtype(a.dtype, np.integer) and np.issubdtype(b.dtype, np.integer) - - -class Div(Elementwise): - op = 'Div' - op_type = 'Divide' - operation = staticmethod(lambda a, b: a // b if both_types_are_integer(a, b) else a / b) - - -class SquaredDifference(Elementwise): - op = 'SquaredDifference' - op_type = 'SquaredDifference' - operation = staticmethod(lambda a, b: (a - b) * (a - b)) - - -class Pow(Elementwise): - op = 'Pow' - op_type = 'Power' - - @staticmethod - def operation(a, b): - if np.any(b < 0) and np.issubdtype(a.dtype, np.signedinteger): - return float32_array(a.astype(np.float32) ** b) - return a ** b - - -class LogicalElementwise(Elementwise): - @staticmethod - def type_infer(node): - override_data_type_of_constant(node) - node.out_port(0).set_data_type(bool) - - -class Greater(LogicalElementwise): - op = 'Greater' - op_type = 'Greater' - operation = staticmethod(lambda a, b: np.ma.greater(a, b)) - - -class GreaterEqual(LogicalElementwise): - op = 'GreaterEqual' - op_type = 'GreaterEqual' - operation = staticmethod(lambda a, b: np.ma.greater_equal(a, b)) - - -class Less(LogicalElementwise): - op = 'Less' - op_type = 'Less' - operation = staticmethod(lambda a, b: np.ma.less(a, b)) - - -class LessEqual(LogicalElementwise): - op = 'LessEqual' - op_type = 'LessEqual' - operation = staticmethod(lambda a, b: np.ma.less_equal(a, b)) - - -class Equal(LogicalElementwise): - op = 'Equal' - op_type = 'Equal' - operation = 
staticmethod(lambda a, b: np.ma.equal(a, b)) - - -class NotEqual(LogicalElementwise): - op = 'NotEqual' - op_type = 'NotEqual' - operation = staticmethod(lambda a, b: np.ma.not_equal(a, b)) - - -class Maximum(Elementwise): - op = 'Maximum' - op_type = 'Maximum' - operation = staticmethod(lambda a, b: np.ma.maximum(a, b)) - - -class Minimum(Elementwise): - op = 'Minimum' - op_type = 'Minimum' - operation = staticmethod(lambda a, b: np.ma.minimum(a, b)) - - -class Round(UnaryElementwise): - op = 'Round' - op_type = 'Round' - version = 'opset5' - - def __init__(self, graph: Graph, attrs): - round_attrs = {'mode': 'half_to_even', - 'infer': self.infer - } - round_attrs.update(attrs) - super().__init__(graph, round_attrs) - - def backend_attrs(self): - return ['mode'] - - @classmethod - def infer(cls, node: Node): - node.out_port(0).data.set_shape(node.in_port(0).data.get_shape()) - - a = node.in_port(0).data.get_value() - if a is not None: - assert node.soft_get('mode') in ['half_to_even', 'half_away_from_zero'], \ - 'Round node {} has unsupported "mode" attribute value: {}'.format(node.soft_get('name', node.id), - node.soft_get('mode')) - if node.mode == 'half_away_from_zero': - mask = (a >= 0) - out = np.ma.empty_like(a) - out[mask] = np.ma.floor(a[mask] + 0.5) - out[~mask] = np.ma.ceil(a[~mask] - 0.5) - else: - out = np.ma.round(a) - node.out_port(0).data.set_value(out) - - -class LogicalOr(LogicalElementwise): - op = 'LogicalOr' - op_type = 'LogicalOr' - operation = staticmethod(lambda a, b: np.ma.logical_or(a, b)) - - -class LogicalXor(Elementwise): - op = 'LogicalXor' - op_type = 'LogicalXor' - operation = staticmethod(lambda a, b: np.ma.logical_xor(a, b)) - - -class LogicalAnd(LogicalElementwise): - op = 'LogicalAnd' - op_type = 'LogicalAnd' - operation = staticmethod(lambda a, b: np.ma.logical_and(a, b)) - - -class FloorMod(Elementwise): - op = 'FloorMod' - op_type = 'FloorMod' - operation = staticmethod(lambda a, b: np.ma.fmod(a, b)) - - -class Mod(Elementwise): - op = 'Mod' - op_type = 'Mod' - operation = staticmethod(lambda a, b: np.ma.mod(a, b)) - - -class Negative(UnaryElementwise): - op = 'Negative' - op_type = 'Negative' - operation = staticmethod(lambda a: -a) - - -class Sqrt(UnaryElementwise): - op = 'Sqrt' - op_type = 'Sqrt' - - @staticmethod - def operation(a): - if np.issubdtype(a.dtype, np.signedinteger): - return float32_array(a.astype(np.float32) ** 0.5) - return a ** 0.5 - - -class BitwiseAnd(Elementwise): - op = 'BitwiseAnd' - op_type = 'BitwiseAnd' - version = 'opset13' - - -class BitwiseOr(Elementwise): - op = 'BitwiseOr' - op_type = 'BitwiseOr' - version = 'opset13' - - -class BitwiseXor(Elementwise): - op = 'BitwiseXor' - op_type = 'BitwiseXor' - version = 'opset13' - - -class BitwiseNot(UnaryElementwise): - op = 'BitwiseNot' - op_type = 'BitwiseNot' - version = 'opset13' diff --git a/tools/mo/openvino/tools/mo/ops/eltwise.py b/tools/mo/openvino/tools/mo/ops/eltwise.py deleted file mode 100644 index da7852bf5de0ba..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/eltwise.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class Eltwise(Op): - op = 'Eltwise' - - def __init__(self, graph: Graph, attrs: dict): - operations = { - 'sum': ('Add', lambda a, b: a + b), - 'mul': ('Mul', lambda a, b: a * b), - 
'max': ('Max', lambda a, b: np.ma.maximum(a, b)), - 'pow': ('Pow', lambda a, b: np.ma.power(a, b)), - 'less': ('Less', lambda a, b: np.ma.less(a, b)), - 'less_equal': ('LessEqual', lambda a, b: np.ma.less_equal(a, b)), - 'greater': ('Greater', lambda a, b: np.ma.greater(a, b)), - 'greater_equal': ('GreaterEqual', lambda a, b: np.ma.greater_equal(a, b)), - 'equal': ('Equal', lambda a, b: np.ma.equal(a, b)), - 'floor_mod': ('FloorMod', lambda a, b: np.ma.fmod(a, b)), - 'not_equal': ('NotEqual', lambda a, b: np.ma.not_equal(a, b)), - 'logical_or': ('LogicalOr', lambda a, b: bool(a) or bool(b)), - 'logical_and': ('LogicalAnd', lambda a, b: bool(a) and bool(b)), - 'logical_xor': ('LogicalXor', lambda a, b: bool(a) ^ bool(b)), - 'log': ('Log', lambda x: np.ma.log(x)), - } - - super().__init__(graph, { - 'type': self.op, - 'op': operations[attrs['operation']][0], - 'infer': lambda node: eltwise_infer(node, operations[node.operation][1]), - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return ['operation'] diff --git a/tools/mo/openvino/tools/mo/ops/eltwise_n.py b/tools/mo/openvino/tools/mo/ops/eltwise_n.py deleted file mode 100644 index d7679ae5980779..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/eltwise_n.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class EltwiseN(Op): - """ - The elementwise operation that has more than 2 inputs. This operation is replaced in a front phase with a number of - simple elementwise operations with 2 inputs. Refer to EltwiseNFrontReplacer for a list of supported operations. - """ - op = 'EltwiseN' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': None, # type is None because this operation should not appear in IR - 'infer': None, - 'out_ports_count': 1, - }, attrs) - if 'operation' not in self.attrs: - raise Error('"operation" attribute is not set for operation "{}".'.format(self.op)) - - -class EltwiseNMul(EltwiseN): - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, {'operation': 'mul'}) - - -class EltwiseNMin(EltwiseN): - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, {'operation': 'min'}) - - -class EltwiseNMax(EltwiseN): - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, {'operation': 'max'}) - - -class EltwiseNAdd(EltwiseN): - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, {'operation': 'sum'}) diff --git a/tools/mo/openvino/tools/mo/ops/eltwise_ninputs_in_1.py b/tools/mo/openvino/tools/mo/ops/eltwise_ninputs_in_1.py deleted file mode 100644 index f3176b2442038c..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/eltwise_ninputs_in_1.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class EltwiseNin1(Op): - """ - The elementwise operation that has all inputs in 1 input. This operation is replaced in a front phase with - a number of simple elementwise operations with 2 inputs. 
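Editor's note: Round.infer above distinguishes two rounding modes; on plain numpy arrays (leaving out the masked-array machinery) they behave as follows.

```python
import numpy as np

def round_values(a, mode='half_to_even'):
    if mode == 'half_away_from_zero':
        mask = a >= 0
        out = np.empty_like(a, dtype=float)
        out[mask] = np.floor(a[mask] + 0.5)    # 0.5 -> 1, 1.5 -> 2
        out[~mask] = np.ceil(a[~mask] - 0.5)   # -0.5 -> -1, -1.5 -> -2
        return out
    return np.round(a)  # banker's rounding: ties go to the nearest even value

x = np.array([-2.5, -1.5, -0.5, 0.5, 1.5, 2.5])
print(round_values(x))                         # [-2. -2. -0.  0.  2.  2.]
print(round_values(x, 'half_away_from_zero'))  # [-3. -2. -1.  1.  2.  3.]
```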
- """ - op = 'EltwiseNin1' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': __class__.op, - 'type': None, # type is None because this operation should not appear in IR - 'infer': None, - 'out_ports_count': 1, - }, attrs) - if 'operation' not in self.attrs: - raise Error('"operation" attribute is not set for operation "{}".'.format(__class__.op)) diff --git a/tools/mo/openvino/tools/mo/ops/embedding_bag.py b/tools/mo/openvino/tools/mo/ops/embedding_bag.py deleted file mode 100644 index d32d35804e37ab..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/embedding_bag.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class EmbeddingBagBase(Op): - enabled = False - - op = op_type = None - version = None - in_ports_count = None - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op_type, - 'version': self.version, - - 'infer': self.infer, - - 'in_ports_count': self.in_ports_count, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node: Node): - raise NotImplementedError('Please use specialized EmbeddingBag operation class, EmbeddingBagBase is base class') - - -class EmbeddingBagOffsetsSum(EmbeddingBagBase): - op = op_type = 'EmbeddingBagOffsetsSum' - version = 'opset3' - in_ports_count = 5 - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_in_ports) >= 3 and all(p in connected_in_ports for p in [0, 1, 2]), \ - "EmbeddingBagOffsetsSum should have at least 3 connected input port, but it doesn't " \ - "for node: `{}`. Ports: {}".format(name, connected_in_ports) - - weights_shape = node.in_port(0).data.get_shape() - assert len(weights_shape) >= 2, \ - "EmbeddingBagOffsetsSum should have at least 2D weights for node: `{}`".format(name) - offsets_shape = node.in_port(2).data.get_shape() - assert offsets_shape is not None and len(offsets_shape) == 1, \ - "Rank of the offsets in EmbeddingBagOffsetsSum should be equal to 1 for node: `{}`".format(name) - - node.out_port(0).data.set_shape(np.ma.concatenate((offsets_shape[:1], weights_shape[1:]))) - - -class EmbeddingBagPackedSum(EmbeddingBagBase): - op = op_type = 'EmbeddingBagPackedSum' - version = 'opset3' - in_ports_count = 3 - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_in_ports) >= 2 and all(p in connected_in_ports for p in [0, 1]), \ - "EmbeddingBagPackedSum should have at least 2 connected input port, but it doesn't for node: `{}`. 
" \ - "Ports: {}".format(name, connected_in_ports) - - weights_shape = node.in_port(0).data.get_shape() - assert len(weights_shape) >= 2, \ - "EmbeddingBagPackedSum should have at least 2D weights for node: `{}`".format(name) - input_shape = node.in_port(1).data.get_shape() - - node.out_port(0).data.set_shape(np.ma.concatenate((input_shape[:1], weights_shape[1:]))) - - -class EmbeddingSegmentsSum(EmbeddingBagBase): - op = op_type = 'EmbeddingSegmentsSum' - version = 'opset3' - in_ports_count = 6 - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_in_ports) >= 4 and all(p in connected_in_ports for p in [0, 1, 2, 3]), \ - "{} should have at least 4 connected input port, but it doesn't for node: `{}`. " \ - "Ports: {}".format(node.op, name, connected_in_ports) - - weights_shape = node.in_port(0).data.get_shape() - assert len(weights_shape) >= 2, \ - "{} should have at least 2D weights for node: `{}`".format(node.op, name) - indices_shape = node.in_port(1).data.get_shape() - segment_ids = node.in_port(2).data.get_shape() - assert len(indices_shape) == 1 and len(segment_ids) == 1 and indices_shape == segment_ids, \ - "Both indices and segment_ids should have the same shape for node: `{}`".format(name) - num_segments = node.in_port(3).data.get_value() - assert num_segments is not None, "{} should have a constant num_segments provided, but it " \ - "doesn't for node: `{}`.".format(node.op, name) - output_shape = np.ma.concatenate(([num_segments], weights_shape[1:])) - node.out_port(0).data.set_shape(output_shape) - - -class EmbeddingSegmentsMean(Op): - """ - Internal Operation. - - In order not to overload transformations (EmbeddingSegmentsOperationSingleFeatureFusing, - EmbeddingSegmentsOperationMultipleFeaturesFusing) with additional sub-graph computing mean value of embedding - vectors, we introduce internal operation EmbeddingSegmentsMean. After these transformations, this operation - is decomposed into EmbeddingSegmentSum with appropriate computation of mean value for embedding vectors collected - for each object in a batch. - """ - op = "EmbeddingSegmentsMean" - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'in_ports_count': 6, - 'out_ports_count': 1, - # it must have the same shape infer function as EmbeddingSegmentsSum - 'infer': EmbeddingSegmentsSum.infer - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/expand_dims.py b/tools/mo/openvino/tools/mo/ops/expand_dims.py deleted file mode 100644 index 5a2412174bd3fc..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/expand_dims.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, is_fully_defined, shape_insert -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class ExpandDims(Op): - """ - The ExpandDims layer adds dimensions with shape 1 to the specified positions. The positions is a layer attribute, - not a separate input. 
- """ - op = 'ExpandDims' - enabled = False - - def __init__(self, graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'reinterp_shape': True, - 'infer': self.infer, - 'expand_axis': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - input_shape = node.in_port(0).data.get_shape() - input_value = node.in_port(0).data.get_value() - if input_shape is None: - raise Error('Input shape for node "{}" is None'.format(node_name)) - - assert len(node.in_nodes()) == 1, 'Wrong number of inputs to the layer {}'.format(node_name) - - if not node.has_valid('expand_axis'): - raise Error('ExpandDims axis is not defined for node {}'.format(node_name)) - - expand_axes = node.expand_axis - if expand_axes is None: - raise Error('The "expand_axis" attribute is None for node "{}"'.format(node_name)) - - if isinstance(expand_axes, int): - expand_axes = int64_array([expand_axes]) - elif expand_axes.ndim == 0: - expand_axes = expand_axes.reshape([1]) - - # expand_axis is a position where the new axis is placed so expand_dims works for negative axis in a different - # way not as insert operation - for expand_axis in expand_axes: - if expand_axis < 0: - expand_axis += len(input_shape) + 1 - - expand_axes = sorted(expand_axes) - output_shape = input_shape.copy() - for expand_axis in expand_axes: - output_shape = shape_insert(output_shape, expand_axis, 1) - - if input_value is not None and is_fully_defined(output_shape): - node.out_port(0).data.set_value(input_value.reshape(output_shape)) - else: - node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/eye.py b/tools/mo/openvino/tools/mo/ops/eye.py deleted file mode 100644 index 73e52da5ea0d6f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/eye.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, shape_array -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined - - -class Eye(Op): - """ - Eye operation that generates shift matrix or a batch of matrices. - """ - op = 'Eye' - enabled = False - in_ports_count = 4 - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset9', - 'infer': self.infer, - 'in_ports_count': 4, - 'out_ports_count': 1, - 'type_infer': self.type_infer, - 'output_type': np.float32, - }, attrs) - - def backend_attrs(self): - return [('output_type', lambda node: np_data_type_to_destination_type(node.output_type))] - - @staticmethod - def type_infer(node: Node): - node.out_port(0).set_data_type(node['output_type']) - - @staticmethod - def infer(node: Node): - assert node.has_valid('output_type') - - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_in_ports) >= 3 and all(p in connected_in_ports for p in [0, 1, 2]), \ - "Eye should have at least 3 connected input port." 
\ - "Got ports: `{}`.".format(connected_in_ports) - - num_rows_port = 0 - num_columns_port = 1 - diagonal_index_port = 2 - batch_shape_port = 3 - - num_rows_shape = node.in_port(num_rows_port).data.get_shape() - assert len(num_rows_shape) <= 1, \ - '"num_rows" should be 1D tensor or scalar. Got: '.format(len(num_rows_shape)) - num_rows = node.in_port(num_rows_port).data.get_value() - if num_rows is None: - num_rows = dynamic_dimension - else: - num_rows = np.array(num_rows).item() - - num_columns_shape = node.in_port(num_columns_port).data.get_shape() - assert len(num_columns_shape) <= 1, \ - '"num_columns" should be 1D tensor or scalar. Got: '.format(len(num_columns_shape)) - num_columns = node.in_port(num_columns_port).data.get_value() - if num_columns is None: - num_columns = dynamic_dimension - else: - num_columns = np.array(num_columns).item() - - diagonal_index_shape = node.in_port(diagonal_index_port).data.get_shape() - assert len(diagonal_index_shape) <= 1, \ - '"diagonal_index" should be 1D tensor or scalar. Got: '.format(len(diagonal_index_shape)) - diagonal_index = node.in_port(diagonal_index_port).data.get_value() - - if batch_shape_port in connected_in_ports: - batch_shape_shape = node.in_port(batch_shape_port).data.get_shape() - assert len(batch_shape_shape) == 1, \ - '"batch_shape" should be 1D tensor. Got: '.format(len(batch_shape_shape)) - batch_shape = node.in_port(batch_shape_port).data.get_value() - if batch_shape is None: - batch_shape = [dynamic_dimension] * batch_shape_shape[0] - else: - batch_shape = [] - - output_shape = [*batch_shape, num_rows, num_columns] - node.out_port(0).data.set_shape(output_shape) - - if is_fully_defined(output_shape) and diagonal_index is not None: - tile_shape = [*batch_shape, 1, 1] - one_matrix = np.eye(num_rows, M=num_columns, k=np.array(diagonal_index).item(), dtype=node.output_type) - output_value = np.tile(one_matrix, tile_shape) - node.out_port(0).data.set_value(shape_array(output_value)) - - -class TFEye(Op): - """ Eye operation that that generates shift matrix or a batch of matrices. 
- Eye operation from TensorFlow has three inputs: row number, column number and batch shape - """ - op = 'TFEye' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'infer': None, - 'in_ports_count': 3, - 'out_ports_count': 1, - 'output_type': np.float32, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/fake_output.py b/tools/mo/openvino/tools/mo/ops/fake_output.py deleted file mode 100644 index 3bac58daf58614..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/fake_output.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer, copy_value -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class FakeOutput(Op): - """ - This op is needed only to store the output name, it will be transformed into opset op and is doing nothing - """ - op = 'FakeOutput' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': None, - 'version': None, - - 'infer': lambda n: copy_shape_infer(n, copy_value), - - 'type_infer': None, - - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/fakequantize.py b/tools/mo/openvino/tools/mo/ops/fakequantize.py deleted file mode 100644 index 1c21fb7c57b543..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/fakequantize.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -def broadcastable(broadcast_from, broadcast_to): - """Check if shape broadcast_from can be broadcasted to broadcast_to""" - broadcast_to = int64_array(broadcast_to) - broadcast_from = int64_array(broadcast_from) - if broadcast_from.size > broadcast_to.size: - return False - broadcast_from = np.concatenate( - (int64_array([1] * (broadcast_to.size - broadcast_from.size)), broadcast_from)) - return np.all(np.logical_or(broadcast_from == 1, broadcast_from == broadcast_to)) - - -def round_half_up(n): - return np.floor(n + 0.5) - - -class FakeQuantize(Op): - op = 'FakeQuantize' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'levels': None, - 'is_eltwise': True, - 'infer': self.infer, - 'in_ports_count': 5, - 'out_ports_count': 1, - 'auto_broadcast': 'numpy' - } - super().__init__(graph, mandatory_props, attrs) - if self.attrs['levels'] is None: - raise Error("FakeQuantize operation has no levels parameter") - - def supported_attrs(self): - return [ - 'levels', - 'auto_broadcast' - ] - - @staticmethod - def infer(node: Node): - assert len(node.in_nodes()) == 5 - assert len(node.out_nodes()) == 1 - inputs = [node.in_node(i) for i in range(5)] - x, input_low, input_high, output_low, output_high = inputs - assert x.has_valid('shape') - # TODO Check all inputs[1..4] shapes are broadcastable to inputs[0] shape - assert all([broadcastable(inputs[i].shape, inputs[0].shape) for i in range(1, 5)]), \ - "Not all shapes from FakeQuantize inputs can be broadcasted to input[0] for node {}".format( - node.soft_get('name')) - 
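# Illustrative check (plain NumPy) of what the broadcastable() helper above verifies for the
# range inputs of FakeQuantize: after left-padding with ones, every dimension must be 1 or
# equal to the corresponding data dimension. The shapes below are hypothetical.
import numpy as np

data_shape = (1, 64, 56, 56)
per_channel = (64, 1, 1)   # broadcastable to data_shape
mismatched = (64, 1, 3)    # not broadcastable

assert np.broadcast_shapes(per_channel, data_shape) == data_shape
try:
    np.broadcast_shapes(mismatched, data_shape)
except ValueError:
    pass  # rejected, as broadcastable() would return False for this shape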
node.out_node().shape = x.shape.copy() - - if all([node.in_node(i).has_valid('value') for i in range(5)]): - x, input_low, input_high, output_low, output_high = \ - [float32_array(np.broadcast_to(node.value, x.value.shape)) for node in inputs] - - assert node.has_valid('levels') - assert isinstance(node.levels, int) - - underflow_mask = x <= input_low - overflow_mask = x > input_high - # pylint: disable=assignment-from-no-return - middle_mask = np.logical_not(np.logical_or(underflow_mask, overflow_mask)) - - def middle_part(x, input_low, input_high, output_low, output_high): - return round_half_up((x - input_low) / (input_high - input_low) * (node.levels - 1)) / \ - (node.levels - 1) * (output_high - output_low) + output_low - - output = np.zeros_like(x) - # pylint: disable=unsupported-assignment-operation - output[middle_mask] = middle_part( - x[middle_mask], - input_low[middle_mask], - input_high[middle_mask], - output_low[middle_mask], - output_high[middle_mask], - ) - - # pylint: disable=unsupported-assignment-operation - output[overflow_mask] = output_high[overflow_mask] - # pylint: disable=unsupported-assignment-operation - output[underflow_mask] = output_low[underflow_mask] - - if not node.has_and_set('stop_value_propagation'): - node.out_node().value = output diff --git a/tools/mo/openvino/tools/mo/ops/fill.py b/tools/mo/openvino/tools/mo/ops/fill.py deleted file mode 100644 index 8cb27f5eeeceeb..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/fill.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.op import Op - - -class Fill(Op): - """ - The Fill layer tiles the second input tensor (0D constant) to the shape specified in the first input. - - This operation is converted to Broadcast layer. 
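# Illustrative sketch (plain NumPy) of the FakeQuantize value propagation implemented in
# fakequantize.py above; the fake_quantize() wrapper name is hypothetical, while
# round_half_up mirrors the helper defined above.
import numpy as np

def round_half_up(n):
    return np.floor(n + 0.5)

def fake_quantize(x, input_low, input_high, output_low, output_high, levels):
    q = round_half_up((x - input_low) / (input_high - input_low) * (levels - 1))
    y = q / (levels - 1) * (output_high - output_low) + output_low
    # values at or below input_low / above input_high saturate to the output range bounds
    return np.where(x <= input_low, output_low, np.where(x > input_high, output_high, y))

x = np.array([-1.0, 0.1, 0.5, 2.0], dtype=np.float32)
print(fake_quantize(x, 0.0, 1.0, 0.0, 1.0, levels=256))  # ~[0., 0.102, 0.502, 1.]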
- """ - op = 'Fill' - enabled = False - - def __init__(self, graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': __class__.op, - 'infer': None, - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/flatten.py b/tools/mo/openvino/tools/mo/ops/flatten.py deleted file mode 100644 index 7c2541876f120f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/flatten.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class Flatten(Op): - op = 'Flatten' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': __class__.op, - 'type': None, - - 'axis': None, - 'end_axis': np.int64(-1), - 'infer': None, - - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - -class FlattenONNX(Op): - op = 'FlattenONNX' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': __class__.op, - 'type': None, - - 'axis': None, - 'infer': None, - - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/gather.py b/tools/mo/openvino/tools/mo/ops/gather.py deleted file mode 100644 index 2a899de53ce726..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/gather.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array, is_fully_defined, shape_array, \ - dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op, PermuteAttrs -from openvino.tools.mo.utils.error import Error - - -class Gather(Op): - op = 'Gather' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset8', - 'batch_dims': 0, - 'reinterp_shape': True, - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - 'force_precision_in_ports': {1: 'int32', 2: 'int64'}, - 'in_ports_count': 3, - 'out_ports_count': 1, - }, attrs) - - assert 'axis' not in self.attrs, \ - 'Use AttributedGather operation instead of Gather to create it with `axis` as a parameter' - - def backend_attrs(self): - version = self.get_opset() - if version in ['opset7', 'opset8']: - return ['batch_dims'] - elif version == 'opset1': - return [] - else: - raise Error('Unsupported operation opset version "{}"'.format(version)) - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_in_ports) == 3 and 0 in connected_in_ports and 1 in connected_in_ports and \ - 2 in connected_in_ports, "Gather should have 3 connected input port, but it doesn't for " \ - "node: `{}`. 
Ports: {}".format(name, connected_in_ports) - - data_shape = node.in_port(0).data.get_shape() - assert data_shape is not None - indices_shape = node.in_port(1).data.get_shape() - assert indices_shape is not None - axis = node.in_port(2).data.get_value() - - # axis of Gather could be accepted as both scalar and 1D tensor - if isinstance(axis, np.ndarray): - axis = axis.item() - assert axis is not None, 'axis input is undefined' - - assert -len(data_shape) <= axis < len(data_shape), \ - 'axis must be within interval [-data_rank, data_rank). Instead got axis = {}, data_rank = {} '.\ - format(axis, len(data_shape)) - - batch_dims = node.batch_dims - assert -len(indices_shape) <= batch_dims <= len(indices_shape), \ - 'batch_dims must be within interval [-indices_rank, indices_rank]. Instead got batch_dims = {}, ' \ - 'indices_rank = {} '.format(batch_dims, len(indices_shape)) - - # normalize to positive values - axis = axis + len(data_shape) if axis < 0 else axis - batch_dims = batch_dims + len(indices_shape) if batch_dims < 0 else batch_dims - - assert np.ma.allequal(data_shape[:batch_dims], indices_shape[:batch_dims]), \ - 'data and indices inputs must have equal first dimensions until batch_dims' - - assert batch_dims <= axis, \ - 'normalized batch_dims must be <= axis. Instead got batch_dims = {}, axis = {}'.format(axis, batch_dims) - - # we import PermuteInputs locally because it uses Gather inside and we have recursive imports - from openvino.tools.mo.graph.perm_inputs import PermuteInputs - PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'axis') - - batch_dims_range = indices_shape[:batch_dims] - out_shape = np.concatenate((data_shape[:axis], indices_shape[batch_dims:], data_shape[axis + 1:])) - - data_value = node.in_port(0).data.get_value() - indices_value = node.in_port(1).data.get_value() - if data_value is not None and indices_value is not None and is_fully_defined(indices_value): - indices_value = int64_array(indices_value) - if batch_dims == 0: - node.out_port(0).data.set_value(np.ma.take(data_value, indices_value, axis)) - else: - out_value = np.empty(out_shape) - for batch_idx in np.ndindex(tuple(batch_dims_range)): - out_value[batch_idx] = np.ma.take(data_value[batch_idx], indices_value[batch_idx], - axis - batch_dims) - node.out_port(0).data.set_value(out_value) - else: - node.out_port(0).data.set_shape(out_shape) - - @staticmethod - def reverse_infer(node: Node): - out_shape = node.out_port(0).data.get_shape() - data_shape = node.in_port(0).data.get_shape() - indices_shape = node.in_port(1).data.get_shape() - batch_dims = node.batch_dims - batch_dims = batch_dims + len(indices_shape) if batch_dims < 0 else batch_dims - - axis = node.in_port(2).data.get_value() - # axis of Gather could be accepted as both scalar and 1D tensor - if isinstance(axis, np.ndarray): - axis = axis.item() - assert axis is not None, 'axis input is undefined' - - # we can deduce data or indices partial shapes from output shape calculation formula - # out_shape = Concat(data_shape[:axis], indices_shape[batch_dims:batch_dims + indices_rank], data_shape[axis + 1:]) - - # data partial shape is unknown - if out_shape is not None and data_shape is None and indices_shape is not None: - out_rank = len(out_shape) - indices_rank = len(indices_shape) - - deduced_data_shape = out_shape.tolist(dynamic_dimension_value) - for i in range(indices_rank): - deduced_data_shape.pop(axis) - deduced_data_shape.insert(axis, dynamic_dimension_value) - 
node.in_port(0).data.set_shape(shape_array(deduced_data_shape)) - - # indices partial shape is unknown - if out_shape is not None and indices_shape is None and data_shape is not None: - out_rank = len(out_shape) - data_rank = len(data_shape) - indices_rank = out_rank + 1 - data_rank + batch_dims - - indices_shape = out_shape[axis:axis + indices_rank] - node.in_port(1).data.set_shape(indices_shape) - - -class AttributedGather(Op): - op = 'AttributedGather' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': 'Gather', - - 'axis': 0, - 'reinterp_shape': True, - 'infer': self.infer, - # reverse_infer is not needed since is replaced by Gather on the front (AttributedGatherNormalizer) - - 'force_precision_in_ports': {1: 'int32'}, - - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return [ - 'axis', - ] - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_in_ports) == 2 and 0 in connected_in_ports and 1 in connected_in_ports, \ - "AttributedGather should have 2 connected input port, but it doesn't for node: `{}`. Ports: {}" \ - "".format(name, connected_in_ports) - - axis = node.soft_get('axis', None) - assert axis is not None - - data_shape = node.in_port(0).data.get_shape() - assert data_shape is not None - indices_shape = node.in_port(1).data.get_shape() - assert indices_shape is not None - - # Convert negative axis - axis = get_canonical_axis_index(data_shape, axis) - node.axis = axis - - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) - - data_value = node.in_port(0).data.get_value() - indices_value = node.in_port(1).data.get_value() - if data_value is not None and indices_value is not None: - node.out_port(0).data.set_value(mo_array(np.take(data_value, indices_value, axis), dtype=data_value.dtype)) - return - - shape = np.concatenate((data_shape[:axis], indices_shape)) - if axis < len(data_shape) - 1: - shape = np.concatenate((shape, data_shape[axis + 1:])) - - node.out_port(0).data.set_shape(int64_array(shape)) diff --git a/tools/mo/openvino/tools/mo/ops/gatherelements.py b/tools/mo/openvino/tools/mo/ops/gatherelements.py deleted file mode 100644 index 8f026223b1411e..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/gatherelements.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op, PermuteAttrs -from openvino.tools.mo.utils.error import Error - - -class GatherElements(Op): - op = 'GatherElements' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset6', - 'infer': self.infer, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'axis': 0, - }, attrs) - - def backend_attrs(self): - return ['axis'] - - @staticmethod - def infer(node: Node): - data_shape = node.in_port(0).data.get_shape() - indices_shape = node.in_port(1).data.get_shape() - axis = node.axis - data_rank = len(data_shape) - - assert data_rank >= 1, 'data_rank must be >= 1' - assert data_rank == len(indices_shape), 'data and indices inputs for node {} must be of the ' \ - 'same rank. 
Instead got {} and {}'. \ - format(node.name, data_rank, len(indices_shape)) - assert -data_rank <= axis < data_rank, 'axis for node {0} must be within interval ' \ - '[-{1}, {1} - 1]. Instead got: axis={2}'. \ - format(node.name, data_rank, axis) - if axis < 0: - axis += data_rank - out_shape = indices_shape.copy() - for idx, (data_sz, ind_sz) in enumerate(zip(data_shape, indices_shape)): - out_shape[idx] = ind_sz if ind_sz is not dynamic_dimension or idx == axis else data_sz - if idx != axis and data_sz != ind_sz: - raise Error('Sizes along axis {} for node {} do not match. data and indices must have ' - 'equal size along all axes except for axis {}'.format(idx, node.name, axis)) - - data = node.in_port(0).data.get_value() - indices = node.in_port(1).data.get_value() - - if data is not None and indices is not None: - out_value = np.empty(indices_shape, dtype=data.dtype) - for idx in np.ndindex(*indices_shape): - data_idx = list(idx) - data_idx[node.axis] = indices[idx] - out_value[idx] = data[tuple(data_idx)] - node.out_port(0).data.set_value(out_value) - else: - node.out_port(0).data.set_shape(out_shape) - - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) diff --git a/tools/mo/openvino/tools/mo/ops/gathernd.py b/tools/mo/openvino/tools/mo/ops/gathernd.py deleted file mode 100644 index 02b900688f8c06..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/gathernd.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, is_fully_defined, dynamic_dimension_value, \ - compatible_dims -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class GatherND(Op): - op = 'GatherND' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset8', - 'infer': self.infer, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'batch_dims': 0 - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return ['batch_dims'] - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - assert len(connected_in_ports) == 2, \ - "Incorrect number of inputs for {} node".format(node_name) - - data_shape = node.in_port(0).data.get_shape() - data_value = node.in_port(0).data.get_value() - indices_shape = node.in_port(1).data.get_shape() - indices_value = node.in_port(1).data.get_value() - - assert node.has_valid('batch_dims'), "Node {} must contain `batch_dims` attribute".format(node_name) - batch_dims = node.batch_dims - - # check that a number of batch dimensions is less than both ranks of data and indices tensors - assert batch_dims < len(data_shape), "Number of batch dimensions must be less than a rank of data" - assert batch_dims < len(indices_shape), "Number of batch dimensions must be less than a rank of indices" - - # check that batch dimensions of data and indices are the same - for batch_dim in range(batch_dims): - assert compatible_dims(data_shape[batch_dim], indices_shape[batch_dim]), \ - "The dimension {} for data and indices tensors must be the same".format(batch_dim) - - # check ranks of input tensors - assert len(data_shape) > 0, "Data must not be a scalar" - assert len(indices_shape) > 0, "Indices must not be a scalar" - assert (batch_dims + indices_shape[-1]) <= 
len(data_shape), \ - "Length of a tuple with indices must not exceed a rank of data tensor excluding batch dimensions" - assert node['version'] in ['opset5', 'opset8'], 'Unsupported version of GatherND operation: {}, operation ' \ - 'name : {}'.format(node['version'], node.soft_get('name')) - - # compute output shape - batch = [] - if batch_dims > 0: - if node['version'] == 'opset5': # Support old version of gatherND shape inference - if is_fully_defined(data_shape[:batch_dims]): - batch = [np.prod(data_shape[:batch_dims]).tolist()] - else: - batch = [dynamic_dimension_value] - elif node['version'] == 'opset8': - for dim in range(batch_dims): - assert compatible_dims(indices_shape[dim], data_shape[dim]),\ - "Batch dimensions in data.shape and indices.shape must be compatible" - if is_fully_defined(indices_shape[:batch_dims]): - batch = indices_shape[:batch_dims].tolist() - elif is_fully_defined(data_shape[:batch_dims]): - batch = data_shape[:batch_dims].tolist() - else: - for ind in range(batch_dims): - if indices_shape[ind] != dynamic_dimension_value: - batch.append(indices_shape[ind]) - elif data_shape[ind] != dynamic_dimension_value: - batch.append(data_shape[ind]) - else: - batch.append(dynamic_dimension_value) - - slice_shape = list(data_shape[(batch_dims + indices_shape[-1]):]) - - output_shape = batch + list(indices_shape)[batch_dims:-1] + slice_shape - node.out_port(0).data.set_shape(output_shape) - - # compute output value if all input indices are defined - if is_fully_defined(indices_value) and data_value is not None: - batch_dims_size = 1 - - for i in range(batch_dims): - batch_dims_size *= indices_shape[i] - - output_data = [] - - reshaped_indices = indices_value.reshape(batch_dims_size, -1, indices_shape[-1]) - - reshaped_data = data_value.reshape((batch_dims_size,) + tuple((data_shape[batch_dims:]))) - - for batch_dim in range(reshaped_indices.shape[0]): - for outer_dim in range(reshaped_indices.shape[1]): - gather_index = tuple(reshaped_indices[batch_dim][outer_dim]) - output_data.append(reshaped_data[(batch_dim,) + gather_index]) - output_value = np.asarray(output_data, dtype=data_value.dtype).reshape(output_shape) - node.out_port(0).data.set_value(output_value) diff --git a/tools/mo/openvino/tools/mo/ops/gelu.py b/tools/mo/openvino/tools/mo/ops/gelu.py deleted file mode 100644 index 8b6be1b9e4c6aa..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/gelu.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class GeLUOP(Op): - op = 'Gelu' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'version': 'opset7', - 'infer': copy_shape_infer - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - if self.get_opset() == 'opset7': - return ['approximation_mode'] - else: - return [] diff --git a/tools/mo/openvino/tools/mo/ops/grn.py b/tools/mo/openvino/tools/mo/ops/grn.py deleted file mode 100644 index 3b36e7c2c1c08f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/grn.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from 
openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class GRNOp(Op): - op = 'GRN' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': __class__.op, - 'op': __class__.op, - 'version': 'opset1', - 'in_ports_count': 1, - 'out_ports_count': 1, - 'infer': copy_shape_infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'bias' - ] diff --git a/tools/mo/openvino/tools/mo/ops/group_norm.py b/tools/mo/openvino/tools/mo/ops/group_norm.py deleted file mode 100644 index 647ea518e911e0..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/group_norm.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class GroupNorm(Op): - op = 'GroupNorm' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': __class__.op, - 'infer': copy_shape_infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - }, attrs) - - -class GroupNormalization(Op): - op = 'GroupNormalization' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'infer': copy_shape_infer, - 'version': 'opset12', - - 'num_groups': None, - 'epsilon': None, - - 'in_ports_count': 3, - 'out_ports_count': 1, - }, attrs) - - def backend_attrs(self): - return ['num_groups', 'epsilon'] diff --git a/tools/mo/openvino/tools/mo/ops/hard_sigmoid.py b/tools/mo/openvino/tools/mo/ops/hard_sigmoid.py deleted file mode 100644 index 1f0eeb971ace0a..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/hard_sigmoid.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class HardSigmoid(Op): - op = 'HardSigmoid' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - 'infer': self.infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node: Node): - input_node = node.in_node(0) - data_value = node.in_port(0).data.get_value() - alpha_value = node.in_port(1).data.get_value() - beta_value = node.in_port(2).data.get_value() - if data_value is not None and alpha_value is not None and beta_value is not None: - node.out_port(0).data.set_value(np.clip(data_value * alpha_value + beta_value, 0, 1)) - return - - node.out_port(0).data.set_shape(input_node.shape.copy()) diff --git a/tools/mo/openvino/tools/mo/ops/identity.py b/tools/mo/openvino/tools/mo/ops/identity.py deleted file mode 100644 index 0861a0dda5c2bf..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/identity.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class Identity(Op): - op = 'Identity' - 
enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': None, - - 'identity': True, - 'infer': self.infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node): - node.out_port(0).data.set_shape(node.in_port(0).data.get_shape()) - if node.in_port(0).data.get_value() is not None: - node.out_port(0).data.set_value(node.in_port(0).data.get_value()) - - -class IdentityN(Op): - op = 'IdentityN' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': None, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/instance_normalization.py b/tools/mo/openvino/tools/mo/ops/instance_normalization.py deleted file mode 100644 index 500ddf8d9e34cb..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/instance_normalization.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class InstanceNormalization(Op): - ''' InstanceNormalization like it is defined in ONNX - - y = scale * (x - mean) / sqrt(variance + epsilon) + B - - where x is input(0), scale is input(1) and B is input(2) - ''' - op = 'InstanceNormalization' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': __class__.op, - 'epsilon': None, - #'infer' - is not needed, this op should be replaced by a front replacer - }, attrs) - - def supported_attrs(self): - return ['epsilon'] diff --git a/tools/mo/openvino/tools/mo/ops/interp.py b/tools/mo/openvino/tools/mo/ops/interp.py deleted file mode 100644 index 8cd31cd56960e2..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/interp.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import inspect -import logging as log - -from openvino.tools.mo.ops.resize_factor_utils import factor_update -from openvino.tools.mo.front.common.layout import get_batch_dim, get_features_dim, get_height_dim, get_width_dim, shape_for_layout -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class InterpOp(Op): - op = 'Interp' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': __class__.op, - 'op': __class__.op, - 'factor': None, - 'align_corners': 1, - 'parse_2nd_input': 'value', - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': None - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'height', - 'width', - 'zoom_factor', - 'shrink_factor', - 'factor', # float factor required by OV shape inference - 'pad_beg', - 'pad_end', - 'align_corners' - ] - - @staticmethod - def interp_infer(node: Node): - layout = node.graph.graph['layout'] - assert len(layout) == 4 - if len(node.in_nodes()) == 2: - src_shape = node.in_node(0).shape - dst_shape = node.in_node(1).shape - - # in Caffe can be 2 inputs too, but shape should be got from shape of the second input - if node.parse_2nd_input == 'shape': - dst_shape = [dst_shape[get_height_dim(layout, 4)], dst_shape[get_width_dim(layout, 4)]] - else: - # it is TF case - dst_shape = node.in_node(1).value - - if src_shape is None or dst_shape is None or 
len(src_shape) != 4 or len(dst_shape) != 2: - log.error( - 'Node {} with op {} cannot be converted to Resample layer because there is no enough info about ' - 'src/dst shapes: src_shape = {}, dst_shape = {}'.format(node.name, node.op, src_shape, dst_shape)) - node.type = None # prevent translation to a valid OV layer - return - in_height = src_shape[get_height_dim(layout, 4)] - in_width = src_shape[get_width_dim(layout, 4)] - out_height = dst_shape[0] - out_width = dst_shape[1] - - node.factor = factor_update( - node.factor, - [float(out_height) / in_height, float(out_width) / in_width], - [in_height, in_width], - [out_height, out_width], - node.soft_get('name') - ) - - if node.factor is None: - node['width'] = out_width - node['height'] = out_height - - node.out_node().shape = shape_for_layout(layout, - batch=src_shape[get_batch_dim(layout, 4)], - features=src_shape[get_features_dim(layout, 4)], - height=out_height, - width=out_width) - node.graph.remove_edge(node.in_node(1).id, node.id) - else: - outn = node.out_node(0) - - in_shape = node.in_node(0) - num_ = in_shape.shape[get_batch_dim(layout, 4)] - channels_ = in_shape.shape[get_features_dim(layout, 4)] - height_in_ = in_shape.shape[get_height_dim(layout, 4)] - width_in_ = in_shape.shape[get_width_dim(layout, 4)] - - height_out_ = height_in_ + node.pad_beg + node.pad_end - width_out_ = width_in_ + node.pad_beg + node.pad_end - - if node.shrink_factor != 1 and node.zoom_factor == 1: - shrink_factor = node.shrink_factor - if shrink_factor < 1: - log.error('Shrink factor should be positive in node {}'.format(node.id)) - return None - height_out_ = (height_out_ - 1) / shrink_factor + 1 - width_out_ = (width_out_ - 1) / shrink_factor + 1 - elif node.shrink_factor == 1 and node.zoom_factor != 1: - zoom_factor = node.zoom_factor - if zoom_factor < 1: - log.error('Zoom factor should be positive in node {}'.format(node.id)) - return None - - node['debug_message'] = 'Interp layer shape inference function may be wrong, please, try to update ' \ - 'layer shape inference function in the file (openvino/tools/mo/ops/interp.op at the ' \ - 'line {}).'.format(inspect.currentframe().f_lineno) + refer_to_faq_msg(100) - # Reshape methods can be different in some cases - # Commented out section represents reshape that used in deeplab-caffe - # Uncomment the following lines, if your model was trained with deeplab-caffe - # or have the same reshape method - # height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1) - # width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1) - - # Comment out the following lines if you use the reshape method from previous section - height_out_ = height_out_ * zoom_factor - width_out_ = width_out_ * zoom_factor - elif node.width != 0 and node.height != 0: - height_out_ = node.height - width_out_ = node.width - elif node.shrink_factor != 1 and node.zoom_factor != 1: - shrink_factor = node.shrink_factor - zoom_factor = node.zoom_factor - if shrink_factor < 1: - log.error('Shrink factor should be positive in node {}'.format(node.id)) - return None - if zoom_factor < 1: - log.error('Zoom factor should be positive in node {}'.format(node.id)) - return None - height_out_ = (height_out_ - 1) / shrink_factor + 1 - width_out_ = (width_out_ - 1) / shrink_factor + 1 - height_out_ = height_out_ + (height_out_ - 1) * (zoom_factor - 1) - width_out_ = width_out_ + (width_out_ - 1) * (zoom_factor - 1) - - outn.shape = shape_for_layout(layout, - batch=num_, - features=channels_, - height=height_out_, - width=width_out_) diff 
--git a/tools/mo/openvino/tools/mo/ops/interpolate.py b/tools/mo/openvino/tools/mo/ops/interpolate.py deleted file mode 100644 index 6bd38456d94203..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/interpolate.py +++ /dev/null @@ -1,188 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import math - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension, dynamic_dimension_value -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op, PermuteAttrs - - -def infer_for_opsetX(node: Node, opset: str): - if opset == "opset4": - scales_port = 2 - axes_port = 3 - min_inputs_num = 3 - elif opset == "opset11": - scales_port = 1 - axes_port = 2 - min_inputs_num = 2 - else: - raise "Unknown opset: {}".format(opset) - assert len([p for p in node.in_ports().values() if not p.disconnected()]) in [min_inputs_num, min_inputs_num + 1], \ - "Interpolate node {} must have at least {} inputs".format(node.soft_get(node.name, node.id), axes_port) - assert node.has_valid('mode') - assert node.has_valid('shape_calculation_mode') - src_shape = node.in_port(0).data.get_shape() - assert src_shape is not None - - input_rank = len(src_shape) - - pads_begin = correct_pad(node.soft_get('pads_begin', [0]), input_rank) - pads_end = correct_pad(node.soft_get('pads_end', [0]), input_rank) - node['pads_begin'] = pads_begin - node['pads_end'] = pads_end - - if len(node.in_ports()) == axes_port: - axes = list(range(0, input_rank)) - else: - axes = node.in_port(axes_port).get_source().data.get_value() - assert axes is not None, \ - "Interpolate node with name {} has None as 'axes' input".format(node.soft_get('name', node.id)) - - axes = int64_array(axes) - output_shape = src_shape + pads_begin + pads_end - if node.shape_calculation_mode == 'sizes': - dst_shape = node.in_port(1).data.get_value() - assert dst_shape is not None - if node.get_opset() != "opset11": - correct_scales_using_dst_shape(node, dst_shape, src_shape, axes) - for i, axis in enumerate(axes): - output_shape[axis] = dst_shape[i] - else: - scales = node.in_port(scales_port).data.get_value() - assert scales is not None - for i, axis in enumerate(axes): - if output_shape[axis] is not dynamic_dimension and scales[i] is not dynamic_dimension: - output_shape[axis] = math.floor(scales[i] * output_shape[axis] + 1.0e-5) - else: - output_shape[axis] = dynamic_dimension_value - - if node.is_in_port_connected(axes_port): - PermuteInputs().set_input_permutation(node.in_node(axes_port), node, 'input:0', 'axis') - - node.out_port(0).data.set_shape(output_shape) - - -def infer_for_opset1(node: Node): - assert len([p for p in node.in_ports().values() if not p.disconnected()]) == 2 - assert node.has_valid('mode') - assert node.has_valid('axes') - - src_shape = node.in_port(0).data.get_shape() - - assert src_shape is not None - dst_shape = node.in_port(1).data.get_value() - assert dst_shape is not None - - output_shape = src_shape.copy() - for ind, axis in enumerate(node.axes): - output_shape[axis] = dst_shape[ind] - - node.out_port(0).data.set_shape(output_shape) - - PermuteAttrs.create_permute_attrs(node, attrs=[('axes', 'input:0')]) - - -def pad_attribute_to_str(node: Node, attr: str): - return ','.join(map(str, node[attr])) if node.has_valid(attr) else None - - -def correct_pad(pad, rank): - pad_len = len(pad) 
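# Illustrative sketch (plain Python) of the 'scales' branch of the output shape computation
# in infer_for_opsetX() above; the shapes, axes and scales below are hypothetical.
import math

src_shape = [1, 3, 40, 60]
pads_begin = [0, 0, 0, 0]
pads_end = [0, 0, 0, 0]
axes = [2, 3]
scales = [0.5, 2.0]

output_shape = [s + b + e for s, b, e in zip(src_shape, pads_begin, pads_end)]
for i, axis in enumerate(axes):
    output_shape[axis] = math.floor(scales[i] * output_shape[axis] + 1.0e-5)
assert output_shape == [1, 3, 20, 120]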
- if pad_len < rank: - return np.pad(pad, (0, rank - pad_len), 'constant').astype(np.int64) - elif pad_len > rank: - return int64_array(pad[: rank]) - else: - return int64_array(pad) - - -def correct_scales_using_dst_shape(node, dst_shape, src_shape, axes): - scales_value = node.in_port(2).data.get_value() - if scales_value is None or len(scales_value) != len(dst_shape): - corrected_scales = np.zeros(len(dst_shape), dtype=np.float32) - for i, axis in enumerate(list(axes)): - corrected_scales[i] = dst_shape[i] / src_shape[axis] - node.in_port(2).data.set_value(corrected_scales) - - -class Interpolate(Op): - op = 'Interpolate' - enabled = False - infers = { - 'opset1': infer_for_opset1, - 'opset4': lambda node: infer_for_opsetX(node, "opset4"), - 'opset11': lambda node: infer_for_opsetX(node, "opset11") - } - - def __init__(self, graph: Graph, attrs: dict): - self.attributes_for_opsets = { - 'opset1': [ - ('axes', lambda node: ','.join(map(str, node.axes))), - ('antialias', lambda node: bool_to_str(node, 'antialias')), - ('align_corners', lambda node: bool_to_str(node, 'align_corners')), - 'mode', 'pads_begin', 'pads_end', - ], - 'opset4': [ - 'mode', 'nearest_mode', 'cube_coeff', 'coordinate_transformation_mode', - 'shape_calculation_mode', - ('antialias', lambda node: bool_to_str(node, 'antialias')), - ('pads_begin', lambda node: pad_attribute_to_str(node, 'pads_begin')), - ('pads_end', lambda node: pad_attribute_to_str(node, 'pads_end')), - ] - } - # attributes for opset11 are same as for opset4 - self.attributes_for_opsets['opset11'] = self.attributes_for_opsets['opset4'] - - mandatory_props = { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - - 'axes': None, - 'mode': None, - 'align_corners': 0, - 'antialias': 0, - 'pads_begin': 0, - 'pads_end': 0, - - 'infer': self.infer, - 'force_precision_in_ports': {1: 'int64'}, - 'in_ports_count': 2, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - if self.attrs['version'] == 'opset11' and self.attrs['shape_calculation_mode'] != 'sizes': - del self.attrs['force_precision_in_ports'] - - def supported_attrs(self): - opset = self.get_opset() - key = opset if opset in self.attributes_for_opsets else 'opset1' - return self.attributes_for_opsets[key] - - def infer(self, node: Node): - opset = self.get_opset() - key = opset if opset in self.infers else 'opset1' - self.infers[key](node) - - @staticmethod - def get_axes(node: Node) -> np.ndarray: - opset = node.get_opset() - if opset == 'opset1': - interp_axes = node.soft_get('axes', None) - return interp_axes if interp_axes is None else int64_array(interp_axes) - - src_shape = node.in_port(0).data.get_shape() - assert src_shape is not None - input_rank = len(src_shape) - - if len(node.in_ports()) == 3: - axes = list(range(0, input_rank)) - else: - axes = node.in_port(3).get_source().data.get_value() - return int64_array(axes) diff --git a/tools/mo/openvino/tools/mo/ops/layer_norm.py b/tools/mo/openvino/tools/mo/ops/layer_norm.py deleted file mode 100644 index 02feb1b6399691..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/layer_norm.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class LayerNorm(Op): - """ - MXNet operation which normalizes the channels of the input tensor by mean and variance, and applies a scale 
gamma - and offset beta. Operation computes output with the same shape as input as following formula: - - out = ((data - mean(data, axis)) / sqrt(var(data, axis) + eps)) * gamma + beta - - inputs: - data - input data - gamma - gamma array - beta - beta array - - attributes: - axis - axis to perform layer normalization - eps - epsilon parameter to prevent division by zero - output_mean_var - output the mean and std calculated along the given axis. Default value is False. Non default value - is not supported - """ - - op = 'LayerNorm' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': None, - 'axis': -1, - 'epsilon': 0.001, - 'output_mean_var': False, - 'infer': copy_shape_infer, - 'in_ports_count': 3 if attrs.get('output_mean_var') is True else 1, - 'out_ports_count': 3, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/log_softmax.py b/tools/mo/openvino/tools/mo/ops/log_softmax.py deleted file mode 100644 index 1f1e4491fb5d05..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/log_softmax.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op, PermuteAttrs - - -class LogSoftmax(Op): - op = 'LogSoftmax' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset5', - 'infer': self.infer, - 'axis': 1, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return ['axis'] - - @staticmethod - def infer(node: Node): - assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 1,\ - 'LogSoftmax node with id {} have more than one port connected'.format(node.id) - if node.axis < 0: - node.axis = len(node.in_port(0).data.get_shape()) + node.axis - assert 0 <= node.axis < len(node.in_port(0).data.get_shape()),\ - 'LogSoftmax node with id {} has wrong axis attribute'.format(node.id) - copy_shape_infer(node) - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) - - -class LogSoftmaxONNX(Op): - op = 'LogSoftmaxONNX' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'infer': None, - 'kind': 'op', - 'axis': 1, - 'type': None, # the operation will be replaced with a - # Reshape(LogSoftmax(FlattenONNX(x, axis), 1), x.shape) sub-graph - 'op': self.op, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/loop.py b/tools/mo/openvino/tools/mo/ops/loop.py deleted file mode 100644 index 6c6cd297b87f2a..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/loop.py +++ /dev/null @@ -1,548 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.ops.tensor_iterator import TensorIterator -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, is_fully_defined, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.fusing.helpers import common_bfs -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.ops.const import Const - - -class Loop(TensorIterator): - """ - Loop layer that iterates over tensors and 
execute embedded sub-graph. The main difference from the TensorIterator is - that Loop operation performs explicit slicing of data using special input called "current_iteration". Also the Loop - has special input determining the execution condition and special output producing execution condition for the next - iteration. - """ - op = 'Loop' - - def __init__(self, graph: Graph, attrs: dict): - base_attrs = { - 'type': self.op, - 'op': self.op, - 'version': 'opset5', - 'input_port_map': [], # a list of dicts with such attrs as external_port_id, etc. - 'output_port_map': [], # a list of dicts with such attrs as external_port_id, etc. - 'back_edges': [], # a list of dicts with such attrs as from_layer, from_port, etc. - 'body': None, # an Graph object with a body sub-graph - 'sub_graphs': ['body'], # built-in attribute with all sub-graphs - 'infer': self.infer, - 'type_infer': self.type_infer, - } - base_attrs.update(attrs) - super().__init__(graph, base_attrs) - - def port_map_attrs(self): - return super().port_map_attrs() + ['purpose'] - - @staticmethod - def generate_port_map(node: Node, src_port_map, dir: str): - result_list = [] - for record in src_port_map: - # do not update ids for not-connected output which is used in the Loop operation only - if record['external_port_id'] != -1: - if dir == 'out': # increase the output port id by the number of input ports - # update the port id for proper generation of a "ports" section - record['external_port_id'] += len(node.in_ports()) - record['internal_layer_id'] = TensorIterator.find_internal_layer_id(node.body, record['internal_layer_id']) - result_list.append(record) - return result_list - - @staticmethod - def get_body_node_by_internal_id(loop_node: Node, internal_id: int): - suitable_nodes = loop_node.body.get_op_nodes(internal_layer_id=internal_id) - assert len(suitable_nodes) <= 1, \ - 'Expected 0 or 1 node with `internal_layer_id`={}, {} found'.format(internal_id, len(suitable_nodes)) - return suitable_nodes[0] if len(suitable_nodes) == 1 else None - - @staticmethod - def get_external_nodes_by_internal_id(loop_node: Node, internal_layer_id: int) -> list: - """ - Get a list of nodes from the main graph that are connected with a node with internal_layer_id - from the body graph - - :param loop_node: The Loop node - :param internal_layer_id: Internal layer ID of the node in the body graph - :return: A list of external nodes (from the main graph) that are connected with a node with - internal_layer_id from the body graph - """ - for map_item in loop_node.input_port_map: - if map_item['internal_layer_id'] == internal_layer_id \ - and loop_node.is_in_port_connected(map_item['external_port_id']): - return [loop_node.in_port(map_item['external_port_id']).get_source().node] - for map_item in loop_node.output_port_map: - if map_item['internal_layer_id'] == internal_layer_id \ - and loop_node.is_out_port_connected(map_item['external_port_id']): - return [dest.node for dest in loop_node.out_port(map_item['external_port_id']).get_destinations()] - return [] - - @staticmethod - def updated_body_parameters_shape(loop_node: Node): - """ - Update shape for Loop body parameters. - The input shape of the "current_iteration" number input is handled separately because it is not connected with - the Loop operation. 
- - :param loop_node: The Loop node - :return: None - """ - for record in loop_node.input_port_map: - body_node = Loop.get_body_node_by_internal_id(loop_node, record['internal_layer_id']) - # the Parameter may be removed because it was not used in the body, for example, the current iteration - # number input - if body_node is not None: - assert body_node.soft_get('type') == 'Parameter' - - input_shape = shape_array([]) # this is a current iteration number input shape - loop_port_idx = record['external_port_id'] - if loop_port_idx != -1: - input_shape = loop_node.in_port(loop_port_idx).get_connection().get_source().data.get_shape() - slice_axis = record['axis'] - body_node.shape = input_shape.copy() - if slice_axis is not None: - body_node.shape[slice_axis] = 1 - log.debug('Updated shape for the body node with internal_id "{}" with value {}' - ''.format(record['internal_layer_id'], body_node.shape)) - - @staticmethod - def updated_loop_output_ports_shape_and_value(loop_node: Node): - """ - Update shape and values for Loop output ports. If the number of iterations is dynamic then the corresponding - dimension for the scan outputs (having "axis" attribute) are set to 1 because MO cannot generate IR with - undefined dimensions. - - :param loop_node: The Loop node - :return: None - """ - loop_name = loop_node.soft_get('name', loop_node.id) - for record in loop_node.output_port_map: - body_node = Loop.get_body_node_by_internal_id(loop_node, record['internal_layer_id']) - assert body_node is not None - assert body_node.soft_get('type') == 'Result' - - loop_port_idx = record['external_port_id'] - if loop_port_idx != -1: # the id = -1 for execution condition output which is not connected anywhere - output_value = body_node.in_port(0).data.get_value() - output_shape = body_node.in_port(0).data.get_shape().copy() - concat_axis = record['axis'] - if concat_axis is not None: - assert output_shape[concat_axis] == 1, 'Dimension for concatenation is not equal to 1 for scan ' \ - 'output for Loop node "{}" for loop output port "{}"'.\ - format(loop_name, loop_port_idx) - output_shape[concat_axis] = Loop.iterations_count(loop_node) - # MO does not support evaluation of Loop scan outputs with const values - if concat_axis is None and output_value is not None: - loop_node.out_port(loop_port_idx).data.set_value(output_value) - else: - loop_node.out_port(loop_port_idx).data.set_shape(output_shape) - - @staticmethod - def iterations_count(loop_node: Node): - """ - Try to determine the number of loop iterations. If we detect that the number is dynamic then return None. - - :param loop_node: Loop operation node - :return: number of iterations or dynamic_dimensions if the number depends on runtime values. 
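# Illustrative sketch of the record layout appended to loop_node.input_port_map by the
# helpers below (see connect_body_input / mark_current_iteration_parameter_node); the
# concrete values are hypothetical.
example_input_port_map_record = {
    'axis': 0,                # dimension used for per-iteration slicing (None = pass whole tensor)
    'start': 0, 'end': -1, 'stride': 1, 'part_size': 1,
    'external_port_id': 2,    # Loop input port in the outer graph (-1 = not connected to the Loop)
    'internal_layer_id': 5,   # internal_layer_id of the corresponding body Parameter
}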
- """ - assert loop_node.soft_get('type') == 'Loop' - - if loop_node.is_in_port_connected(1): - execution_condition = loop_node.in_port(1).data.get_value() - if not is_fully_defined(execution_condition): # dynamic execution condition - return dynamic_dimension_value - execution_condition = execution_condition.item() - if not execution_condition: # 0 iterations - return 0 - num_iterations = loop_node.in_port(0).data.get_value() - if not is_fully_defined(num_iterations): - return dynamic_dimension_value - if num_iterations is not None: - num_iterations = num_iterations.item(0) - # in some ONNX models the num_iterations input is equal to max(int64) meaning dynamic number of iterations - if num_iterations < 0 or num_iterations == np.iinfo(np.int64).max: - return dynamic_dimension_value - return num_iterations - - @staticmethod - def update_body_parameters_type(loop_node: Node): - """ - Update the data type for Loop body Parameter nodes based on data type of the outer graph nodes producing data - for them. - - :param loop_node: The Loop node - :return: None - """ - assert loop_node.soft_get('type') == 'Loop' - for record in loop_node.input_port_map: - body_node = Loop.get_body_node_by_internal_id(loop_node, record['internal_layer_id']) - # the Parameter may be removed because it was not used in the body, for example, the current iteration - # number input - if body_node is not None: - assert body_node.soft_get('type') == 'Parameter' - - loop_port_idx = record['external_port_id'] - if loop_port_idx != -1: - input_type = loop_node.in_port(loop_port_idx).get_data_type() - else: # this is a current iteration number input node which is not connected to the Loop node - assert record['purpose'] == 'current_iteration' - input_type = np.int64 - - body_node.data_type = input_type - log.debug('Updated data type for the body node with internal_id "{}" with value {}' - ''.format(record['internal_layer_id'], body_node.data_type)) - - @staticmethod - def update_loop_output_ports_type(loop_node: Node): - """ - Update the data type for Loop output ports. 
- - :param loop_node: The Loop node - :return: None - """ - assert loop_node.soft_get('type') == 'Loop' - for record in loop_node.output_port_map: - body_node = Loop.get_body_node_by_internal_id(loop_node, record['internal_layer_id']) - assert body_node is not None - assert body_node.soft_get('type') == 'Result' - - loop_port_idx = record['external_port_id'] - if loop_port_idx != -1: # the id = -1 for execution condition output which is not connected anywhere - output_type = body_node.in_port(0).get_data_type() - loop_node.out_port(loop_port_idx).set_data_type(output_type) - - @staticmethod - def mark_current_iteration_parameter_node(loop_node: Node, body_parameter_node: Node): - assert body_parameter_node.id in loop_node.body - assert body_parameter_node.soft_get('op') == 'Parameter' - assert body_parameter_node.has_valid('internal_layer_id') - assert len(loop_node.body.get_op_nodes(purpose='current_iteration')) == 0 - - loop_node.input_port_map.append({'axis': None, 'stride': None, 'part_size': None, 'start': None, 'end': None, - 'external_port_id': -1, 'purpose': 'current_iteration', - 'internal_layer_id': body_parameter_node['internal_layer_id']}) - - @staticmethod - def mark_execution_condition_result_node(loop_node: Node, body_result_node: Node): - assert body_result_node.id in loop_node.body - assert body_result_node.soft_get('op') == 'Result' - assert body_result_node.has_valid('internal_layer_id') - assert len(loop_node.body.get_op_nodes(purpose='execution_condition')) == 0 - - loop_node.output_port_map.append({'axis': None, 'stride': None, 'part_size': None, 'start': None, 'end': None, - 'external_port_id': -1, 'purpose': 'execution_condition', - 'internal_layer_id': body_result_node['internal_layer_id']}) - - @staticmethod - def external_port_id_to_body_node(loop_node: Node, external_port_id: int, port_map: dict): - """ - Return the body Node connected to the Loop port with number "external_port_id". - - :param loop_node: the Loop node - :param external_port_id: the Loop node port idx - :param port_map: the port_map value to look for external_port_id - :return: the corresponding body Node - """ - assert loop_node.soft_get('type') == 'Loop' - body_graph = loop_node.body - result_nodes = [] - for record in port_map: - if record['external_port_id'] == external_port_id: - result_nodes.extend(body_graph.get_op_nodes(internal_layer_id=record['internal_layer_id'])) - assert len(result_nodes) == 1, 'There should be just one body node for external port "{}", but there "{}"' \ - ''.format(external_port_id, len(result_nodes)) - return result_nodes[0] - - @staticmethod - def connect_body_input(loop_node: Node, loop_input_port_idx: int, body_parameter: Node, - axis: [int, None] = None, start: [int, None] = None, end: [int, None] = None, - stride: [int, None] = None, part_size: [int, None] = None): - """ - Update the input port map to connect the input port with the specified body parameter - - :param loop_node: the Loop node - :param loop_input_port_idx: the input port index to connect - :param body_parameter: the body parameter node to connect - :param axis: dimension for input slicing - :param start: start value of dimension from which to start slicing - :param end: end value of dimension when to finish slicing - :param stride: a step value for slicing - :param part_size: a partial size for slicing, i.e. 
slicing [start; start + part_size) - :return: None - """ - assert loop_node.soft_get('op') == 'Loop' - assert body_parameter.soft_get('op') == 'Parameter' - assert body_parameter.id in loop_node.body - - loop_node.input_port_map.append({'axis': axis, 'stride': stride, 'part_size': part_size, 'start': start, - 'end': end, 'external_port_id': loop_input_port_idx, - 'internal_layer_id': body_parameter['internal_layer_id']}) - - @staticmethod - def connect_body_output(loop_node: Node, loop_output_port_idx: int, internal_result: Node, axis: [int, None] = None, - start: [int, None] = None, end: [int, None] = None, stride: [int, None] = None, - part_size: [int, None] = None): - """ - Update the output port map to connect the body Result node with the specified output port - - :param loop_node: the Loop node - :param loop_output_port_idx: the output port index to connect - :param internal_result: the body Result node to connect - :param axis: dimension for output concatenation - :param start: start value of dimension from which to start concatenation - :param end: end value of dimension when to finish concatenation - :param stride: a step value for concatenation - :param part_size: a partial size for concatenation, i.e. concatenation [start; start + part_size) - :return: None - """ - assert loop_node.soft_get('op') == 'Loop' - assert internal_result.soft_get('op') == 'Result' - assert internal_result.id in loop_node.body - - loop_node.output_port_map.append({'axis': axis, 'stride': stride, 'part_size': part_size, 'start': start, - 'end': end, 'external_port_id': loop_output_port_idx, - 'internal_layer_id': internal_result['internal_layer_id']}) - - @staticmethod - def add_back_edge(loop_node: Node, internal_parameter: Node, internal_result: Node): - assert internal_parameter.id in loop_node.body - assert internal_parameter.soft_get('op') == 'Parameter' - assert internal_result.id in loop_node.body - assert internal_result.soft_get('op') == 'Result' - - loop_node.back_edges.append({'from_layer': internal_result['internal_layer_id'], - 'to_layer': internal_parameter['internal_layer_id'], - 'from_port': 0, - 'to_port': 0}) - - @staticmethod - def parameter_unchanged_after_iteration(loop_node: Node, body_parameter: Node): - """ - Checks if the body Parameter node is connected to some body Result and the data provided to Result is not - changed between iterations. The data is considered unchanged if: - 1. There is no back edge for this Parameter OR - 2. There is a back edge from some Result to Parameter and there are only Identity ops in between or - Parameter is connected to Result directly. 
- - :param loop_node: the Loop node to check - :param body_parameter: the body Parameter node - :return: the result of the check - """ - assert body_parameter.id in loop_node.body - assert body_parameter.soft_get('op') == 'Parameter' - if not any([attr['to_layer'] == body_parameter.soft_get('internal_layer_id') for attr in loop_node.back_edges]): - return True - - for back_edge_attrs in loop_node.back_edges: - if back_edge_attrs['to_layer'] == body_parameter.soft_get('internal_layer_id'): - result_internal_id = back_edge_attrs['from_layer'] - result_nodes = loop_node.body.get_op_nodes(internal_layer_id=result_internal_id) - assert len(result_nodes) == 1, 'There should be exactly one node with id {}, but there are {}' \ - ''.format(result_internal_id, len(result_nodes)) - result_node = result_nodes[0] - # check that the Result node consumes data from Parameter node directly or through Identity operations - parameters = common_bfs(result_node, ['Identity'], ['Parameter'], is_backward=True, attr_to_check='op', - follow_multi_consumer_data_nodes=True) - if any([node.soft_get('internal_layer_id') == body_parameter.internal_layer_id for node in parameters]): - return True - return False - - @staticmethod - def pull_constant_inputs_into_body(loop_node: Node): - for port_idx, in_port in reversed(loop_node.in_ports().items()): - if port_idx > 1 and not in_port.disconnected() and in_port.get_source().node.soft_get('type') == 'Const': - body_parameter = Loop.external_port_id_to_body_node(loop_node, port_idx, loop_node.input_port_map) - # if there is a back edge into a body Parameter then we cannot replace it with a Const if the value - # is updated during each iteration. So we need to check that the tensor is passed to the next iteration - # unchanged - if not Loop.parameter_unchanged_after_iteration(loop_node, body_parameter): - continue - - original_const_node = in_port.get_source().node - new_const_node = Const(loop_node.body, original_const_node.attrs()).create_node() - - body_parameter.out_port(0).get_connection().set_source(new_const_node.out_port(0)) - loop_node.body.remove_nodes_from([body_parameter.id]) - loop_node.delete_input_port(port_idx) - - @staticmethod - def update_port_map_value(port_map: dict, attr: str, original_value: int, new_value: int): - matched = 0 - for record in port_map: - if record[attr] == original_value: - record[attr] = new_value - matched += 1 - assert matched == 1, 'More than one record in the portmap for attr "{}" with original value "{}"' \ - ''.format(attr, original_value) - - @staticmethod - def update_port_map_value_ext(port_map: dict, layer_id_attr: str, layer_id_value: int, - updated_attr: str, new_attr_value: int): - """ - Updates a value of requested attribute for a certain layer id in a port map - :param port_map: a map of external ports to internal layer ids - :param layer_id_attr: layer id attribute for which to update attribute - :param layer_id_value: layer id value for which to update attribute - :param updated_attr: a name of attribute which to update - :param new_attr_value: new value of attribute - """ - matched = 0 - for record in port_map: - if record.get(layer_id_attr) == layer_id_value: - record[updated_attr] = new_attr_value - matched += 1 - assert matched == 1, 'More than one record in the portmap for attr "{}" with original value "{}"' \ - ''.format(layer_id_attr, layer_id_value) - - @staticmethod - def back_edge_exists(back_edges_map: dict, from_layer: int, to_layer: int): - """ - Checks if a back edge exists in the back_edges_map 
connecting specific nodes - :param back_edges_map: a map where to search for specified back edge - :param from_layer: id of Result node that belongs a back edge - :param to_layer: id of Parameter node that belongs a back edge - :return: True or False - """ - for back_edge in back_edges_map: - if back_edge['from_layer'] == from_layer and back_edge['to_layer'] == to_layer: - return True - return False - - @staticmethod - def inter_edge_exists(port_map: dict, external_port_id: int, internal_layer_id: int): - """ - Check if inter-graph edge (i.e. an edge between the main graph and body graph) exists - :param port_map: a port map where to search for inter-graph edge - :param external_port_id: port index from/to which edge goes - :param internal_layer_id: layer id from/to which edge goes - :return: True or False - """ - for i_port in port_map: - if i_port['external_port_id'] == external_port_id and \ - i_port['internal_layer_id'] == internal_layer_id: - return True - return False - - @staticmethod - def re_numerate_input_ports(loop_node: Node): - """ - Update input ports ids to be consecutive from 0 to num_input_ports - 1 and update the port_map values of the - Loop node. - - :param loop_node: the Loop node - :return: None - """ - def re_number_input_port(loop_node: Node, old_port_id: int, new_port_id: int): - loop_node.add_input_port(new_port_id, skip_if_exist=True) - loop_node.in_port(old_port_id).get_connection().set_destination(loop_node.in_port(new_port_id)) - Loop.update_port_map_value(loop_node.input_port_map, 'external_port_id', old_port_id, new_port_id) - - if len(loop_node.in_ports()) > 0: - max_port_id = sorted(loop_node.in_ports().keys())[-1] - new_port_id = 0 - for port_id in range(max_port_id + 1): - if loop_node.is_in_port_connected(port_id): - if port_id != new_port_id: - re_number_input_port(loop_node, port_id, new_port_id) - new_port_id += 1 - - for port_idx_to_remove in reversed(range(new_port_id, max_port_id + 1)): - if port_idx_to_remove in loop_node.in_ports().keys(): - loop_node.delete_input_port(port_idx_to_remove) - - @staticmethod - def re_numerate_output_ports(loop_node: Node): - """ - Update output ports ids to be consecutive from 0 to num_output_ports - 1 and update the port_map values of the - Loop node. - - :param loop_node: the Loop node - :return: None - """ - assert loop_node.soft_get('type') == 'Loop' - - def re_number_output_port(loop_node: Node, old_port_id: int, new_port_id: int): - loop_node.add_output_port(new_port_id, skip_if_exist=True) - loop_node.out_port(old_port_id).get_connection().set_source(loop_node.out_port(new_port_id)) - Loop.update_port_map_value(loop_node.output_port_map, 'external_port_id', old_port_id, new_port_id) - - if len(loop_node.out_ports()) > 0: - max_port_id = sorted(loop_node.out_ports().keys())[-1] - new_port_id = 0 - for port_id in range(max_port_id + 1): - if port_id in loop_node.out_ports(): - if port_id != new_port_id: - re_number_output_port(loop_node, port_id, new_port_id) - new_port_id += 1 - - for port_idx_to_remove in reversed(range(new_port_id, max_port_id + 1)): - if port_idx_to_remove in loop_node.out_ports().keys(): - loop_node.delete_output_port(port_idx_to_remove) - - @staticmethod - def remove_unused_ops_from_port_map(loop_node: Node, port_map: dict, port_map_attr: str, dir: [None, str] = None): - """ - Find unused operations in the Loop body referenced in the port_map and removes Loop ports connected to it. - Loop input port with index 0 and 1 are mandatory so cannot be removed. 
- Output ports of the Loop may not be connected so check for that case also and remove such an ops from the - port_map. The only exception is the "execution_condition" output which is a mandatory. - - :param loop_node: the Loop node to update - :param port_map: the port_map (input, output or back edges) - :param port_map_attr: the port_map attribute containing the `internal_layer_id` - :param dir: the direction of the port_map meaning 'in' or 'out' port of the Loop - :return: - """ - record_ids_to_remove = [] - for record_id, record in enumerate(port_map): - if len(loop_node.body.get_op_nodes(internal_layer_id=record[port_map_attr])) == 0 or \ - (dir == 'out' and record.get('purpose', "") != 'execution_condition' and - record['external_port_id'] not in loop_node.out_ports()): - record_ids_to_remove.append(record_id) - for record_id_to_remove in reversed(record_ids_to_remove): - if dir in ['in', 'out']: - port_to_remove = port_map[record_id_to_remove]['external_port_id'] - if port_to_remove != -1: - if dir == 'in': - # input port 0 and 1 are mandatory for the Loop node - if port_to_remove not in [0, 1] and port_to_remove in loop_node.in_ports().keys(): - loop_node.delete_input_port(port_to_remove) - elif dir == 'out' and port_to_remove in loop_node.out_ports(): - loop_node.delete_output_port(port_to_remove) - del port_map[record_id_to_remove] - - @staticmethod - def normalize_input_output_ports(loop_node: Node): - """ - Remove inputs, outputs, back edges from the port map which are not used in the body and were removed by a - graph clean up, for example, in case of not used current_iteration body Parameter. Then renumber input/output - ports. - - :param loop_node: the Loop node to normalize - :return: None - """ - Loop.remove_unused_ops_from_port_map(loop_node, loop_node.input_port_map, 'internal_layer_id', 'in') - Loop.remove_unused_ops_from_port_map(loop_node, loop_node.output_port_map, 'internal_layer_id', 'out') - Loop.remove_unused_ops_from_port_map(loop_node, loop_node.back_edges, 'to_layer') - - # remove not connected input/output ports - Loop.re_numerate_input_ports(loop_node) - Loop.re_numerate_output_ports(loop_node) - - @staticmethod - def infer(loop_node: Node): - Loop.updated_body_parameters_shape(loop_node) - partial_infer(loop_node.body) - Loop.updated_loop_output_ports_shape_and_value(loop_node) - - @staticmethod - def type_infer(loop_node: Node): - from openvino.tools.mo.middle.passes.infer import type_infer - Loop.update_body_parameters_type(loop_node) - type_infer(loop_node.body) - Loop.update_loop_output_ports_type(loop_node) diff --git a/tools/mo/openvino/tools/mo/ops/lrn.py b/tools/mo/openvino/tools/mo/ops/lrn.py deleted file mode 100644 index f7c0337e510474..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/lrn.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class LRN(Op): - op = 'LRN' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - assert 'alpha' in attrs, 'LRN operation should have `alpha` parameter set while creation' - assert 'beta' in attrs, 'LRN operation should have `beta` parameter set while creation' - assert 'bias' in attrs, 'LRN operation should have `bias` parameter set while creation' - assert 'size' in attrs, 'LRN operation should have `size` parameter set while creation' - assert 
'region' not in attrs, \ - 'LRN operation should not have `region` parameter set while creation, please use AttributedLRN operation ' \ - 'instead or keep using LRN operation with region expressed as second `axis`-input' - - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - - 'infer': self.infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return ['alpha', 'beta', 'bias', 'size'] - - @staticmethod - def infer(node): - name = node.soft_get('name', node.id) - - connected_inputs = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_inputs) == 2 and 0 in connected_inputs and 1 in connected_inputs, \ - 'LRN should have 2 connected input ports, but it doesn`t for node: `{}`. Ports: {}' \ - ''.format(name, connected_inputs) - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None, 'Input shape is unknown for node {}'.format(name) - node.out_port(0).data.set_shape(input_shape) - - -class AttributedLRN(Op): - op = 'AttributedLRN' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - assert 'alpha' in attrs, 'AttributedLRN operation should have `alpha` parameter set while creation' - assert 'beta' in attrs, 'AttributedLRN operation should have `beta` parameter set while creation' - assert 'local_size' in attrs, 'AttributedLRN operation should have `local_size` parameter set while creation' - - super().__init__(graph, { - 'op': self.op, - 'type': 'Norm', - 'version': 'opset1', - - 'bias': 1, - 'region': 'across', - 'infer': self.infer, - - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - assert 'region' in self.attrs, 'AttributedLRN operation should have `region` parameter set while creation' - assert self.attrs['region'] in ['across', 'same'], \ - 'AttributedLRN operation should have `region` parameter set to `across` or `same`, but it is `{}`' \ - ''.format(self.attrs['region']) - - def supported_attrs(self): - return [ - 'alpha', - 'beta', - ('local-size', lambda node: node.local_size), - 'region' # deprecated in V10 attribute, but it is kept for V6 compatibility - ] - - @staticmethod - def infer(node): - name = node.soft_get('name', node.id) - - connected_inputs = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_inputs) == 1 and 0 in connected_inputs, \ - 'AttributedLRN should have 1 connected input port, but it doesn`t for node: `{}`. Ports: {}' \ - ''.format(name, connected_inputs) - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None, 'Input shape is unknown for node {}'.format(name) - node.out_port(0).data.set_shape(input_shape) diff --git a/tools/mo/openvino/tools/mo/ops/lstm_cell.py b/tools/mo/openvino/tools/mo/ops/lstm_cell.py deleted file mode 100644 index 0de05611d0644b..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/lstm_cell.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins, compatible_dims -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class LSTMCell(Op): - ''' A single LSTM cell (without a loop). 
- - 3 inputs: - - [0, required] input data (2D), - - [1, required] initial hidden state (2D), - - [2, required] initial cell state (2D), - - 2 blobs: - - [3, required] LSTM FC weights - - [4, required] LSTM FC biases - - 2 outputs: - - [required] output data / resulting hidden state (2D) - - [required] resulting cell state (2D) - ''' - op = 'LSTMCell' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset4', - 'infer': self.infer, - 'in_ports_count': 5, - 'out_ports_count': 2, - 'wr_input_id': 3, - 'gates_count': 4 - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'hidden_size', # number of the elements in hidden cell size - 'activations', - 'activation_alpha', - 'activation_beta', - 'clip', - ] - - def backend_attrs(self): - return [ - 'hidden_size', # number of the elements in hidden cell size - ('activations', lambda node: ','.join(node['activations']) if node.has_and_set('activations') else None), - ('activations_alpha', lambda node: ','.join(map(str, node['activations_alpha'])) - if node.has_and_set('activations_alpha') else None), - ('activations_beta', lambda node: ','.join(map(str, node['activations_beta'])) - if node.has_and_set('activations_beta') else None), - 'clip', - ] - - @staticmethod - def infer(node: Node): - if node.has_and_set('extra_inputs'): - assert len(node.in_nodes()) == 8 - else: - assert len(node.in_nodes()) == 5 - assert len(node.out_nodes()) in [1, 2] - - hidden_shape = node.in_node(1).shape.copy() - cell_shape = node.in_node(2).shape.copy() - - mark_input_bins(node, start_port=3) - node.out_node(0).shape = hidden_shape - if len(node.out_nodes()) == 2: - node.out_node(1).shape = cell_shape - - hidden_size = hidden_shape[1] - - if node.has_valid('hidden_size'): - if node.hidden_size != hidden_size: - raise Error("Input shape {} for hidden size doesn't match pre-defined hidden_size in node {}".format( - node.in_node(1).shape, node.soft_get('name'))) - else: - node['hidden_size'] = hidden_size - - assert cell_shape[1] == hidden_size - - input_shape = node.in_node(0).shape - assert input_shape is not None - assert compatible_dims(hidden_shape[0], cell_shape[0]) and \ - compatible_dims(cell_shape[0], input_shape[0]), 'States are not broadcast-able by batch for node {}' \ - ''.format(node.soft_get('name', node.id)) diff --git a/tools/mo/openvino/tools/mo/ops/lstm_sequence.py b/tools/mo/openvino/tools/mo/ops/lstm_sequence.py deleted file mode 100644 index 8db2e629c9af92..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/lstm_sequence.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins, shape_array, shape_insert -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node, add_opoutput, Graph -from openvino.tools.mo.ops.op import Op - - -class LSTMSequence(Op): - """ Implements a layer that incorporates LSTM cell in a loop like it is specified in ONNX - - It is assumed that there is no equivalent of this op in IE, - so it is considered as intermediate operation that will be translated differently. - We define type for this operation to enable debuggin at OV side. - - There are several flavors of this op depending on how it was created and in which framework. 
- There are several attributes that specifies the LSTM flavor: - - ONNX/LSTM gives this op in non-normalized form and will require normalization - as a separate transformation (see LSTMSequenceNormalize middle transformation); - in this case blobs_wrb=True. Normalized weights/biases for MatMul is used when - blobs_wrb=True. - - ONNX/LSTM defines output shape as 4D: [seq_length, num_directions, batch_size, - hidden_size], where num_directions = 1 is supported only. In this case - has_num_directions=True. Otherwise, output is 3D and doesn't contain num_directions. - - Depending on the original framework, `format` attrtibutes is specified accordingly. - Its value controls which normalize transformations are called. - """ - op = 'LSTMSequence' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, # should be never emitted to IR; for debugging purposes - 'op': self.op, - 'blobs_wrb': False, - 'has_num_directions': False, - 'direction': 'forward', - 'num_layers': 1, - 'infer': self.infer, - 'blob_bidirectional_split': lambda node: ( - LSTMSequence.split_helper(node, 0, 'forward'), - LSTMSequence.split_helper(node, 1, 'reverse') - ) - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'hidden_size', # number of the elements in hidden cell size - 'direction', # one of 'forward', 'reverse', or 'bidirectional' - 'batch_dim', # batch dimension index in input/output shape - 'sequence_dim', # sequence dimension index in input shape - 'blobs_wrb', # input blobs have three separate components W, R and B like in ONNX/LSTM - 'has_num_directions', # if True, output shape has 4 dimensions; 3D otherwise - 'format', # format type of input blobs for different frameworks (onnx, tf) - ] - - def backend_attrs(self): - return [ - 'hidden_size', - ('activations', lambda node: ','.join(node['activations']) if node.has_and_set('activations') else None), - ('activations_alpha', lambda node: ','.join(map(str, node['activations_alpha'])) - if node.has_and_set('activations_alpha') else None), - ('activations_beta', lambda node: ','.join(map(str, node['activations_beta'])) - if node.has_and_set('activations_beta') else None), - 'clip', - 'direction', - ] - - @staticmethod - def split_helper(node, index: int, direction: str): - return Op._create_data_node( - node.graph, - name=node.name + '/SplittedBiLSTM/{}/'.format(direction), - attrs={'value': node.value[index], 'shape': int64_array(node.value[index].shape)} - ) - - @staticmethod - def infer(node: Node): - # there are limitations coming from ONNX LSTM definition and normalization rules - assert len(node.in_nodes()) >= 3 # X, W and R - assert len(node.in_nodes()) <= 7 - assert len(node.out_nodes()) <= 3 - assert node.batch_dim <= 1 - assert node.sequence_dim <= 1 - assert node.batch_dim != node.sequence_dim - - assert node.direction in ['forward', 'reverse', 'bidirectional'] - - if node.blobs_wrb: - mark_input_bins(node, ['W', 'R', 'B']) - else: - mark_input_bins(node) - input_shape = node.in_node(0).shape - assert len(input_shape) == 3 - - for port in [2, 3]: - if port in node.in_nodes() and len(node.in_node(port).in_nodes()) > 0 and \ - 'zero_shapes' in node.in_node(port).in_node(): - for i in node.in_node(port).in_node().zero_shapes: - if node.in_node(port).shape[i] != input_shape[i]: - node.in_node(port).value = np.repeat(node.in_node(port).value, input_shape[i], axis=i) - node.in_node(port).shape[i] = input_shape[i] - - out_shape = shape_array([input_shape[node.sequence_dim], 
input_shape[node.batch_dim], node.hidden_size]) - assert not node.has_num_directions or node.sequence_dim == 0, \ - 'If has_num_directions == True, then node.sequence_dim should be equal 0, but it is {}'.format( - node.sequence_dim) - num_directions = 2 if node.direction in ['bidirectional'] else 1 - num_layers = node.num_layers - if node.has_num_directions: - # insert extra dimension to output shape for num_directions - out_shape = shape_insert(out_shape, 1, np.int64(num_directions)) - node.out_node(0).shape = out_shape - # extra outputs for hidden/cell states - state_size = shape_array([input_shape[1], node.hidden_size]) - if node.has_num_directions: - state_size = shape_insert(state_size, 0, num_directions * num_layers) - for i in [1,2]: - if i not in node.out_nodes(): - data_node = Op._create_data_node( - node.graph, - name=node.node+'/ExtraOutput/' + str(i), - attrs={'executable': True} - ) - node.graph.add_edge(node.id, data_node.id, key=0, out=i) - add_opoutput(node.graph, data_node.id, 0, False) - else: - data_node = node.out_node(i) - data_node.shape = state_size.copy() diff --git a/tools/mo/openvino/tools/mo/ops/lstmnonlinearity.py b/tools/mo/openvino/tools/mo/ops/lstmnonlinearity.py deleted file mode 100644 index cff8da3a98c1df..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/lstmnonlinearity.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class LstmNonLinearity(Op): - """ - """ - op = 'LstmNonLinearity' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': __class__.op, - 'use_dropout': False, - 'type': None, # type is None because this operation should not appear in IR - 'infer': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/memory.py b/tools/mo/openvino/tools/mo/ops/memory.py deleted file mode 100644 index 5c337eaf588245..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/memory.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -class Memory(Op): - op = 'Memory' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': 'Memory', - 'op': 'Memory', - 'id': None, - 'size': None, - 'index': None, - 'infer': Memory.infer, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'type_infer': __class__.type_infer, - }, attrs) - - def supported_attrs(self): - return ['id', 'size', 'index'] - - @staticmethod - def infer(node: Node): - if len(node.in_nodes()) > 0: - # In case this is a memory node with input, - # It should not have output - # However in order not to break MO pipeline, - # we just set the same shape to the output - # node that will be removed later in pipeline - copy_shape_infer(node) - return - elif node.has_valid('shape'): - # For Memories, that has not input infer shapes is very difficult - # But often we can know shape in extracting 
attributes - # And we can set the attribute 'shape' in extracting - batch = 1 - for out_node in node.out_nodes().values(): - out_node.shape = shape_array([batch, *node.shape[:]]) - return - else: - raise Error('Model Optimizer is unable to calculate output shape of Memory node {}. ' + - refer_to_faq_msg(88), - node.id) - - @staticmethod - def type_infer(node: Node): - if node.has_valid('dst_type'): - node.out_port(0).set_data_type(node.dst_type) - else: - node.out_port(0).set_data_type(data_type_str_to_np(node.graph.graph['cmd_params'].data_type)) diff --git a/tools/mo/openvino/tools/mo/ops/memoryoffset.py b/tools/mo/openvino/tools/mo/ops/memoryoffset.py deleted file mode 100644 index 0e59385b47640e..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/memoryoffset.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op - - -class MemoryOffset(Op): - op = 'MemoryOffset' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': 'MemoryOffset', - 'type': None, - 'pair_name': None, - 'splitted': False, - 'has_default': False, - 'infer': self.infer, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node: Node): - if node.has_valid('element_size'): - # element_size should be set by Kaldi loader or MemoryOffsetAdjustment or SplitRecurrentMemoryOffset - node.out_port(0).data.set_shape(node.element_size) - else: - # for TDNN blocks - copy_shape_infer(node) diff --git a/tools/mo/openvino/tools/mo/ops/merge.py b/tools/mo/openvino/tools/mo/ops/merge.py deleted file mode 100644 index f98a5d12cd51b1..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/merge.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes, shape_array, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class Merge(Op): - op = 'Merge' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'infer': self.merge_infer, - 'cf_infer': self.control_flow_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def merge_infer(node: Node): - # we infer only through executable input nodes - inferred_nodes = [n for n in node.in_nodes().values() if n['is_partial_inferred']] - assert len(inferred_nodes) != 0 - tensor = inferred_nodes[0] - - if len(inferred_nodes) < len(node.in_nodes()): - node['is_not_fully_inferred'] = True - else: - node['is_not_fully_inferred'] = False - assert np.all(compatible_shapes(node.shape, inferred_nodes[0].shape) for node in inferred_nodes) - - inferred_and_executable = [n for n in node.in_nodes().values() if n['is_partial_inferred'] and - 'executable' in n and n['executable']] - if len(inferred_and_executable) > 0: - tensor = inferred_and_executable[0] - - if all([tensor.has_valid('value') and n.has_valid('value') and strict_compare_tensors(tensor.value, - n.value) - for n in inferred_and_executable]): - node.out_node().value = tensor.value.copy() - else: - node.out_node().value = None - - # do not use set_shape(tensor.shape) here because input port shape may be different from the calculated 
output - # shape and `set_shape` will raise an error that shape has changed - node.out_node(0).shape = shape_array(tensor.shape) - - @staticmethod - def control_flow_infer(node: Node, is_executable: bool, mark_executability: callable): - in_data_nodes = node.in_nodes(control_flow=True) - out_data_nodes = node.out_nodes(control_flow=True) - - is_executable = any([d.has_and_set('executable') for i, d in in_data_nodes.items()] - if len(in_data_nodes) else [False]) - - for i, d in out_data_nodes.items(): - mark_executability(d.id, is_executable) - diff --git a/tools/mo/openvino/tools/mo/ops/multinomial.py b/tools/mo/openvino/tools/mo/ops/multinomial.py deleted file mode 100644 index 0bf3c7c3385fc2..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/multinomial.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Graph, Node - -from openvino.tools.mo.ops.op import Op - - -class Multinomial(Op): - op = 'Multinomial' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset13', - 'infer': self.infer, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'type_infer': self.type_infer, - 'with_replacement': False, - 'log_probs': False, - 'global_seed': 0, - 'op_seed': 0, - 'convert_type': np.int64, - }, attrs) - - def backend_attrs(self): - return ['convert_type', - ('with_replacement', lambda node: bool_to_str( - node, 'with_replacement')), - ('log_probs', lambda node: bool_to_str(node, 'log_probs')), - 'global_seed', - 'op_seed'] - - def supported_attrs(self): - return ['convert_type', - 'with_replacement', - 'log_probs', - 'global_seed', - 'op_seed'] - - @staticmethod - def type_infer(node: Node): - assert node.has_valid('convert_type') - if node['convert_type'] == 'i32': - node.out_port(0).set_data_type(np.int32) - else: - node.out_port(0).set_data_type(np.int64) - - @staticmethod - def infer(node: Node): - - input_shape = node.in_node(0).shape - output_shape = [] - if input_shape is not None and input_shape.size == 2: - output_shape.append(input_shape[0]) - - num_samples = node.in_port(1).data.get_value() - if num_samples is not None: - output_shape.append(np.array(num_samples).item()) - else: - output_shape.append(dynamic_dimension_value) - node.out_port(0).data.set_shape(shape_array(output_shape)) diff --git a/tools/mo/openvino/tools/mo/ops/mvn.py b/tools/mo/openvino/tools/mo/ops/mvn.py deleted file mode 100644 index fa6f010e75a48a..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/mvn.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class MVN(Op): - op = 'MVN' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'kind': 'op', - 'type': self.op, - 'op': self.op, - 
'version': 'opset6', - 'eps': None, - 'normalize_variance': None, - 'eps_mode': None, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': self.infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - }, attrs) - - def supported_attrs(self): - return ['eps', 'eps_mode', 'normalize_variance'] - - def backend_attrs(self): - version = self.get_opset() - if version == 'opset2': - return ['eps', - ('across_channels', lambda node: bool_to_str(node, 'across_channels')), - ('normalize_variance', lambda node: bool_to_str(node, 'normalize_variance'))] - elif version == 'opset6': - return ['eps', 'eps_mode', ('normalize_variance', lambda node: bool_to_str(node, 'normalize_variance'))] - else: - raise Error('Unsupported MVN opset version "{}"'.format(version)) - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - assert node.eps is not None, 'MVN required attribute `eps` unspecified for node {}'.format(name) - assert node.normalize_variance is not None, \ - 'MVN required attribute `normalize_variance` unspecified for node {}'.format(name) - - if node.version == 'opset6': - assert node.eps_mode is not None, 'MVN required attribute `eps_mode` unspecified for node {}'.format(name) - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'axis') - - copy_shape_infer(node) - - -class MVNOnnx(Op): - op = 'MVNOnnx' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'kind': 'op', - 'type': None, - 'op': self.op, - 'version': None, - 'eps': None, - 'eps_mode': None, - 'normalize_variance': None, - 'axes': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'infer': None - }, attrs) - - -class MVNCaffe(Op): - op = 'MVNCaffe' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'kind': 'op', - 'type': None, - 'op': self.op, - 'version': None, - 'eps': 1e-9, - 'normalize_variance': None, - 'across_channels': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'infer': None - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/mxfft.py b/tools/mo/openvino/tools/mo/ops/mxfft.py deleted file mode 100644 index fa37ebc63c9b5e..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/mxfft.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op - - -class MXFFT(Op): - """ - This operation is intended to read MxNet operations FFT and IFFT. - The operation MXFFT has one attribute: a boolean attribute is_inverse. - - If an operation to read is FFT, then the attribute 'is_inverse' is False, and True otherwise. - - The transformation MXFFTToDFT converts the operation MXFFT into MO DFT (if the attribute 'is_inverse' - is False), or into MO IDFT (otherwise). - """ - op = 'MXFFT' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'out_ports_count': 1, - 'in_ports_count': 1, - 'infer': self.infer - } - assert 'is_inverse' in attrs, 'Attribute is_inverse is not given for the operation MXFFT.' 
- super().__init__(graph, mandatory_props, attrs) - - def infer(self, node: Node): - node_name = node.soft_get('name', node.id) - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None, 'Input shape of MXFFT node {} must not be None'.format(node_name) - is_inverse = node.soft_get('is_inverse', False) - output_shape = input_shape.copy() - if is_inverse: - output_shape[-1] = output_shape[-1] // 2 - else: - output_shape[-1] = output_shape[-1] * 2 - node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/mxrepeat.py b/tools/mo/openvino/tools/mo/ops/mxrepeat.py deleted file mode 100644 index 004b282abd39a1..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/mxrepeat.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class MXRepeat(Op): - op = 'MXRepeat' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - assert 'axis' in attrs, 'MXRepeat operation should have `axis` parameter set during creation' - assert 'repeats' in attrs, 'MXRepeat operation should have `repeats` parameter set during creation' - - super().__init__(graph, { - 'op': self.op, - 'type': None, - - # operation should be resolved on the front phase, partial inference is not needed - 'infer': None, - - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/nms_rotated.py b/tools/mo/openvino/tools/mo/ops/nms_rotated.py deleted file mode 100644 index 0385de8a9cd66e..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/nms_rotated.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class NMSRotated(Op): - op = 'NMSRotated' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset13', - - 'infer': self.infer, - 'type_infer': self.type_infer, - - 'sort_result_descending': True, - 'output_type': np.int64, - 'clockwise': True, - - 'in_ports_count': 5, - 'out_ports_count': 3, - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return [('sort_result_descending', lambda node: bool_to_str(node, 'sort_result_descending')), - 'output_type', - ('clockwise', lambda node: bool_to_str(node, 'clockwise'))] - - def supported_attrs(self): - return [ - 'sort_result_descending', - 'output_type', - 'clockwise', - ] - - @staticmethod - def infer(node: Node): - num_of_inputs = len(node.in_ports()) - opset = node.get_opset() - required_num_inputs = 5 - input_msg_fmt = 'NMSRotated node {} from {} must have {} inputs' - node_name = node.soft_get('name', node.id) - inputs_msg = input_msg_fmt.format( - node_name, opset, required_num_inputs) - assert num_of_inputs == required_num_inputs, inputs_msg - - node.out_port(0).data.set_shape( - shape_array([dynamic_dimension_value, 3])) - num_of_outputs = len( - [port for port in node.out_ports().values() if 
not port.disconnected()]) - if num_of_outputs >= 2 and node.has_port('out', 1): - node.out_port(1).data.set_shape( - shape_array([dynamic_dimension_value, 3])) - if num_of_outputs >= 3 and node.has_port('out', 2): - node.out_port(2).data.set_shape(shape_array([1])) - - @staticmethod - def type_infer(node: Node): - node.out_port(1).set_data_type(np.float32) - if node.has_valid('output_type') and node['output_type'].lower() == 'i32': - node.out_port(0).set_data_type(np.int32) - node.out_port(2).set_data_type(np.int32) - else: - node.out_port(0).set_data_type(np.int64) - node.out_port(2).set_data_type(np.int64) diff --git a/tools/mo/openvino/tools/mo/ops/non_max_suppression.py b/tools/mo/openvino/tools/mo/ops/non_max_suppression.py deleted file mode 100644 index 1faa431b709359..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/non_max_suppression.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, shape_array, dynamic_dimension_value, \ - set_input_shapes, undefined_shape_of_rank -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class NonMaxSuppression(Op): - op = 'NonMaxSuppression' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset9', - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - 'output_type': np.int64, - 'box_encoding': 'corner', - 'in_ports_count': 5, - 'sort_result_descending': 1, - 'force_precision_in_ports': { - 2: 'int64'}, - 'type_infer': self.type_infer, - } - super().__init__(graph, mandatory_props, attrs) - version = self.get_opset() - if version in ['opset1', 'opset3', 'opset4']: - self.attrs['out_ports_count'] = 1 - elif version in ['opset5', 'opset9']: - self.attrs['out_ports_count'] = 3 - else: - raise Error('Unsupported operation opset version "{}"'.format(version)) - - def backend_attrs(self): - version = self.get_opset() - if version in ['opset3', 'opset4', 'opset5', 'opset9']: - return [('sort_result_descending', lambda node: bool_to_str(node, 'sort_result_descending')), - 'box_encoding', - ('output_type', lambda node: np_data_type_to_destination_type(node.output_type))] - elif version == 'opset1': - return [('sort_result_descending', lambda node: bool_to_str(node, 'sort_result_descending')), - 'box_encoding'] - else: - raise Error('Unsupported operation opset version "{}"'.format(version)) - - @staticmethod - def infer(node: Node): - num_of_inputs = len(node.in_ports()) - opset = node.get_opset() - max_num_of_inputs = 6 if opset in ['opset5', 'opset9'] else 5 - input_msg_fmt = 'NonMaxSuppression node {} from {} must have from 2 to {} inputs' - node_name = node.soft_get('name', node.id) - inputs_msg = input_msg_fmt.format(node_name, opset, max_num_of_inputs) - assert 2 <= num_of_inputs <= max_num_of_inputs, inputs_msg - - boxes_shape = node.in_port(0).data.get_shape() - assert boxes_shape is not None, 'The shape of tensor with boxes is not defined' - scores_shape = node.in_port(1).data.get_shape() - assert scores_shape is not None, 'The shape of tensor with scores is not defined' - assert len(boxes_shape) == 3, 'Length of tensors 
with boxes must be equal to 3' - assert len(scores_shape) == 3, 'Length of tensors with scores must be equal to 3' - - # According to the specification of the operation NonMaxSuppression, - # the input 'max_output_boxes_per_class' (port 2) is optional, with default value 0. - if num_of_inputs >= 3: - max_output_boxes_per_class = node.in_port(2).data.get_value() - else: - max_output_boxes_per_class = 0 - - if not max_output_boxes_per_class: - log.info('Set default "max_output_boxes_per_class" for node {} to number of boxes'.format(node.name)) - max_output_boxes_per_class = boxes_shape[1] - - # convert the np.array value to a scalar to avoid issue with ragged numpy array generation in the shape - # calculation formulas below - if isinstance(max_output_boxes_per_class, np.ndarray): - max_output_boxes_per_class = max_output_boxes_per_class.item() - - num_classes = scores_shape[1] - num_input_boxes = boxes_shape[1] - assert scores_shape[2] is dynamic_dimension or scores_shape[2] == num_input_boxes or scores_shape[2] is None \ - or num_input_boxes is None, 'Number of boxes mismatch for operation {}'.format(node_name) - - if node.get_opset() in ['opset4', 'opset5', 'opset9']: - max_number_of_boxes = min(num_input_boxes, max_output_boxes_per_class) * boxes_shape[0] * num_classes - else: - max_number_of_boxes = min(num_input_boxes, boxes_shape[0] * max_output_boxes_per_class * num_classes) - node.out_port(0).data.set_shape(shape_array([max_number_of_boxes, 3])) - - if opset in ['opset5', 'opset9']: - node.out_port(0).data.set_shape(shape_array([dynamic_dimension_value, 3])) - num_of_outputs = len([port for port in node.out_ports().values() if not port.disconnected()]) - if num_of_outputs >= 2 and node.has_port('out', 1): - node.out_port(1).data.set_shape(shape_array([dynamic_dimension_value, 3])) - if num_of_outputs >= 3 and node.has_port('out', 2): - node.out_port(2).data.set_shape(shape_array([1])) - - @staticmethod - def type_infer(node): - opset = node.get_opset() - if opset in ['opset5', 'opset9']: - node.out_port(0).set_data_type(node.output_type) - if node.has_port('out', 1): - node.out_port(1).set_data_type(np.float32) - if node.has_port('out', 2): - node.out_port(2).set_data_type(np.int64) - elif opset in ['opset3', 'opset4']: - node.out_port(0).set_data_type(node.output_type) - else: - node.out_port(0).set_data_type(np.int64) - - @staticmethod - def reverse_infer(node): - set_input_shapes(node, undefined_shape_of_rank(3), undefined_shape_of_rank(3)) diff --git a/tools/mo/openvino/tools/mo/ops/non_zero.py b/tools/mo/openvino/tools/mo/ops/non_zero.py deleted file mode 100644 index da8f6bd031b07b..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/non_zero.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, dynamic_dimension_value -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op - - -class NonZero(Op): - op = 'NonZero' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - assert 'output_type' in attrs, 'NonZero has mandatory `output_type` attribute' - mandatory_props = { - 'op': self.op, - 'type': self.op, - 'version': 'opset3', - - 'infer': self.infer, - 'type_infer': self.type_infer, - - 
'in_ports_count': 1, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return [ - ('output_type', lambda node: np_data_type_to_destination_type(node.output_type)), - ] - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None, 'The input shape for node "{}" is None'.format(node_name) - assert node.has_valid('output_type'), \ - '`output_type` attribute is not set for NonZero node `{}`'.format(node_name) - assert node.output_type in [np.int64, np.int32], \ - 'NonZero `output_type` attribute must be int32 or int64, `{}` found'.format(np.dtype(node.output_type).name) - - input_value = node.in_port(0).data.get_value() - if is_fully_defined(input_value): - node.out_port(0).data.set_value(mo_array(np.nonzero(input_value), dtype=node.output_type)) - else: - if is_fully_defined(input_shape): - # output shape of NonZero is still static (upper bound) - node.out_port(0).data.set_shape([len(input_shape), np.prod(input_shape)]) - else: - node.out_port(0).data.set_shape([len(input_shape), dynamic_dimension_value]) - - @staticmethod - def type_infer(node): - assert node.output_type in [np.int64, np.int32], \ - 'NonZero `output_type` attribute must be int32 or int64, `{}` found'.format(np.dtype(node.output_type).name) - node.out_port(0).set_data_type(node.output_type) diff --git a/tools/mo/openvino/tools/mo/ops/normalize.py b/tools/mo/openvino/tools/mo/ops/normalize.py deleted file mode 100644 index 4bcaa82e9f1784..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/normalize.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op - - -class NormalizeOp(Op): - op = 'Normalize' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'eps': None, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': self.infer - }, attrs) - - if 'across_spatial' in self.attrs and isinstance(self.attrs['across_spatial'], str): - self.attrs['across_spatial'] = int(self.attrs['across_spatial']) - - if 'channel_shared' in self.attrs and isinstance(self.attrs['channel_shared'], str): - self.attrs['channel_shared'] = int(self.attrs['channel_shared']) - - self.attrs['across_spatial'] = bool(self.attrs['across_spatial']) - self.attrs['channel_shared'] = bool(self.attrs['channel_shared']) - - def supported_attrs(self): - return ['eps', 'eps_mode', - ('across_spatial', - lambda node: bool(node.across_spatial) if node.has_valid('across_spatial') else None), - ('channel_shared', - lambda node: bool(node.channel_shared) if node.has_valid('channel_shared') else None), - ] - - @staticmethod - def infer(node: Node): - mark_input_bins(node) - copy_shape_infer(node) diff --git a/tools/mo/openvino/tools/mo/ops/normalize_l2.py b/tools/mo/openvino/tools/mo/ops/normalize_l2.py deleted file mode 100644 index 56c263b5913bc2..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/normalize_l2.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from 
openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op - - -class NormalizeL2Op(Op): - op = 'NormalizeL2' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'eps': None, - 'p': None, - 'eps_mode': None, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': self.infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - }, attrs) - - def supported_attrs(self): - return ['eps', 'eps_mode'] - - @staticmethod - def infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - if input_shape is None: - return - - input_value = node.in_port(0).data.get_value() - axes = node.in_port(1).data.get_value() - if input_value is not None and axes is not None: - norm_value = np.linalg.norm(input_value, node.p, axes, keepdims=True) - if node.eps_mode == 'add': - norm_value = norm_value + node.eps - elif node.eps_mode == 'max': - norm_value = np.max(norm_value, node.eps) - else: - assert False, 'Unsupported "eps_mode" = {}'.format(node.eps_mode) - node.out_port(0).data.set_value(input_value / norm_value) - else: - node.out_port(0).data.set_shape(input_shape) - - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'axis') diff --git a/tools/mo/openvino/tools/mo/ops/one_hot.py b/tools/mo/openvino/tools/mo/ops/one_hot.py deleted file mode 100644 index 03317f0822168c..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/one_hot.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_insert -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class OneHot(Op): - op = 'OneHot' - enabled = False # we have to extract for `axis` attribute - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'axis': -1, - 'infer': self.infer, - 'out_ports_count': 1, - 'in_ports_count': 4, - 'data_type': None, - 'force_precision_in_ports': {1: 'int64'}, - 'type_infer': self.type_infer, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return ['axis'] - - @staticmethod - def infer(node: Node): - indices_shape = node.in_port(0).data.get_shape() - assert indices_shape is not None - dim = indices_shape.size - - assert_msg = "OneHot `{0}` ({1} input port value) should be scalar: node: `{2}`, {0} value: `{3}`" - depth = node.in_port(1).data.get_value() - assert depth is not None and depth.ndim == 0, assert_msg.format('depth', '1', node.name, depth) - depth = depth.item(0) - - assert node.has_valid('axis') - axis = node['axis'] - assert -1 <= axis <= dim - - # If axis == -1 we need to insert new depth dimension in the end of indices_shape shape - axis = dim if axis == -1 else axis - - if dim == 0: - # scalar indices case - output_shape = [depth] - else: # dim >= 1 - # vector/matrix indices case - output_shape = shape_insert(indices_shape, axis, depth) - - node.out_port(0).data.set_shape(output_shape) - - indices = node.in_port(0).data.get_value() - depth = node.in_port(1).data.get_value() - on_value = node.in_port(2).data.get_value() - off_value = node.in_port(3).data.get_value() - - if 
indices is not None and depth is not None and on_value is not None and off_value is not None: - onehot_value = np.full(output_shape, off_value) - - for idx in np.ndindex(tuple(indices_shape)): - if axis == 0: - hot_idx = indices[idx], *idx - elif (axis > 0) and (axis < len(output_shape) - 1): - hot_idx = *idx[:axis], indices[idx], *idx[axis:] - elif axis == len(output_shape) - 1: - hot_idx = *idx, indices[idx] - - if -depth <= indices[idx] < depth: - onehot_value[hot_idx] = on_value # pylint: disable=possibly-used-before-assignment - - node.out_port(0).data.set_value(onehot_value) - - # This operation should be inferred in original layout - node['reinterp_shape'] = True - node['NCHW'] = True - - @staticmethod - def type_infer(node: Node): - node.out_port(0).set_data_type(node.in_port(2).get_data_type()) diff --git a/tools/mo/openvino/tools/mo/ops/op.py b/tools/mo/openvino/tools/mo/ops/op.py deleted file mode 100644 index 9ed180c1e4852a..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/op.py +++ /dev/null @@ -1,478 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import copy -import logging as log -from collections import namedtuple - -import networkx as nx -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, strict_compare_tensors -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import add_attrs_props, update_ie_fields -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.utils import class_registration -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.runtime_info import RTInfo - - -class Op(object): - registered_ops = {} - registered_cls = [] - # Add the derived class to excluded_classes if one should not be registered in registered_ops - excluded_classes = [] - - def __init__(self, graph: Graph, attrs1: dict = None, attrs2: dict = None): - self.graph = graph - try: - self.ir_version = graph.graph['ir_version'] - except: - self.ir_version = None - - self.attrs = { - 'kind': 'op', - 'rt_info': RTInfo() - } - self.default_backend_attrs = [] - if attrs1 is not None: - self.attrs.update(attrs1) - if attrs2 is not None: - self.attrs.update(attrs2) - - def add_node(self, attrs: dict = None): - new_attrs = {} - new_attrs.update(self.attrs) - if attrs is not None: - new_attrs.update(attrs) - id_prefix = new_attrs['name'] if 'name' in new_attrs else '' - id = self.graph.unique_id(id_prefix) - new_attrs['name'] = id - new_attrs = add_attrs_props(new_attrs) - update_ie_fields(new_attrs, self.ir_version) - self.substitute_ie_attrs(new_attrs) - self.graph.add_node(id, **new_attrs) - - node = Node(self.graph, id) - return node - - def substitute_ie_attrs(self, new_attrs: dict): - """ - Replace standard list of attribute in layer/data by attributes - delivered by backend_attrs - """ - backend_attrs_mapping = { - None: self.backend_attrs, - 10: self.backend_attrs, - 11: self.backend_attrs, - } - - if self.ir_version not in backend_attrs_mapping.keys(): - raise Error("Unrecognized IR version was specified: {}".format(self.ir_version)) - - new_attrs.update({ - 'IE': [( - 'layer', - [('id', lambda node: node.node), 'name', 'type', 'version'], - [ - ('data', backend_attrs_mapping[self.ir_version]() + self.default_backend_attrs, []), - '@runtime_info', - '@ports', - '@consts'])] - }) - - @staticmethod - def extract_port(node_port): - if isinstance(node_port, tuple): - node = node_port[0] - port = 
node_port[1] - else: - node = node_port - port = 0 - # 'data' nodes do not have 'out' edge attribute but always has one output - out_ids = [attr['out'] for _, __, attr in node.graph.out_edges(node.id, data=True) if 'out' in attr] - if len(set(out_ids)) > 1 and not isinstance(node_port, tuple): - raise Error('Node {} has more than one outputs. Provide output port explicitly. '.format(node.name)) - return node, port - - def create_node_on_port(self, node: Node, out_port: int, attrs: dict = None, edge_attrs: dict = None): - """ - Removes an edge, that is connected to nodes out_port. Creates new_node with attrs attributes and - connects it to node by edge that stores the same information as cutted edge. - :param node: Input node, to cut the edge from - :param out_port: output port of edge to cut - :param attrs: attributes of new node - :param edge_attrs: attributes to be changed/added to new edge - :return: Node instance of created new_node - """ - if edge_attrs is None: - edge_attrs = {'in': 0} - prev_edge_attrs = copy.deepcopy(node.out_edge(out_port)) - prev_edge_attrs.update(edge_attrs) - new_edge_attrs = prev_edge_attrs - if attrs is None: - attrs = dict() - new_node = self.add_node(attrs) - self.graph.add_edge(node.id, new_node.id, **new_edge_attrs) - return new_node - - def create_node(self, inputs: list = None, attrs: dict = None, edge_attrs: dict = None): - # TODO pass also edge attributes to copy to newly created edges - # TODO attrs should be matched with attrs() - if inputs is not None: - inputs = [Op.extract_port(inp) for inp in inputs] - else: - inputs = [] - if attrs is None: - attrs = dict() - new_node = self.add_node(attrs) - for i, inp in enumerate(inputs): - edge_attr = {'in': i, 'out': inp[1], - 'in_attrs': ['in', 'permutation'], - 'out_attrs': ['out', 'permutation'], - 'data_attrs': []} if not inp[0].has_valid('kind') or inp[0].kind == 'op' \ - else {'in': i, 'in_attrs': ['in', 'permutation']} - - # handling of debug information - if inp[0].has_port('out', inp[1]): - debug_info = inp[0].out_port(inp[1]).get_tensor_debug_info() - if debug_info is not None and len(debug_info) > 0: - edge_attr.update({'fw_tensor_debug_info': debug_info}) - edge_attr['data_attrs'].append('fw_tensor_debug_info') - - if edge_attrs is not None: - edge_attr.update(edge_attrs) - new_node.add_input_port(i, skip_if_exist=True) - inp[0].add_output_port(inp[1], skip_if_exist=True) - self.graph.add_edge(inp[0].id, new_node.id, **edge_attr) - return new_node - - def create_node_with_data(self, inputs: list = None, attrs: dict = None, - data_nodes: [Node, np.ndarray, list] = None, edge_attrs: list = None): - """ - Creates a new node with given inputs and attrs and also creates data node that - holds the op output value. Inputs should be data nodes (not op nodes). - Work for ops with a single output port only. - Edge attributes in edge_attrs go in order of items in 'inputs' - """ - if inputs is None: - inputs = [] - if attrs is None: - attrs = {} - # No need to extract port, because input node should be a data node, - # so there is no choice. 
- new_op_node = self.add_node(attrs) - - # TODO Preserve debug information - inputs_with_edge_attrs = [] - for i, inp in enumerate(inputs): - if inp is None: - continue - edge_attr = {'in': i} - if edge_attrs is not None and i < len(edge_attrs): - edge_attr.update(edge_attrs[i]) - inputs_with_edge_attrs.append((inp.id, new_op_node.id, edge_attr)) - new_op_node.add_input_port(i, skip_if_exist=True) - - self.graph.add_edges_from(inputs_with_edge_attrs) - - # TODO: Extend to the case when multiple output ports - old_data_value = [None] - old_data_shape = [None] - if data_nodes is None: - data_node = self.graph.unique_id() - self.graph.add_node(data_node, **add_attrs_props( - dict(kind='data', name=data_node, value=None, shape=None, data_type=None, infer=None))) - data_nodes = [Node(self.graph, data_node)] - else: - if type(data_nodes) not in [list, np.ndarray]: - data_nodes = [data_nodes] - old_data_value = [data_node.value.copy() if data_node.has_valid('value') else None for data_node in - data_nodes] - old_data_shape = [data_node.shape.copy() if data_node.has_valid('shape') else None for data_node in - data_nodes] - for id, data_node in enumerate(data_nodes): - self.graph.add_edges_from([(new_op_node.id, data_node.id, {'out': id})]) - - if new_op_node.has_valid('infer'): - if log.getLogger().isEnabledFor(log.DEBUG): - log.debug('Start running infer function for individual op node with attributes: {}' - ''.format(str(new_op_node))) - new_op_node.infer(new_op_node) - if new_op_node.has('nchw_layout'): - for out_node in new_op_node.out_nodes().values(): - out_node['nchw_layout'] = new_op_node.nchw_layout - assert all(old_value is None for old_value in old_data_value) or all( - [strict_compare_tensors(old_data_value[id], data_node.value) - for id, data_node in enumerate(data_nodes)]) - assert all(old_shape is None for old_shape in old_data_shape) or all( - [strict_compare_tensors(old_data_shape[id], data_node.shape) - for id, data_node in enumerate(data_nodes)]), \ - "After re-inference of {} node, old and new shapes do not match. Old shapes: {}, new shapes: {}." 
\ - "".format(new_op_node.soft_get('name'), [old_data_shape[id] for id in range(len(data_nodes))], - [data_node.shape for data_node in data_nodes]) - for data_node in data_nodes: - if log.getLogger().isEnabledFor(log.DEBUG): - log.debug( - 'Finished running infer function, data nodes attributes: {}'.format(data_node)) - return data_nodes[0] if len(data_nodes) == 1 else data_nodes - - @staticmethod - def create_data_node(graph: Graph, op_node: Node, attrs: dict = None, edge_attrs: dict = None, out_port=0): - assert op_node is not None and op_node.kind == 'op' - assert out_port not in op_node.out_nodes() - - if attrs is None: - attrs = {} - - data_node = graph.unique_id(op_node.id) - default_attrs = dict(kind='data', name=data_node, value=None, shape=None, data_type=None, infer=None) - default_attrs.update(attrs) - graph.add_node(data_node, **add_attrs_props(default_attrs)) - data_node = Node(graph, data_node) - if edge_attrs is not None: - graph.add_edges_from([(op_node.id, data_node.id, {'out': out_port, **edge_attrs})]) - else: - graph.add_edges_from([(op_node.id, data_node.id, {'out': out_port})]) - return data_node - - @staticmethod - def _create_data_node(graph: Graph, name: str, attrs: dict = None): - if attrs is None: - attrs = {} - - data_node = graph.unique_id(name) - default_attrs = dict(kind='data', name=data_node, value=None, shape=None, data_type=None, infer=None) - default_attrs.update(attrs) - graph.add_node(data_node, **add_attrs_props(default_attrs)) - data_node = Node(graph, data_node) - return data_node - - @staticmethod - def create_input_data_node(graph: Graph, name: str, value: np.array, attrs: dict = None): - if attrs is None: - attrs = {} - data_node = graph.unique_id(name) - default_attrs = dict(kind='data', name=data_node, value=mo_array(value), shape=mo_array(value.shape), - data_type=None, infer=None) - default_attrs.update(attrs) - graph.add_node(data_node, **add_attrs_props(default_attrs)) - return Node(graph, data_node) - - @staticmethod - def create_and_connect_input_data_node(graph: Graph, op_node: Node, attrs: dict = None, edge_attrs: dict = None): - assert op_node is not None and op_node.kind == 'op' - if attrs is None: - attrs = {} - if edge_attrs is None: - edge_attrs = {} - - data_node = graph.unique_id(op_node.id) - default_attrs = dict(kind='data', name=data_node, value=None, shape=None, data_type=None, infer=None) - default_attrs.update(attrs) - graph.add_node(data_node, **add_attrs_props(default_attrs)) - data_node = Node(graph, data_node) - op_node.add_input_port(edge_attrs['in'], skip_if_exist=True) - graph.add_edges_from([(data_node.id, op_node.id, edge_attrs)]) - return data_node - - def update_node(self, node: Node, attrs: dict = None): - """ - Updates/creates new attributes in node based on self.attrs and attrs. - """ - new_attrs = {} - new_attrs.update(self.attrs) - if attrs: - new_attrs.update(attrs) - new_attrs = add_attrs_props(new_attrs) - update_ie_fields(new_attrs, self.ir_version) - self.substitute_ie_attrs(new_attrs) - for k, v in new_attrs.items(): - node[k] = v - node.update_node() - - def get_opset(self): - """ - Gets the operation set version where the operation was introduced. 
- If the version is not defined then consider it an extension - :return: the string with the opset name - """ - return self.attrs.get('version', 'extension') - - @classmethod - def update_node_stat(cls, node: Node, attrs: dict = None): - if attrs is None: - attrs = dict() - op = cls(node.graph, attrs) - op.update_node(node) - - def supported_attrs(self): - """ - Attributes that user should/can set for the operation - """ - return [] - - def backend_attrs(self): - """ - Attributes that will be translated to back-end IR - """ - return self.supported_attrs() - - @staticmethod - def get_op_class_by_name(name: str): - return __class__.registered_ops[name] - - @classmethod - def class_type(cls): - return class_registration.ClassType.OP - - @staticmethod - def expand_node_shape(node: Node, dims_to_add): - if node is None or not node.has_valid('value'): - return - for idx in range(dims_to_add): - node.value = np.expand_dims(node.value, axis=-1) - node.shape = mo_array(node.value.shape) - - @staticmethod - def normalize_outputs(node: Node): - if node.has_valid('out_ports_count') and len(node.out_edges()) < node.out_ports_count: - from openvino.tools.mo.ops.result import Result # Import is here to avoid circular import error - for p in range(node.out_ports_count): - if p not in node.out_ports(): - node.add_output_port(p) - if node.out_port(p).disconnected(): - res_node = Result(node.graph, {'name': node.name + '/Fake_output_{}/'.format(p), - 'keep_output_port': True}).create_node() - node.out_port(p).connect(res_node.in_port(0)) - - -class PermuteAttrs: - Permutation = namedtuple('Permutation', ['perm', 'inv']) - Attr = namedtuple('Attr', ['name', 'port', 'func']) - - common_permutation = lambda node, permutation, attr: node[attr][permutation.perm] - slice_permutation = lambda node, permutation, attr: node[attr][ # doesn't depend from permutation variable - PermuteAttrs.get_nhwc_to_nchw_permutation(len(node[attr])).perm] - common_permutation_inv = lambda node, permutation, attr: permutation.inv[node[attr]] - - # List of default permutations - common_attrs_permutation = { - 'dim': common_permutation, - 'pad': common_permutation, - 'pads': common_permutation, - 'shape': common_permutation, - 'order': lambda node, permutation, attr: permutation.inv[node[attr][permutation.perm]], - 'stride': common_permutation, - 'window': common_permutation, - 'dilation': common_permutation, - 'kernel_shape': common_permutation, - 'output_shape': common_permutation, - 'begin_mask': slice_permutation, - 'end_mask': slice_permutation, - 'shrink_axis_mask': slice_permutation, - 'new_axis_mask': slice_permutation, - 'ellipsis_mask': slice_permutation, - 'axes': common_permutation_inv, - 'axis': common_permutation_inv, - 'seq_axis': common_permutation_inv, - 'batch_axis': common_permutation_inv, - 'batch_dims': common_permutation_inv, - 'channel_dims': common_permutation_inv, - 'spatial_dims': common_permutation_inv, - - 'input_channel_dim': common_permutation_inv, - 'output_channel_dim': common_permutation_inv, - 'kernel_spatial_idx': common_permutation_inv, - 'input_feature_channel': common_permutation_inv, - 'output_feature_channel': common_permutation_inv, - } - - @staticmethod - def __attr(name, port, func=None): - if func is None: - if name in PermuteAttrs.common_attrs_permutation: - func = PermuteAttrs.common_attrs_permutation[name] - else: - raise Error('Attr {} is missing in PermuteAttrs.common_attrs_permutation. 
Please update ' - 'common_attrs_permutation with permutation for your attribute!'.format(name)) - - if len(port.split(':')) != 2 or port.split(':')[0] not in ['input', 'output']: - raise Error("Attribute port {} for {} wasn't set correctly!".format(port, name)) - - return PermuteAttrs.Attr(name=name, port=port, func=func) - - def __init__(self): - self.attrs = {} - - def update_attrs(self, attrs): - for attr in attrs: - if not isinstance(attr, tuple) or len(attr) not in [2, 3]: - raise Error('attr object must be a tuple: (attribute_name, port) or (attribute_name, port, func)') - self.attrs.update({attr[0]: self.__attr(*attr)}) - return self - - def permute_attrs(self, node): - # This function applies permutation for given node - for attr in self.attrs.keys(): - name, port, func = self.attrs[attr] - node_type, port = port.split(':') - port = int(port) - node_with_permutation = node.in_node(port) if node_type == 'input' else node.out_node(port) - - if node_with_permutation.has_valid('permutation'): - permutation = node_with_permutation.permutation - if isinstance(permutation, type(lambda: 0)): - node[name] = func(node, permutation(node), name) - else: - node[name] = func(node, permutation, name) - - @staticmethod - def create_permute_attrs(node, attrs=None): - # Create permute_attrs if not exists - if not node.has_valid('permute_attrs'): - node['permute_attrs'] = PermuteAttrs() - node['permute_attrs'].update_attrs(attrs) - - @staticmethod - def set_permutation(node1, node2, permutation, override=False): - # This function creates permutation on edge between node1->node2 - edge_attrs = node1.graph.get_edge_data(node1.id, node2.id)[0] - if 'permutation' not in edge_attrs or override: - nx.set_edge_attributes(G=node1.graph, values={(node1.id, node2.id, 0): permutation}, name='permutation') - else: - # If permutation exists we check that given and already set permutations are equal - if (edge_attrs['permutation'] is None and permutation is not None) or \ - not np.array_equal(edge_attrs['permutation'], permutation): - raise Error('Permutation already exists in edge between {} and {}'.format(node1.id, node2.id)) - - @staticmethod - def get_inverse_permutation(perm): - inv = [0] * len(perm) - # Create reverse permutation - for index, pos in enumerate(perm): - inv[pos] = index - return inv - - @staticmethod - def get_nhwc_to_nchw_permutation(dims_number: int): - # This function returns permutation from NHWC to NCHW for given dims number - if dims_number != 3: - perm = [0, dims_number - 1, *[x for x in range(1, dims_number - 1)]] if dims_number > 1 else \ - [x for x in range(dims_number)] - else: - # Exclude 3D shapes from permutation process: identity permutation - perm = list(range(0, dims_number)) - inv = PermuteAttrs.get_inverse_permutation(perm) - return PermuteAttrs.Permutation(perm=int64_array(perm), inv=int64_array(inv)) - - @staticmethod - def get_nchw_to_nhwc_permutation(dims_number: int): - # This function returns permutation from NCHW to NHWC for given dims number - if dims_number != 3: - perm = [0, *[x for x in range(2, dims_number)], 1] if dims_number > 1 else [x for x in range(dims_number)] - else: - # Exclude 3D shapes from permutation process: identity permutation - perm = list(range(0, dims_number)) - inv = PermuteAttrs.get_inverse_permutation(perm) - return PermuteAttrs.Permutation(perm=int64_array(perm), inv=int64_array(inv)) diff --git a/tools/mo/openvino/tools/mo/ops/pack.py b/tools/mo/openvino/tools/mo/ops/pack.py deleted file mode 100644 index b706127d2124bc..00000000000000 --- 
a/tools/mo/openvino/tools/mo/ops/pack.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class PackOp(Op): - op = 'Pack' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': __class__.op, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'axis' - ] - diff --git a/tools/mo/openvino/tools/mo/ops/pad.py b/tools/mo/openvino/tools/mo/ops/pad.py deleted file mode 100644 index 1e2a8344c58f21..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/pad.py +++ /dev/null @@ -1,163 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, shape_array, undefined_shape_of_rank -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op - - -class Pad(Op): - """ Pad operation that explicitly extends an input tensor at borders. - - The operation extends each (not only spatial) dimensions of input tensors by new elements increasing output - shape. - The second and third inputs are 1D tensor with number of elements equal to input tensor rank. These inputs - specify the begin and end paddings. - The forth input specifies the fill value for 'constant' mode and not used for other cases. - """ - - op = 'Pad' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - - 'version': 'opset1', - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - - 'mode': 'constant', - - 'force_precision_in_ports': { - 1: 'int64', - 2: 'int64', - }, - - 'in_ports_count': 4, - 'out_ports_count': 1, - }, attrs) - - def backend_attrs(self): - return [('pad_mode', 'mode')] - - @staticmethod - def infer(node): - pad_node_name = node.soft_get('name', node.id) - - assert len(node.in_nodes()) in [3, 4], "The node {} must have 3 or 4 inputs".format(pad_node_name) - - input_shape = node.in_port(0).data.get_shape() - input_value = node.in_port(0).data.get_value() - pad_beg = node.in_port(1).data.get_value() - pad_end = node.in_port(2).data.get_value() - - assert pad_beg is not None, 'The padding begin value is None for node {}'.format(pad_node_name) - assert pad_end is not None, 'The padding end value is None for node {}'.format(pad_node_name) - assert input_shape is not None, 'The input shape is None for node {}'.format(pad_node_name) - assert len(input_shape) == len(pad_beg), \ - 'Length of begin padding "{}" does not correspond to input tensor shape "{}" for node "{}".' \ - ''.format(pad_beg, input_shape, pad_node_name) - assert len(input_shape) == len(pad_end), \ - 'Length of end padding "{}" does not correspond to input tensor shape "{}" for node "{}".' 
\ - ''.format(pad_beg, input_shape, pad_node_name) - assert not node.is_in_port_connected(3) or node.in_port(3).data.get_shape().size == 0, \ - 'Optional 3rd input of Pad operation should be scalar, but has shape {} for node {}' \ - ''.format(node.in_port(3).data.get_shape(), pad_node_name) - - node.out_port(0).data.set_shape(input_shape + pad_beg + pad_end) - - if input_value is not None and is_fully_defined(pad_beg) and is_fully_defined(pad_end): - pads = np.insert(pad_end, np.arange(len(pad_end)), pad_beg) - pads = np.reshape(pads, (len(pad_end), 2)) - pad_val = 0 - if len(node.in_nodes()) == 4: - pad_val = node.in_port(3).data.get_value() if node.in_port(3).data is not None else 0 - if is_fully_defined(input_value): - node.out_port(0).data.set_value(np.pad(input_value, pads, constant_values=pad_val, mode='constant')) - else: - node.out_port(0).data.set_value(shape_array(np.pad(input_value, pads, constant_values=pad_val, - mode='constant'))) - # pad values should be permuted during the NHWC->NCHW layout change - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'shape') - PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'shape') - - @staticmethod - def reverse_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - if input_shape is None and node.is_in_port_connected(2) and node.in_port(2).data.get_shape() is not None: - shape = undefined_shape_of_rank(node.in_port(2).data.get_shape()[0]) - node.in_port(0).data.set_shape(shape) - - -class AttributedPad(Op): - """ Pad operation that explicitly extends an input tensor at borders. - - This operation is uses the same semantics as Pad but with pad values specified as attributes. - Pad values are in format [nDims, 2], where [:, 0] - begin pads, [:, 1] - end pads. - """ - - op = 'AttributedPad' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': None, - 'infer': None, # the operation should be replaced before the shape inference - 'in_ports_count': 1, - 'out_ports_count': 1, - 'mode': 'constant', - 'fill_value': float(0), - 'pads': None, - }, attrs) - - -class TFPad(Op): - """ Pad operation that explicitly extends an input tensor at borders. - - This operation with the TensorFlow semantics with inputs: - 1. Input tensor. - 2. Pad values [nDims, 2] - 3. Fill value (Optional) - """ - - op = 'TFPad' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': None, - 'infer': None, # the operation should be replaced before the shape inference - 'in_ports_count': 3, - 'out_ports_count': 1, - 'mode': 'constant', - }, attrs) - - -class ONNXPad(Op): - """ Pad operation that explicitly extends an input tensor at borders. - - This operation with the ONNX semantics with inputs: - 1. Input tensor. - 2. Pad values - 3. 
Fill value (Optional) - """ - - op = 'ONNXPad' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': None, - 'infer': None, # the operation should be replaced before the shape inference - 'in_ports_count': 3, - 'out_ports_count': 1, - 'mode': 'constant', - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/parameter.py b/tools/mo/openvino/tools/mo/ops/parameter.py deleted file mode 100644 index 99581ddbbd4cdb..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/parameter.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import unmask_shape -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op, PermuteAttrs -from openvino.runtime import PartialShape - - -class Parameter(Op): - op = 'Parameter' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - 'is_input': True, - 'data_type': None, - - 'type_infer': self.type_infer, - - 'out_ports_count': 1, - 'user_shape': None, - } - if 'data_type' not in attrs: - mandatory_props['data_type'] = np.float32 - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def type_infer(node): - node.out_port(0).set_data_type(node.data_type) - - @staticmethod - def shape_serialize(node): - if not node.has_valid('user_shape'): - return ','.join([str(i) for i in unmask_shape(node.shape)]) - shape = node.soft_get('user_shape') - if isinstance(shape, np.ma.masked_array): - shape = unmask_shape(shape) - if isinstance(shape, PartialShape): - return shape.to_string() - raise Exception("Unknown shape type in user_shape attribute {}".format(type(shape))) - - def supported_attrs(self): - return [ - ('shape', lambda node: self.shape_serialize(node)), - ('element_type', lambda node: np_data_type_to_destination_type(node.data_type)), - ] - - @staticmethod - def infer(node): - name = node.soft_get('name', node.id) - assert node.has_valid('shape'), \ - 'Parameter node {} should have `shape` attribute. 
Please use cli options to set model input shape' \ - ''.format(name) - node.out_port(0).data.set_shape(node.shape) - - PermuteAttrs.create_permute_attrs(node, attrs=[('shape', 'output:0')]) - - @staticmethod - def reverse_infer(node: Node): - # update node 'shape' attribute (if it is not defined) from the output port shape which was calculated - # during the reverse_infer phase - shape = node.soft_get('shape', None) - if shape is None and node.out_port(0).data.get_shape() is not None: - node['shape'] = node.out_port(0).data.get_shape() diff --git a/tools/mo/openvino/tools/mo/ops/permute.py b/tools/mo/openvino/tools/mo/ops/permute.py deleted file mode 100644 index 527697c2ff509d..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/permute.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.extractor import attr_getter -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class Permute(Op): - op = 'Permute' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'order': None, - 'type': __class__.op, - 'op': __class__.op, - 'infer': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return [('order', lambda node: attr_getter(node, 'order'))] diff --git a/tools/mo/openvino/tools/mo/ops/pnorm.py b/tools/mo/openvino/tools/mo/ops/pnorm.py deleted file mode 100644 index aa2666cee843a5..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/pnorm.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op - - -class PNormOp(Op): - """ - PNorm operation should be replaced by operations: - Power(P) -> Reshape(n,c*g->n,g,c)-> ReduceSum(axis=1)-> Power(1/P) - """ - op = 'pnorm' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'infer': self.infer - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def infer(node: Node): - shape = node.in_port(0).data.get_shape().copy() - shape[1] = shape[1] // node.group - node.out_port(0).data.set_shape(shape) diff --git a/tools/mo/openvino/tools/mo/ops/pooling.py b/tools/mo/openvino/tools/mo/ops/pooling.py deleted file mode 100644 index 4f6dc9ce5a8ad2..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/pooling.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.partial_infer.utils import tf_window_op_pad_infer, int64_array, shape_array, \ - dynamic_dimension_value, dynamic_dimension, undefined_shape_of_rank -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.front.onnx.extractors.utils import get_backend_pad -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op, PermuteAttrs -from openvino.tools.mo.utils.error import Error - - -poolings_map = { - 'max': {'version': 'opset8', 'out_ports_count': 2}, - 'avg': {'version': 'opset1', 'out_ports_count': 1} -} - - -class PoolingV2(Op): - """ - TensorFlow 
MaxPoolV2 and AvgPoolV2 operations expect windows_size and strides values from inputs not from - attributes. This internal operation is introduced to handle that. Only constant windows_size and strides - values are supported. Eventually will be replaced with the standard pooling operations from the opset. - """ - op = 'PoolingV2' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'version': None, - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node: Node): - assert (len(node.in_nodes()) == 3), 'MaxPoolV2 node {} from must have only 3 inputs: input, window size, and ' \ - 'strides but instead got {} inputs'.format(node.soft_get('name', node.id), - len(node.in_nodes())) - node['window'] = node.in_port(1).data.get_value() - node['stride'] = node.in_port(2).data.get_value() - - if node['window'] is None: - raise Error('The non-constant window size for MaxPoolV2 node {} is not supported' - ''.format(node.soft_get('name', node.id))) - if node['stride'] is None: - raise Error('The non-constant strides for MaxPoolV2 node {} is not supported' - ''.format(node.soft_get('name', node.id))) - - Pooling.pool_infer(node) - - @staticmethod - def reverse_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - window_shape = node.in_port(1).data.get_shape() - # use the value of the 'window' input to determine input tensor rank - if input_shape is None and window_shape is not None: - node.in_port(0).data.set_shape(undefined_shape_of_rank(window_shape[0])) - - -class Pooling(Op): - op = 'Pooling' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': poolings_map[attrs.get('pool_method')]['version'], - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - 'in_ports_count': 1, - 'out_ports_count': 1 if attrs.get('version') == 'opset1' else - poolings_map[attrs.get('pool_method')]['out_ports_count'] - }, attrs) - - def backend_attrs(self): - backend_attrs_list = [ - ('strides', lambda node: ','.join(map(str, node['stride'][node.spatial_dims]))), - ('kernel', lambda node: ','.join(map(str, node['window'][node.spatial_dims]))), - - ('pads_begin', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 0)))), - ('pads_end', lambda node: ','.join(map(str, get_backend_pad(node.pad, node.spatial_dims, 1)))), - - ('exclude-pad', lambda node: bool_to_str(node, 'exclude_pad')), - - 'rounding_type', - ('auto_pad', lambda node: node.auto_pad if node.has_valid('auto_pad') else 'explicit') - ] - - if self.attrs.get('pool_method') == 'avg': - return backend_attrs_list - else: - return backend_attrs_list + [ - ('dilations', lambda node: ','.join(map(str, node['dilation'][node.spatial_dims]))), - 'axis', - ('index_element_type', lambda node: np_data_type_to_destination_type(node.index_element_type)) - ] - - @staticmethod - def infer(node: Node): - assert (len(node.in_nodes()) == 1), 'MaxPool node {} from must have only one input but instead got ' \ - '{} inputs'.format(node.soft_get('name', node.id), len(node.in_nodes())) - - Pooling.pool_infer(node) - - @staticmethod - def pool_infer(node: Node): - input_shape = node.in_node(0).shape - if input_shape is None: - return - - if not node.has_valid('spatial_dims'): - node['spatial_dims'] = np.delete([x for x in range(len(input_shape))], - [node.batch_dims[0], node.channel_dims[0]]) - - 
input_spatial_shape = input_shape[node.spatial_dims] - - # Setting default pad and stride attrs in case if None specified - if not node.has_valid('pad'): - node['pad'] = int64_array([[0, 0] for x in range(len(input_shape))]) - if not node.has_valid('pad_spatial_shape'): - node['pad_spatial_shape'] = node.pad[node.spatial_dims] - - if not node.has_valid('stride'): - node['stride'] = int64_array([1 for x in range(len(input_shape))]) - - if node.has_and_set('global_pool'): - node['window'] = np.zeros(len(input_shape), dtype=np.int64) - node.window[node.spatial_dims] = input_spatial_shape - - if not node.has_valid('dilation'): - node['dilation'] = np.ones(len(input_shape), dtype=np.float32) - - if not node.has_valid('axis'): - node['axis'] = 0 - - if not node.has_valid('index_element_type'): - node['index_element_type'] = np.int64 - - window_spatial_shape = node.window[node.spatial_dims] - stride_spatial = node.stride[node.spatial_dims] - dilation_spatial = node.dilation[node.spatial_dims] - assert any(stride_spatial), 'Stride can not be zero in node {}'.format(node.id) - - if node.has_valid('auto_pad') and node.auto_pad != 'explicit': - node.pad_spatial_shape, node.output_spatial_shape = tf_window_op_pad_infer(input=input_spatial_shape, - window=window_spatial_shape, - stride=stride_spatial, - auto_pad=node.auto_pad, - dilation=dilation_spatial) - pad = np.zeros((len(input_shape), 2), dtype=np.int64) - pad[node.spatial_dims] = node.pad_spatial_shape - node.pad = pad - else: - - pad_spatial_shape = np.add.reduce(node.pad_spatial_shape, axis=1) - - rounding = np.floor - if node.soft_get('pooling_convention') == 'full' or node.soft_get('rounding_type') == 'ceil': - rounding = np.ceil - - padded_spatial_shape = input_spatial_shape + pad_spatial_shape - ((window_spatial_shape - 1) * - dilation_spatial + 1) - if np.any(padded_spatial_shape < 0): - raise Error("Data after padding has dimension less than window size. 
" + - "Possible reason of error is incorrectly specified model input shape(s).") - - output_spatial_shape = shape_array([dynamic_dimension_value for _ in range(len(padded_spatial_shape))]) - for idx in range(len(padded_spatial_shape)): - if padded_spatial_shape[idx] is not dynamic_dimension and stride_spatial[idx] is not dynamic_dimension: - output_spatial_shape[idx] = int(rounding(padded_spatial_shape[idx] / stride_spatial[idx])) + 1 - - original_pads = mo_array([i[1] for i in node.pad_spatial_shape]) - - for i in range(len(input_spatial_shape)): - if original_pads[i] and (output_spatial_shape[i] - 1) * stride_spatial[i] >= \ - input_spatial_shape[i] + original_pads[i]: - output_spatial_shape[i] -= 1 - - node['output_spatial_shape'] = output_spatial_shape - - output_shape = input_shape.copy() - output_shape[node.spatial_dims] = node.output_spatial_shape - node.out_port(0).data.set_shape(output_shape) - - if len(node.out_ports()) == 2 and not node.out_port(1).disconnected(): - node.out_port(1).data.set_shape(output_shape) - - if node.has_and_set('pool_method') and node['pool_method'] == 'max': - node['remove_values_output'] = True - - # Add permute_attrs - PermuteAttrs.create_permute_attrs(node, attrs=[('pad', 'input:0'), - ('stride', 'input:0'), - ('window', 'input:0'), - ('spatial_dims', 'input:0'), - ('dilation', 'input:0')]) - - @staticmethod - def reverse_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - window = node.soft_get('window', None) - if input_shape is None and window is not None: - node.in_port(0).data.set_shape(undefined_shape_of_rank(len(window))) diff --git a/tools/mo/openvino/tools/mo/ops/power.py b/tools/mo/openvino/tools/mo/ops/power.py deleted file mode 100644 index 288ab0d7cb0736..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/power.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np -from openvino.tools.mo.ops.op import Op - - -class AttributedPower(Op): - op = 'AttributedPower' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': 'Power', - - 'power': 1, - 'scale': 1, - 'shift': 0, - - 'infer': self.infer, - 'type_infer': self.type_infer, - - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return ['power', 'scale', 'shift'] - - @staticmethod - def type_infer(node: Node): - node.out_port(0).set_data_type(data_type_str_to_np(node.graph.graph['cmd_params'].data_type)) - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - connected_inputs = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_inputs) == 1 and 0 in connected_inputs, \ - "AttributedPower should have 1 connected input port, but it doesn't for node: `{}`. 
Ports: {}" \ - "".format(name, connected_inputs) - - assert node.has_valid('scale'), \ - 'AttributedPower operation should have `scale` parameter set, but it doesn`t for node {}'.format(name) - assert node.has_valid('shift'), \ - 'AttributedPower operation should have `shift` parameter set, but it doesn`t for node {}'.format(name) - assert node.has_valid('power'), \ - 'AttributedPower operation should have `power` parameter set, but it doesn`t for node {}'.format(name) - - eltwise_infer(node, lambda a: np.power(a * node.scale + node.shift, node.power)) diff --git a/tools/mo/openvino/tools/mo/ops/prelu.py b/tools/mo/openvino/tools/mo/ops/prelu.py deleted file mode 100644 index c79976ced97a45..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/prelu.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class PReLU(Op): - op = 'PReLU' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - - 'infer': self.infer, - - 'force_precision_in_ports': {1: 'float'}, - - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node): - if len(node.in_nodes()) == 2: - gamma_vector = node.in_node(1) - if np.all(gamma_vector.shape == [1]): - node['channel_shared'] = 1 - else: - node['channel_shared'] = 0 - node.in_node(1)['correct_data_type'] = True - - copy_shape_infer(node) diff --git a/tools/mo/openvino/tools/mo/ops/priorbox.py b/tools/mo/openvino/tools/mo/ops/priorbox.py deleted file mode 100644 index 6cbf4899aa037b..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/priorbox.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.layout import get_width_dim, get_height_dim -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.extractor import attr_getter, bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class PriorBoxOp(Op): - op = 'PriorBox' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'flip': True, - 'clip': True, - 'scale_all_sizes': True, - 'max_size': mo_array([]), - 'min_size': mo_array([]), - 'aspect_ratio': mo_array([]), - 'density': mo_array([]), - 'fixed_size': mo_array([]), - 'fixed_ratio': mo_array([]), - 'in_ports_count': 2, - 'out_ports_count': 1, - 'type_infer': self.type_infer, - 'infer': self.priorbox_infer - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'min_size', - 'max_size', - 'aspect_ratio', - 'flip', - 'clip', - 'variance', - 'img_size', - 'img_h', - 'img_w', - 'step', - 'step_h', - 'step_w', - 'offset', - 'density', - 'fixed_size', - 'fixed_ratio', - ] - - def backend_attrs(self): - return [ - ('flip', lambda node: int(node.flip)), # We need to convert this boolean attribute value to int to keep - # forward compatibility with OV 2021.2 - ('clip', lambda node: int(node.clip)), # We need to convert this boolean attribute value to int to keep - # forward compatibility with OV 2021.2 - 'step', - 'offset', - 
('scale_all_sizes', lambda node: bool_to_str(node, 'scale_all_sizes')), - ('min_size', lambda node: attr_getter(node, 'min_size')), - ('max_size', lambda node: attr_getter(node, 'max_size')), - ('aspect_ratio', lambda node: attr_getter(node, 'aspect_ratio')), - ('variance', lambda node: attr_getter(node, 'variance')), - ('density', lambda node: attr_getter(node, 'density')), - ('fixed_size', lambda node: attr_getter(node, 'fixed_size')), - ('fixed_ratio', lambda node: attr_getter(node, 'fixed_ratio')), - ] - - @staticmethod - def type_infer(node): - node.out_port(0).set_data_type(np.float32) - - @staticmethod - def priorbox_infer(node: Node): - layout = node.graph.graph['layout'] - data_shape = node.in_node(0).shape - - # calculate all different aspect_ratios (the first one is always 1) - # in aspect_ratio 1/x values will be added for all except 1 if flip is True - ar_seen = [1.0] - ar_seen.extend(node.aspect_ratio.copy()) - if node.flip: - for s in node.aspect_ratio: - ar_seen.append(1.0 / s) - - ar_seen = np.unique(mo_array(ar_seen).round(decimals=6)) - - num_ratios = 0 - if len(node.min_size) > 0: - num_ratios = len(ar_seen) * len(node.min_size) - - if node.has_valid('fixed_size') and len(node.fixed_size) > 0: - num_ratios = len(ar_seen) * len(node.fixed_size) - - if node.has_valid('density') and len(node.density) > 0: - for d in node.density: - if node.has_valid('fixed_ratio') and len(node.fixed_ratio) > 0: - num_ratios = num_ratios + len(node.fixed_ratio) * (pow(d, 2) - 1) - else: - num_ratios = num_ratios + len(ar_seen) * (pow(d, 2) - 1) - - num_ratios = num_ratios + len(node.max_size) - - if node.has_and_set('V10_infer'): - assert node.in_node(0).value is not None - node.out_port(0).data.set_shape([2, np.prod(node.in_node(0).value) * num_ratios * 4]) - else: - res_prod = data_shape[get_height_dim(layout, 4)] * data_shape[get_width_dim(layout, 4)] * num_ratios * 4 - node.out_port(0).data.set_shape([1, 2, res_prod]) diff --git a/tools/mo/openvino/tools/mo/ops/priorbox_clustered.py b/tools/mo/openvino/tools/mo/ops/priorbox_clustered.py deleted file mode 100644 index 6dddc3b254b530..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/priorbox_clustered.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.layout import get_width_dim, get_height_dim -from openvino.tools.mo.front.extractor import attr_getter -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class PriorBoxClusteredOp(Op): - op = 'PriorBoxClustered' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': self.priorbox_clustered_infer, - 'type_infer': self.type_infer, - 'clip': True, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'width', - 'height', - 'flip', - 'clip', - 'variance', - 'img_size', - 'img_h', - 'img_w', - 'step', - 'step_h', - 'step_w', - 'offset' - ] - - def backend_attrs(self): - return [ - ('clip', lambda node: int(node.clip)), # We need to convert this boolean attribute value to int to keep - # forward compatibility with OV 2021.2 - 'img_h', - 'img_w', - 'step', - 'step_h', - 'step_w', - 'offset', - ('variance', lambda node: attr_getter(node, 'variance')), - ('width', lambda node: attr_getter(node, 'width')), - ('height', lambda node: 
attr_getter(node, 'height')) - ] - - @staticmethod - def type_infer(node): - node.out_port(0).set_data_type(np.float32) - - @staticmethod - def priorbox_clustered_infer(node: Node): - layout = node.graph.graph['layout'] - data_shape = node.in_node(0).shape - num_ratios = len(node.width) - - if node.has_and_set('V10_infer'): - assert node.in_node(0).value is not None - node.out_port(0).data.set_shape([2, np.prod(node.in_node(0).value) * num_ratios * 4]) - else: - res_prod = data_shape[get_height_dim(layout, 4)] * data_shape[get_width_dim(layout, 4)] * num_ratios * 4 - node.out_port(0).data.set_shape([1, 2, res_prod]) diff --git a/tools/mo/openvino/tools/mo/ops/priorgridgenerator_onnx.py b/tools/mo/openvino/tools/mo/ops/priorgridgenerator_onnx.py deleted file mode 100644 index 80d4b3e288bd74..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/priorgridgenerator_onnx.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, set_input_shapes -from openvino.tools.mo.ops.op import Op - - -class ExperimentalDetectronPriorGridGenerator(Op): - op = 'ExperimentalDetectronPriorGridGenerator' - - def __init__(self, graph, attrs): - mandatory_props = dict( - type=self.op, - op=self.op, - version='opset6', - infer=self.infer, - reverse_infer=self.reverse_infer, - ) - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return [ - 'flatten', - 'h', - 'w', - 'stride_x', - 'stride_y', - ] - - @staticmethod - def infer(node): - input_shape = node.in_port(0).data.get_shape() - priors_num = input_shape[0] - grid_h = node.in_port(1).data.get_shape()[2] - grid_w = node.in_port(1).data.get_shape()[3] - if node.flatten: - out_shape = [grid_h * grid_w * priors_num, 4] - else: - out_shape = [grid_h, grid_w, priors_num, 4] - node.out_port(0).data.set_shape(out_shape) - - @staticmethod - def reverse_infer(node): - priors_shape = shape_array([dynamic_dimension_value, 4]) - feature_map_shape = shape_array([1, dynamic_dimension_value, dynamic_dimension_value, dynamic_dimension_value]) - image_shape = shape_array([1, dynamic_dimension_value, dynamic_dimension_value, dynamic_dimension_value]) - - set_input_shapes(node, priors_shape, feature_map_shape, image_shape) diff --git a/tools/mo/openvino/tools/mo/ops/proposal.py b/tools/mo/openvino/tools/mo/ops/proposal.py deleted file mode 100644 index 1cb536edea646e..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/proposal.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import undefined_shape_of_rank, set_input_shapes -from openvino.tools.mo.front.extractor import attr_getter, bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class ProposalOp(Op): - op = 'Proposal' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset4', - 'post_nms_topn': 300, # default in caffe-shared - 'infer': ProposalOp.proposal_infer, - 'reverse_infer': self.reverse_infer, - 'in_ports_count': 3, - 'out_ports_count': 1 if attrs.get('version') == 'opset1' else 2, - 'normalize': False, - 'clip_before_nms': True, - 'clip_after_nms': False, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'feat_stride', - 
'base_size', - 'min_size', - 'ratio', - 'scale', - 'pre_nms_topn', - 'post_nms_topn', - 'nms_thresh', - ] - - def backend_attrs(self): - return [ - 'feat_stride', - 'base_size', - 'min_size', - ('ratio', lambda node: attr_getter(node, 'ratio')), - ('scale', lambda node: attr_getter(node, 'scale')), - 'pre_nms_topn', - 'post_nms_topn', - 'nms_thresh', - 'framework', - 'box_coordinate_scale', - 'box_size_scale', - ('normalize', lambda node: bool_to_str(node, 'normalize')), - ('clip_after_nms', lambda node: bool_to_str(node, 'clip_after_nms')), - ('clip_before_nms', lambda node: bool_to_str(node, 'clip_before_nms')), - ] - - @staticmethod - def proposal_infer(node: Node): - input_shape = node.in_node(0).shape - # rois blob: holds R regions of interest, each is a 5 - tuple - # (n, x1, y1, x2, y2) specifying an image batch index n and a - # rectangle(x1, y1, x2, y2) - node.out_port(0).data.set_shape([input_shape[0] * node.post_nms_topn, 5]) - - # the second optional output contains box probabilities - if len(node.out_ports()) == 2 and not node.out_port(1).disconnected(): - node.out_port(1).data.set_shape([input_shape[0] * node.post_nms_topn]) - - @staticmethod - def reverse_infer(node): - set_input_shapes(node, undefined_shape_of_rank(4), undefined_shape_of_rank(4)) diff --git a/tools/mo/openvino/tools/mo/ops/proposal_onnx.py b/tools/mo/openvino/tools/mo/ops/proposal_onnx.py deleted file mode 100644 index bd5e575a9a93a8..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/proposal_onnx.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, \ - undefined_shape_of_rank, set_input_shapes -from openvino.tools.mo.ops.op import Op - - -class ExperimentalDetectronGenerateProposalsSingleImage(Op): - op = 'ExperimentalDetectronGenerateProposalsSingleImage' - - def __init__(self, graph, attrs): - mandatory_props = dict( - type=self.op, - op=self.op, - version='experimental', - reverse_infer=self.reverse_infer, - infer=self.infer - ) - - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return [ - 'min_size', - 'nms_threshold', - 'post_nms_count', - 'pre_nms_count' - ] - - @staticmethod - def infer(node): - node.out_port(0).data.set_shape([node.post_nms_count, 4]) - node.out_port(1).data.set_shape([node.post_nms_count]) - - @staticmethod - def reverse_infer(node): - set_input_shapes(node, - undefined_shape_of_rank(1), - shape_array([dynamic_dimension_value, 4]), - undefined_shape_of_rank(3), - undefined_shape_of_rank(3)) diff --git a/tools/mo/openvino/tools/mo/ops/proposal_python_example.py b/tools/mo/openvino/tools/mo/ops/proposal_python_example.py deleted file mode 100644 index 12a1748ac34032..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/proposal_python_example.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.proposal import ProposalOp -from openvino.tools.mo.front.caffe.extractor import register_caffe_python_extractor -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class ProposalPythonExampleOp(Op): - op = 'Proposal' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': __class__.op, - 'op': __class__.op, - 'post_nms_topn': 300, - 'infer': ProposalOp.proposal_infer - } - - super().__init__(graph, mandatory_props, attrs) - - 
-register_caffe_python_extractor(ProposalPythonExampleOp, 'rpn.proposal_layer.ProposalLayer.example') -Op.excluded_classes.append(ProposalPythonExampleOp) diff --git a/tools/mo/openvino/tools/mo/ops/psroipooling.py b/tools/mo/openvino/tools/mo/ops/psroipooling.py deleted file mode 100644 index b890ca649fa04b..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/psroipooling.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.layout import get_batch_dim, shape_for_layout -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, \ - undefined_shape_of_rank, set_input_shapes -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class PSROIPoolingOp(Op): - op = 'PSROIPooling' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset2', - 'mode': 'average', - 'in_ports_count': 2, - 'out_ports_count': 1, - 'trans_std': 0, - 'no_trans': True, - 'reverse_infer': self.reverse_infer, - 'infer': PSROIPoolingOp.psroipooling_infer - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'spatial_scale', - 'output_dim', - ('group_size', lambda node: int(node.group_size)), - 'mode', - 'spatial_bins_x', - 'spatial_bins_y', - ] - - @staticmethod - def psroipooling_infer(node: Node): - """ - Sets shape of output node according specified parameters input blobs and node - Sets number from the first input blob, channels from the second one, height and width are specified - Parameters - ---------- - node - """ - shapes = [node.in_node(i).shape for i in range(len(node.in_nodes()))] - if any(s is None for s in shapes): - return - layout = node.graph.graph['layout'] - assert len(layout) == 4 - assert node.has_valid('group_size') - assert node.group_size == int(node.group_size) - node['group_size'] = int(node['group_size']) - node.out_node().shape = shape_for_layout(layout, - batch=shapes[1][get_batch_dim(layout, 4)], - features=node.output_dim, - height=node.group_size, - width=node.group_size) - - @staticmethod - def reverse_infer(node): - set_input_shapes(node, undefined_shape_of_rank(4), shape_array([dynamic_dimension_value, 5])) - - -class DeformablePSROIPoolingOp(PSROIPoolingOp): - op = 'DeformablePSROIPooling' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - updated_attrs = { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'mode': 'bilinear_deformable', - 'in_ports_count': 3, - 'trans_std': 0, - } - updated_attrs.update(attrs) - super().__init__(graph, updated_attrs) - - def supported_attrs(self): - return super().supported_attrs() + ['trans_std', 'part_size'] - - @staticmethod - def reverse_infer(node): - transformation_values_shape = shape_array( - [dynamic_dimension_value, dynamic_dimension_value, int(node.group_size), int(node.group_size)]) - - set_input_shapes(node, - undefined_shape_of_rank(4), - shape_array([dynamic_dimension_value, 5]), - transformation_values_shape) diff --git a/tools/mo/openvino/tools/mo/ops/quantize_linear.py b/tools/mo/openvino/tools/mo/ops/quantize_linear.py deleted file mode 100644 index 9fc4efac8ea9e3..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/quantize_linear.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from 
openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class QuantizeLinear(Op): - op = 'QuantizeLinear' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'axis': None, - 'version': None, - 'infer': copy_shape_infer, - 'out_ports_count': 1, - 'in_ports_count': 3, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return ['axis'] diff --git a/tools/mo/openvino/tools/mo/ops/random_uniform.py b/tools/mo/openvino/tools/mo/ops/random_uniform.py deleted file mode 100644 index e352133d304d79..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/random_uniform.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op - - -class RandomUniform(Op): - """ - RandomUniform operation that generates a sequence of random values from uniform distribution. - """ - op = 'RandomUniform' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset8', - 'infer': self.infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - 'type_infer': self.type_infer, - 'global_seed': 0, - 'op_seed': 0, - 'output_type': np.float32, - }, attrs) - - def backend_attrs(self): - return [('output_type', lambda node: np_data_type_to_destination_type(node.output_type)), - 'global_seed', - 'op_seed'] - - @staticmethod - def type_infer(node: Node): - node.out_port(0).set_data_type(node['output_type']) - - @staticmethod - def infer(node: Node): - assert node.has_valid('output_type') - - node.out_port(0).data.set_shape(node.in_port(0).data.get_value()) - - # We need to keep data type in data nodes corresponding to min and max values, - # as min and max value type should be the same as output_type attribute of RandomUniform - # operation. 'correct_data_type' attribute prevents changes of the data node type when - # ir data type is not equal to data node type. - node.in_node(1)['correct_data_type'] = True - node.in_node(2)['correct_data_type'] = True - - PermuteInputs().set_input_permutation(node.in_node(0), node, 'output:0', 'shape') - - -class AttributedRandomUniform(Op): - """ RandomUniform operation that generates a sequence of random values from uniform distribution. - This operation uses the same semantics as RandomUniform but output shape, min value or max value - can be specified as attribute. - Shape is specified as attribute in ONNX. Min value and max value are specified as attributes - in RandomUniformInt in TF. 
- """ - op = 'AttributedRandomUniform' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'infer': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'global_seed': 0, - 'op_seed': 0, - 'output_type': np.float32, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/range.py b/tools/mo/openvino/tools/mo/ops/range.py deleted file mode 100644 index e23418acfd237c..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/range.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, is_fully_defined -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class Range(Op): - """ - Some notes on the automatic result data type infer. The tf.range does is differently than np.arange. Numpy - by default creates array with elements of type int64 and float64, but TF does not widen data types and - keep them int32 and float32. - Compare: - - >>> tf.range(1, 5, 0.5) - - >>> tf.range(1, 5, 2) - - - >>> mo_array([0.5], dtype=np.float32) - array([0.5], dtype=float32) - >>> np.arange(mo_array([1], dtype=np.int32), mo_array([5], dtype=np.int32), mo_array([2], dtype=np.int32)).dtype - dtype('int64') - >>> np.arange(mo_array([1], dtype=np.int32), mo_array([5], dtype=np.int32), mo_array([0.5], dtype=np.float32)).dtype - dtype('float64') - """ - op = 'Range' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - - 'version': 'opset4', - 'infer': self.infer, - 'type_infer': self.type_infer, - - 'in_ports_count': 3, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - version = self.get_opset() - if version == 'opset4': - return [ - ('output_type', lambda node: np_data_type_to_destination_type(node.output_type)), - ] - elif version == 'opset1': - return [] - else: - raise Error('Unknown opset version "{}"'.format(version)) - - @staticmethod - def type_infer(node: Node): - node.out_port(0).set_data_type(node['output_type']) - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - connected_input_ports = [in_port.idx for in_port in node.in_ports().values() if not in_port.disconnected()] - assert len(connected_input_ports) == 3 and [0, 1, 2] == sorted(connected_input_ports), \ - 'Range operation should have 3 inputs, {} found for {}'.format(len(connected_input_ports), name) - - start = node.in_port(0).data.get_value() - limit = node.in_port(1).data.get_value() - delta = node.in_port(2).data.get_value() - - for input in (start, limit, delta): - if input is not None and not node.has_valid('output_type'): - node['output_type'] = input.dtype - - if not is_fully_defined(start) or not is_fully_defined(limit) or not is_fully_defined(delta): - node.out_port(0).data.set_shape(shape_array([dynamic_dimension_value])) - else: - node.out_port(0).data.set_value(np.arange(start, limit, delta, dtype=node['output_type'])) diff --git a/tools/mo/openvino/tools/mo/ops/rank.py b/tools/mo/openvino/tools/mo/ops/rank.py deleted file mode 100644 index 76cdc6eea483ad..00000000000000 
--- a/tools/mo/openvino/tools/mo/ops/rank.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class Rank(Op): - op = 'Rank' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - - 'output_type': np.int64, - 'infer': None, - - 'in_ports_count': 1, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/read_value.py b/tools/mo/openvino/tools/mo/ops/read_value.py deleted file mode 100644 index 1147ee0d3a2372..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/read_value.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import numpy as np -from openvino.runtime import PartialShape - -from openvino.tools.mo.front.common.partial_infer.utils import unmask_shape -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op - - -class ReadValue(Op): - op = 'ReadValue' - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset6', - 'infer': self.infer, - 'type_infer': self.type_infer, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def shape_serialize(node): - shape = node.soft_get('variable_shape') - if isinstance(shape, np.ndarray): - return shape.tolist() - if isinstance(shape, np.ma.masked_array): - shape = unmask_shape(shape) - if isinstance(shape, PartialShape): - return shape.to_string() - raise Exception("Unknown shape type in user_shape attribute {}".format(type(shape))) - - def backend_attrs(self): - return ['variable_id', - ('variable_shape', lambda node: self.shape_serialize(node)), - ('variable_type', lambda node: np_data_type_to_destination_type(node.variable_type))] - - @staticmethod - def type_infer(node: Node): - node.variable_type = node.in_port(0).get_data_type() - node.out_port(0).set_data_type(node.in_port(0).get_data_type()) - - @staticmethod - def infer(node: Node): - assert node.has_valid('variable_id'), \ - "There is no required attribute variable_id in ReadValue op with name " + node.id - in_shape = node.in_port(0).data.get_shape() - node.out_port(0).data.set_shape(in_shape) - node.variable_shape = in_shape diff --git a/tools/mo/openvino/tools/mo/ops/regionyolo.py b/tools/mo/openvino/tools/mo/ops/regionyolo.py deleted file mode 100644 index 7c8161c8939183..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/regionyolo.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.front.common.layout import get_batch_dim, get_height_dim, get_width_dim, shape_for_layout -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, dynamic_dimension_value, \ - undefined_shape_of_rank -from openvino.tools.mo.front.extractor import attr_getter, bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class RegionYoloOp(Op): - op = 'RegionYolo' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - 
mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'in_ports_count': 1, - 'out_ports_count': 1, - 'reverse_infer': self.reverse_infer, - 'infer': RegionYoloOp.regionyolo_infer - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'coords', - 'classes', - 'num', - 'axis', - 'end_axis', - 'do_softmax', - 'anchors', - 'mask' - ] - - def backend_attrs(self): - return [ - 'coords', - 'classes', - 'num', - 'axis', - 'end_axis', - ('do_softmax', lambda node: bool_to_str(node, 'do_softmax')), - ('anchors', lambda node: attr_getter(node, 'anchors')), - ('mask', lambda node: attr_getter(node, 'mask')) - ] - - @staticmethod - def regionyolo_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - axis = get_canonical_axis_index(input_shape, node.axis) - end_axis = get_canonical_axis_index(input_shape, node.end_axis) - node.axis = axis - node.end_axis = end_axis - if node.do_softmax: - dims_to_flatten = input_shape[axis: end_axis + 1] - if is_fully_defined(dims_to_flatten): - flat_dim = np.ma.prod(dims_to_flatten) - else: - flat_dim = dynamic_dimension_value - node.out_port(0).data.set_shape([*input_shape[:axis], flat_dim, *input_shape[end_axis + 1:]]) - else: - layout = node.graph.graph['layout'] - assert len(layout) == 4 - - node.out_port(0).data.set_shape(shape_for_layout(layout, - batch=input_shape[get_batch_dim(layout, 4)], - features=(node.classes + node.coords + 1) * len(node.mask), - height=input_shape[get_height_dim(layout, 4)], - width=input_shape[get_width_dim(layout, 4)])) - - @staticmethod - def reverse_infer(node): - if node.in_port(0).data.get_shape() is None: - node.in_port(0).data.set_shape(undefined_shape_of_rank(4)) diff --git a/tools/mo/openvino/tools/mo/ops/reorgyolo.py b/tools/mo/openvino/tools/mo/ops/reorgyolo.py deleted file mode 100644 index 8fae9ad8936cea..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/reorgyolo.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import undefined_shape_of_rank -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op, PermuteAttrs -from openvino.tools.mo.utils.error import Error - - -class ReorgYoloOp(Op): - op = 'ReorgYolo' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset2', - 'reverse_infer': self.reverse_infer, - 'infer': ReorgYoloOp.reorgyolo_infer - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'stride' - ] - - @staticmethod - def reorgyolo_infer(node: Node): - input_shape = node.in_node(0).shape - if input_shape is None: - raise Error('Input shape for operation "{}" is None'.format(node.soft_get('name', node.id))) - - stride = node.stride - - output_shape = input_shape.copy() - output_shape[node.batch_dims] = input_shape[node.batch_dims] # pylint: disable=unsupported-assignment-operation - output_shape[node.channel_dims] = input_shape[node.channel_dims] * stride ** 2 # pylint: disable=unsupported-assignment-operation - # Round as in caffe - output_shape[node.spatial_dims] = np.ma.round(input_shape[node.spatial_dims] / stride) # pylint: disable=unsupported-assignment-operation - - node.out_port(0).data.set_shape(output_shape) - PermuteAttrs.create_permute_attrs(node, attrs=[('channel_dims', 'input:0'), ('spatial_dims', 'input:0')]) - - 
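    # Illustration (not part of the original file): for an NCHW input [N, C, H, W]
    # and stride s, reorgyolo_infer above yields [N, C * s * s, round(H / s), round(W / s)];
    # e.g. an input of [1, 64, 26, 26] with stride 2 becomes [1, 256, 13, 13].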
@staticmethod - def reverse_infer(node): - if node.in_port(0).data.get_shape() is None: - node.in_port(0).data.set_shape(undefined_shape_of_rank(4)) diff --git a/tools/mo/openvino/tools/mo/ops/reshape.py b/tools/mo/openvino/tools/mo/ops/reshape.py deleted file mode 100644 index bd3538ff7845f8..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/reshape.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, dynamic_dimension_value, is_fully_defined -from openvino.tools.mo.front.extractor import bool_to_str -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op - - -class Reshape(Op): - op = 'Reshape' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - - 'infer': self.infer, - - 'special_zero': True, - 'reinterp_shape': True, - - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return [('special_zero', lambda node: bool_to_str(node, 'special_zero'))] - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - connected_inputs = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_inputs) == 2 and all([i in connected_inputs for i in range(2)]), \ - "Reshape should have 2 connected input ports, but it doesn't for node: `{}`. Ports: {}" \ - "".format(name, connected_inputs) - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None - - new_shape = node.in_port(1).data.get_value() - assert new_shape is not None, 'Dynamic Reshape second input is not supported. Node {}'.format(name) - - assert np.argwhere(new_shape == -1).size <= 1, \ - 'Reshape second input should not have several `-1` values set. 
' \ - 'Node: {}, reshape second input value {}'.format(name, new_shape) - - num_of_input_elements = np.prod(input_shape) - num_of_output_elements = 1 - for index, x in enumerate(new_shape): - if x is dynamic_dimension: - num_of_output_elements = dynamic_dimension_value - elif x == 0 and node.has_and_set('special_zero'): - if input_shape[index] is not dynamic_dimension: - num_of_output_elements *= input_shape[index] - elif x != -1: - num_of_output_elements *= x - - # input_shape = [dynamic, 5, 6], new_shape = [0, -1] => output_shape [dynamic, 30] - # marker that no dynamic input dimensions or all of them are copied with "0" magic value - all_dynamic_dimension_are_copied = True - if not is_fully_defined(input_shape): - for index, x in enumerate(input_shape): - if x is dynamic_dimension: - if index >= len(new_shape) or new_shape[index] != 0: - all_dynamic_dimension_are_copied = False - - undefined_dim = dynamic_dimension - if num_of_output_elements is not dynamic_dimension and all_dynamic_dimension_are_copied and \ - is_fully_defined(new_shape): - undefined_dim = num_of_input_elements // num_of_output_elements - output_shape = [] - for index, x in enumerate(new_shape): - if x == 0 and node.has_and_set('special_zero'): - output_shape.append(input_shape[index]) - elif x == -1: - output_shape.append(undefined_dim) - else: - output_shape.append(x) - - # even if the new_shape contains some dynamic values we can calculate the actual value by deducing it from the - # input shape if it is static: input_shape = [5, 3, 8], new_shape = [4, d] => output_shape = [4, 30] - if is_fully_defined(input_shape) and not is_fully_defined(new_shape): - dynamic_indices = np.argwhere([item is dynamic_dimension for item in new_shape]) - num_of_output_elements = 1 - if dynamic_indices.size == 1: - for index, x in enumerate(new_shape): - if x == 0 and node.has_and_set('special_zero'): - num_of_output_elements *= input_shape[index] - elif x is not dynamic_dimension and x != -1: - num_of_output_elements *= x - assert num_of_input_elements % num_of_output_elements == 0, \ - 'Incorrect number of output elements deduced for node {}: '.format(name) - output_shape[dynamic_indices[0][0]] = num_of_input_elements // num_of_output_elements - - assert not is_fully_defined(input_shape) or not is_fully_defined(output_shape) or \ - np.prod(input_shape) == np.prod(output_shape), \ - "Number of elements in input {} and output {} of reshape node {} mismatch" \ - "".format(input_shape, output_shape, name) - - PermuteInputs().set_input_permutation(node.in_node(1), node, 'output:0', 'shape') - - if node.in_port(0).data.get_value() is not None and is_fully_defined(output_shape): - node.out_port(0).data.set_value(node.in_port(0).data.get_value().reshape(output_shape)) - else: - node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/resize.py b/tools/mo/openvino/tools/mo/ops/resize.py deleted file mode 100644 index d39d2af9ac50ee..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/resize.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class ResizeOp(Op): - enabled = False - op = 'Resize' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': self.resize_infer - } - 
super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'mode', - ] - - def backend_attrs(self): - return [ - 'mode', - ] - - @staticmethod - def resize_infer(node: Node): - layout = node.graph.graph['layout'] - assert len(layout) == 4 - - input_shape = node.in_node(0).shape - if input_shape is None: - raise Error('Input shape for operation "{}" is None'.format(node.soft_get('name', node.id))) - - scale_value = node.in_node(1).value - - node.out_port(0).data.set_shape(input_shape * scale_value) - diff --git a/tools/mo/openvino/tools/mo/ops/resize_factor_utils.py b/tools/mo/openvino/tools/mo/ops/resize_factor_utils.py deleted file mode 100644 index 961d02a135c6f9..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/resize_factor_utils.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - - -def factor_update(factor: float, real_factor: list, in_shape: list, out_shape: list, name: str): - """ Updates factor value for layers related to image resizing such as Resample and Interp. """ - if factor is None: - if real_factor[0] != real_factor[1]: - log.warning( - 'Cannot deduce a single zoom factor for both height and widths for node {}: [{},{}]/[{},{}] = [{},{}]. ' - 'This model will not reshape in IE.'.format( - name, - out_shape[0], - out_shape[1], - in_shape[0], - in_shape[1], - real_factor[0], - real_factor[1] - ) - ) - else: - factor = real_factor[0] - return factor diff --git a/tools/mo/openvino/tools/mo/ops/restrictedattentioncomponent.py b/tools/mo/openvino/tools/mo/ops/restrictedattentioncomponent.py deleted file mode 100644 index ff9fdfe30eeeb2..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/restrictedattentioncomponent.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class RestrictedAttentionComponent(Op): - op = 'restrictedattentioncomponent' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'has_default': False, - 'infer': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/result.py b/tools/mo/openvino/tools/mo/ops/result.py deleted file mode 100644 index 38da327d1a6ca0..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/result.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class Result(Op): - """ - Operation that should be added after the output node of the graph. It is a marker of the graph output. - This type of nodes is used in the dead nodes elimination pass and not dumped into the IR. 
- """ - op = 'Result' - - def __init__(self, graph: Graph, attrs: dict = None): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - 'infer': lambda x: None, - 'value': None, - 'data_type': None, - 'in_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/reverse_sequence.py b/tools/mo/openvino/tools/mo/ops/reverse_sequence.py deleted file mode 100644 index bbf3ec33c41b3a..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/reverse_sequence.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op, PermuteAttrs - - -class ReverseSequence(Op): - op = 'ReverseSequence' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'version': 'opset1', - 'seq_axis': None, - 'batch_axis': 0, - 'op': self.op, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': self.infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'seq_axis', 'batch_axis', - ] - - @staticmethod - def infer(node): - input_data_shape = node.in_port(0).data.get_shape() - assert input_data_shape is not None - assert node.has_valid('seq_axis') - assert node.has_valid('batch_axis') - - assert len(node.out_nodes()) == 1 - node.out_port(0).data.set_shape(input_data_shape) - - PermuteAttrs.create_permute_attrs(node, attrs=[('seq_axis', 'input:0')]) - PermuteAttrs.create_permute_attrs(node, attrs=[('batch_axis', 'input:0')]) diff --git a/tools/mo/openvino/tools/mo/ops/roialign.py b/tools/mo/openvino/tools/mo/ops/roialign.py deleted file mode 100644 index 169dfbdc792c41..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/roialign.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.layout import get_features_dim, shape_for_layout -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, \ - undefined_shape_of_rank, set_input_shapes, compatible_dims -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class ROIAlign(Op): - op = 'ROIAlign' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - assert 'mode' in attrs, '`mode` attribute is not set for ROIAlign during creation' - assert 'pooled_h' in attrs, '`pooled_h` attribute is not set for ROIAlign during creation' - assert 'pooled_w' in attrs, '`pooled_w` attribute is not set for ROIAlign during creation' - assert 'sampling_ratio' in attrs, '`sampling_ratio` attribute is not set for ROIAlign during creation' - assert 'spatial_scale' in attrs, '`spatial_scale` attribute is not set for ROIAlign during creation' - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset9', - - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - - 'in_ports_count': 3, - 'out_ports_count': 1, - - 'aligned_mode': 'asymmetric', - }, attrs) - - def backend_attrs(self): - version = self.get_opset() - if version == 'opset3': - return [ - ('mode', lambda node: str(node.mode)), - ('pooled_h', lambda node: str(int(node.pooled_h))), - ('pooled_w', lambda node: str(int(node.pooled_w))), - ('sampling_ratio', lambda node: 
str(int(node.sampling_ratio))), - ('spatial_scale', lambda node: str(float(node.spatial_scale))), - ] - elif version == 'opset9': - return [ - ('mode', lambda node: str(node.mode)), - ('pooled_h', lambda node: str(int(node.pooled_h))), - ('pooled_w', lambda node: str(int(node.pooled_w))), - ('sampling_ratio', lambda node: str(int(node.sampling_ratio))), - ('spatial_scale', lambda node: str(float(node.spatial_scale))), - ('aligned_mode', lambda node: str(node.aligned_mode)) - ] - - @staticmethod - def infer(node): - - layout = node.graph.graph['layout'] - node_name = node.soft_get('name', node.id) - assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 3, \ - 'The node "{}" must 3 inputs'.format(node_name) - - assert node.has_valid('pooled_w'), '"pooled_w" attribute is not set for node "{}"'.format(node_name) - assert node.has_valid('pooled_h'), '"pooled_h" attribute is not set for node "{}"'.format(node_name) - assert node.has_valid('mode'), '"mode" attribute is not set for node "{}"'.format(node_name) - assert node.mode in ['avg', 'max'], \ - '"mode" attribute range of values is ["avg", "max"], got {} for node "{}"'.format(node.mode, node_name) - if node.get_opset() == 'opset9': - assert node.aligned_mode in ['asymmetric', 'half_pixel_for_nn', 'half_pixel'], \ - '"aligned_mode" attribute range of values is ["asymmetric", "half_pixel_for_nn", "half_pixel"]' - input_shape = node.in_port(0).data.get_shape() - rois_shape = node.in_port(1).data.get_shape() - indices_shape = node.in_port(2).data.get_shape() - assert input_shape is not None and rois_shape is not None and indices_shape is not None, \ - 'The node "{}" input shape is None'.format(node_name) - assert compatible_dims(rois_shape[0], indices_shape[0]), 'The number of batch indices does not correspond ' \ - 'to number of ROIs for node "{}"'.format(node_name) - assert compatible_dims(rois_shape[1], 4), 'The size of ROI element must be 4 for node "{}"'.format(node_name) - assert len(input_shape) == 4, 'The rank of port 0 input tensor of node "{}" must be 4.'.format(node_name) - node.out_port(0).data.set_shape( - shape_for_layout(layout, - batch=rois_shape[0], - features=input_shape[get_features_dim(layout, 4)], - height=node.pooled_h, - width=node.pooled_w) - ) - - @staticmethod - def reverse_infer(node): - set_input_shapes(node, - undefined_shape_of_rank(4), - shape_array([dynamic_dimension_value, 4]), - undefined_shape_of_rank(1)) diff --git a/tools/mo/openvino/tools/mo/ops/roifeatureextractor_onnx.py b/tools/mo/openvino/tools/mo/ops/roifeatureextractor_onnx.py deleted file mode 100644 index e399b1f16342c4..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/roifeatureextractor_onnx.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, set_input_shapes -from openvino.tools.mo.ops.op import Op - - -class ExperimentalDetectronROIFeatureExtractor(Op): - op = 'ExperimentalDetectronROIFeatureExtractor' - - def __init__(self, graph, attrs): - mandatory_props = dict( - type=self.op, - op=self.op, - version='opset6', - infer=self.infer, - reverse_infer=self.reverse_infer, - out_ports_count=2, - ) - - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return [ - ('pyramid_scales', lambda node: ','.join(map(str, node['pyramid_scales']))), - 'output_size', - 'sampling_ratio', - ('aligned', lambda node: 
str(bool(node.soft_get('aligned', False))).lower())] - - @staticmethod - def infer(node): - input_rois_shape = node.in_port(0).data.get_shape() - rois_num = input_rois_shape[0] - input_features_level_0_shape = node.in_port(1).data.get_shape() - channels_num = input_features_level_0_shape[1] - node.out_port(0).data.set_shape([rois_num, channels_num, node.output_size, node.output_size]) - if not node.out_port(1).disconnected(): - node.out_port(1).data.set_shape([rois_num, 4]) - - @staticmethod - def reverse_infer(node): - set_input_shapes(node, - shape_array([dynamic_dimension_value, 4]), - shape_array([1, dynamic_dimension_value, dynamic_dimension_value, dynamic_dimension_value])) diff --git a/tools/mo/openvino/tools/mo/ops/roipooling.py b/tools/mo/openvino/tools/mo/ops/roipooling.py deleted file mode 100644 index fef0c0554ab876..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/roipooling.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.roipooling import roipooling_infer -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, \ - undefined_shape_of_rank, set_input_shapes -from openvino.tools.mo.ops.op import Op - - -class ROIPooling(Op): - op = 'ROIPooling' - enabled = False - - def __init__(self, graph, attrs: dict): - super().__init__(graph, { - 'type': __class__.op, - 'op': __class__.op, - 'version': 'opset2', - 'pooled_h': None, - 'pooled_w': None, - 'spatial_scale': 0.0625, - 'method': 'max', - 'infer': roipooling_infer, - 'reverse_infer': self.reverse_infer, - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return ['pooled_h', 'pooled_w', 'spatial_scale', 'method'] - - @staticmethod - def reverse_infer(node): - set_input_shapes(node, undefined_shape_of_rank(4), shape_array([dynamic_dimension_value, 5])) diff --git a/tools/mo/openvino/tools/mo/ops/roll.py b/tools/mo/openvino/tools/mo/ops/roll.py deleted file mode 100644 index 4216d83b1916d0..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/roll.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.common.partial_infer.utils import reverse_bypass_infer -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op - - -class Roll(Op): - """ - Roll operation that shifts elements of a tensor along specified axes. - """ - op = 'Roll' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset7', - 'infer': roll_infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - 'in_ports_count': 3, - 'out_ports_count': 1 - }, attrs) - - -class AttributedRoll(Op): - """ Roll operation that shifts elements of a tensor along specified axes. - This operation uses the same semantics as Roll but with shift and axes specified as attributes. - Shift and axes are specified as attributes in MxNet. 
- """ - - op = 'AttributedRoll' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'infer': None, - 'in_ports_count': 3, - 'out_ports_count': 1, - 'shift': None, - 'axes': None - }, attrs) - - -def roll_infer(node: Node): - PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'axis') - copy_shape_infer(node) diff --git a/tools/mo/openvino/tools/mo/ops/scale_shift.py b/tools/mo/openvino/tools/mo/ops/scale_shift.py deleted file mode 100644 index c9baac5cedfa79..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/scale_shift.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class ScaleShiftOp(Op): - op = 'ScaleShift' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'infer': copy_shape_infer, - 'type': __class__.op, - 'op': __class__.op, - 'in_ports_count': 3, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/scatter.py b/tools/mo/openvino/tools/mo/ops/scatter.py deleted file mode 100644 index f33ec01959abc0..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/scatter.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes, reverse_bypass_infer, shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class Scatter(Op): - enabled = False - - op = op_type = None - version = None - - def __init__(self, graph: Graph, attrs: dict): - assert self.op is not None and self.op_type is not None and self.version is not None, \ - 'Please use specialized Scatter operation class, Scatter is base class' - - mandatory_props = { - 'op': self.op, - 'type': self.op_type, - 'version': self.version, - - 'is_scatter': True, # is used for gathering all types of scatters in common transformations - 'infer': self.infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node, in_ports=[0]), - - 'reduction': None, - 'use_init_val': None, - - 'in_ports_count': 4, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - - input_shape = node.in_port(0).data.get_shape() - indices_shape = node.in_port(1).data.get_shape() - updates_shape = node.in_port(2).data.get_shape() - assert input_shape is not None and updates_shape is not None and indices_shape is not None, \ - 'The node "{}" input shape is None'.format(node_name) - - node.out_port(0).data.set_shape(input_shape) - - -class ScatterElementsAdd(Scatter): - op = 'ScatterElementsAdd' - op_type = None - version = None - - -class ScatterElementsDiv(Scatter): - op = 'ScatterElementsDiv' - op_type = None - version = None - - -class ScatterElementsMax(Scatter): - op = 'ScatterElementsMax' - op_type = None - version = None - - -class ScatterElementsMin(Scatter): - op = 'ScatterElementsMin' - op_type = None - version = None - - -class ScatterElementsMul(Scatter): - op = 'ScatterElementsMul' - op_type = None - version = 'opset3' - - -class ScatterElementsSub(Scatter): - op = 'ScatterElementsSub' - op_type = None - version = None 
- - -class ScatterElementsUpdate(Scatter): - op = op_type = 'ScatterElementsUpdate' - version = 'opset3' - - def backend_attrs(self): - version = self.get_opset() - if version == 'opset12': - return ['reduction', 'use_init_val'] - else: - return [] - - @staticmethod - def infer(node: Node): - Scatter.infer(node) - - node_name = node.soft_get('name', node.id) - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - assert len(connected_in_ports) == 4, \ - "Incorrect number of inputs for {} node".format(node_name) - - input_value = node.in_port(0).data.get_value() - indices_value = node.in_port(1).data.get_value() - updates_value = node.in_port(2).data.get_value() - - input_shape = node.in_port(0).data.get_shape() - indices_shape = node.in_port(1).data.get_shape() - updates_shape = node.in_port(2).data.get_shape() - - assert len(input_shape) == len(indices_shape), 'data and indices inputs for node "{}" must be of the ' \ - 'same rank. Instead got {} and {}'.format(node_name, - len(input_shape), - len(indices_shape)) - assert compatible_shapes(indices_shape, updates_shape), \ - 'updates and indices shapes for node "{}" must be equal. Instead got {} and {}.' \ - ''.format(node_name, indices_shape, updates_shape) - - axis = node.in_port(3).data.get_value() - opset = node.soft_get('version', 'default') - is_opset12_reduction = opset == 'opset12' and (node.soft_get('reduction') != 'none' or not node.soft_get('use_init_val')) - if input_value is not None and indices_value is not None and updates_value is not None and axis is not None and not is_opset12_reduction: - assert axis.size == 1, "The node {} has axis input value size equal to {} but it should be exactly 1.".format( - node_name, axis.size) - axis = axis.item() - out_value = input_value.copy() - for idx in np.ndindex(*indices_shape): - data_idx = list(idx) - data_idx[axis] = indices_value[idx] - out_value[tuple(data_idx)] = updates_value[idx] - node.out_port(0).data.set_value(out_value) - - -class ScatterAdd(Scatter): - op = 'ScatterAdd' - op_type = None - version = None - - -class ScatterDiv(Scatter): - op = 'ScatterDiv' - op_type = None - version = None - - -class ScatterMax(Scatter): - op = 'ScatterMax' - op_type = None - version = None - - -class ScatterMin(Scatter): - op = 'ScatterMin' - op_type = None - version = None - - -class ScatterMul(Scatter): - op = 'ScatterMul' - op_type = None - version = None - - -class ScatterSub(Scatter): - op = 'ScatterSub' - op_type = None - version = None - - -class ScatterUpdate(Scatter): - op = op_type = 'ScatterUpdate' - version = 'opset3' - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - Scatter.infer(node) - - input_shape = node.in_port(0).data.get_shape() - - input_value = node.in_port(0).data.get_value() - indices_value = node.in_port(1).data.get_value() - updates_value = node.in_port(2).data.get_value() - - axis = node.in_port(3).data.get_value() - - if input_value is not None and indices_value is not None and updates_value is not None and axis is not None: - assert axis.size == 1, "The node {} has axis input value size equal to {} but it should be exactly 1.".format( - node_name, axis.size) - axis = axis.item() - if axis < 0: - axis = len(input_shape) + axis - - out_value = input_value.copy() - for idx in np.ndindex(*input_shape[:axis]): - out_value[idx][indices_value] = updates_value[idx] - # update value can be dynamic, we need to create masked array in that case - if isinstance(updates_value, np.ma.masked_array): 
- out_value = shape_array(out_value, dtype=out_value.dtype) - node.out_port(0).data.set_value(out_value) diff --git a/tools/mo/openvino/tools/mo/ops/scatternd.py b/tools/mo/openvino/tools/mo/ops/scatternd.py deleted file mode 100644 index 1934dd12ac8dd1..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/scatternd.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes, strict_compare_tensors, \ - is_fully_defined -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class ScatterNDBase(Op): - enabled = False - - op = op_type = None - version = None - - def __init__(self, graph: Graph, attrs: dict): - assert self.op is not None and self.op_type is not None and self.version is not None, \ - 'Please use specialized ScatterNDBase operation class, ScatterNDBase is base class' - - mandatory_props = { - 'op': self.op, - 'type': self.op_type, - 'version': self.version, - - 'infer': self.infer, - - 'in_ports_count': 3, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - - input_shape = node.in_port(0).data.get_shape() - indices_shape = node.in_port(1).data.get_shape() - updates_shape = node.in_port(2).data.get_shape() - assert input_shape is not None and updates_shape is not None and indices_shape is not None, \ - 'The node "{}" input shape is None'.format(node_name) - - # check that shapes are correct - # 1. ranks of both input and indices must be at least 1 - assert len(input_shape) >= 1 and len(indices_shape) >= 1, \ - 'The node "{}" input and indices ranks must be at least 1'.format(node_name) - - # 2. the last dimension of indices shape must be at most a rank of input - assert not is_fully_defined(indices_shape[-1]) or indices_shape[-1] <= len(input_shape), \ - 'The last dimension of indices shape must be at most a rank of input for the node "{}"'.format(node_name) - - # 3. updates is a tensor of shape indices_shape[:-1] + input_shape[indices_shape[-1]:] - # if expected updates shape is scalar, updates can be tensor with the single element (for example, of shape - # [1], [[1]], etc.) 
- expected_updates_shape = np.ma.concatenate((indices_shape[:-1], input_shape[indices_shape[-1]:]), axis=0) - assert compatible_shapes(updates_shape, expected_updates_shape) or \ - (strict_compare_tensors(expected_updates_shape, []) and - strict_compare_tensors(updates_shape, np.ones(len(updates_shape), dtype=np.int64))), \ - 'The updates shape must be equal to indices_shape[:-1] + input_shape[indices_shape[-1]:] for the node ' \ - '"{}"'.format(node_name) - - node.out_port(0).data.set_shape(input_shape) - - @staticmethod - def type_infer(node: Node): - assert node.in_port(0).get_source().get_data_type() == node.in_port(2).get_source().get_data_type(), \ - 'The data type of the first and the third inputs must be equal for the node {}'.format(node.name) - node.out_port(0).set_data_type(node.in_port(0).get_data_type()) - - -class ScatterNDUpdate(ScatterNDBase): - op = op_type = 'ScatterNDUpdate' - version = 'opset4' - - @staticmethod - def infer(node: Node): - ScatterNDBase.infer(node) - - input_value = node.in_port(0).data.get_value() - indices_shape = node.in_port(1).data.get_shape() - indices_value = node.in_port(1).data.get_value() - updates_value = node.in_port(2).data.get_value() - - # compute output value if all inputs are constant - if input_value is not None and is_fully_defined(indices_value) and updates_value is not None: - output_value = input_value.copy() - indx_range = indices_shape[:-1] - for indx in np.ndindex(tuple(indx_range)): - if indx == (): - # a case when updates is a scalar - indx = 0 - updates_value = [updates_value] - insert_index = indices_value[indx] - # we check and change index type explicitly to avoid error in indexing ndarray by another ndarray - if isinstance(insert_index, np.ndarray): - insert_index = tuple(insert_index) - output_value[insert_index] = updates_value[indx] - - node.out_port(0).data.set_value(output_value) - - -class TFScatterND(Op): - """ - TFScatterND operation comes from TensorFlow and will be replaced by TFScatterNDDecomposition. 
- """ - op = 'TFScatterND' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'in_ports_count': 3, - 'out_ports_count': 1, - 'infer': None - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/select.py b/tools/mo/openvino/tools/mo/ops/select.py deleted file mode 100644 index 3da9fbe7fe4780..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/select.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes, dynamic_dimension, shape_array, is_fully_defined -from openvino.tools.mo.graph.graph import Node, Graph, Error -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.broadcasting import bi_directional_shape_broadcasting, bi_directional_broadcasting - - -class Select(Op): - op = 'Select' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - 'in_ports_count': 3, - 'out_ports_count': 1, - 'infer': self.infer, - 'type_infer': self.type_infer, - 'auto_broadcast': 'numpy' - } - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return ['auto_broadcast'] - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - assert len([port for port in node.in_ports().values() if not port.disconnected()]) == 3, \ - "Select operation must have 3 inputs: 'condition', 'then' and 'else' tensors for node {}".format(node_name) - - condition_value = node.in_port(0).data.get_value() - condition_shape = node.in_port(0).data.get_shape() - resulting_tensors = [node.in_port(1).data.get_value(), node.in_port(2).data.get_value()] - - a_shape = node.in_port(1).data.get_shape() - b_shape = node.in_port(2).data.get_shape() - broadcast_rule = node.soft_get('auto_broadcast', 'numpy') - - if broadcast_rule == 'numpy': - msg = "In Select node '{}' condition and then/else shapes must be broadcastable. " \ - "But instead got: cond_shape={}, then_shape={}, else_shape={}".format( - node_name, condition_shape, a_shape, b_shape) - - output_shape = bi_directional_shape_broadcasting(a_shape, b_shape) - assert output_shape is not None, msg - - output_is_scalar = len(output_shape) == 0 - - # if Select was created from TF Where operations then 1D condition must have the same size - # as 0-index dimension of output_shape. This condition is different from being numpy compatible - # but by adding ones to the end we can achieve numpy compatibility, as in transformation SelectBroadcast.py - if node.has_valid('format') and node['format'] == 'tf' and len(condition_shape) == 1: - # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/array_ops.py#L4596-L4598 - msg_tf = "In Select node '{}' if 'condition' is a 1D tensor then it's size " \ - "must be matching with the first dimension of then/else branches. 
" \ - "But instead got: cond_shape={}, then_shape={}, else_shape={}".format( - node_name, condition_shape, a_shape, b_shape) - - # check equality only if both values non-dynamic - if is_fully_defined(condition_shape[0]) and not output_is_scalar and is_fully_defined(output_shape[0]): - assert condition_shape[0] == output_shape[0], msg_tf - ones_shape = len(output_shape) if output_is_scalar else len(output_shape) - 1 - condition_shape = np.concatenate((condition_shape, np.ones(ones_shape, dtype=np.int64))) - - output_shape = bi_directional_shape_broadcasting(output_shape, condition_shape) - assert output_shape is not None, msg - - elif broadcast_rule == 'pdpd': - # todo: add pdpd broadcasting rule - # note that additionally to output_shape resulting_tensors must be broadcasted as well - raise Error("PDPD broadcasting rule is not implemented yet") - else: # broadcasting is not allowed - assert compatible_shapes(a_shape, b_shape) and compatible_shapes(condition_shape, a_shape), \ - 'In node \'{}\' for Select operation when broadcasting is off all inputs must be of the same shape. ' \ - 'But instead got: cond_shape={}, then_shape={}, else_shape={}'.format( - node_name, condition_shape, a_shape, b_shape) - output_shape = shape_array([i if i is not dynamic_dimension else j for i, j in zip(a_shape, b_shape)]) - - node.out_port(0).data.set_shape(output_shape) - - if condition_value is not None: - if is_fully_defined(condition_value) and np.all(condition_value == condition_value.item(0)): - # in some graphs Select condition is always True[False] and - # one of the branches is None (which is not selected) - # if we use np.where for such cases then dtype of output_value will be object (non numeric type) - # and subsequent numpy operation on such tensors will fail - output_value = resulting_tensors[not bool(condition_value.item(0))] - if output_value is None: - return - if broadcast_rule == 'numpy': - output_value = bi_directional_broadcasting(output_value, output_shape) - elif broadcast_rule == 'pdpd': - # todo: add pdpd broadcasting rule - raise Error("PDPD broadcasting rule is not implemented yet") - - node.out_port(0).data.set_value(output_value) - elif resulting_tensors[0] is not None and resulting_tensors[1] is not None: - output_value = np.ma.where(condition_value, resulting_tensors[0], resulting_tensors[1]) - node.out_port(0).data.set_value(output_value) - - @staticmethod - def type_infer(node: Node): - assert node.in_port(1).get_source().get_data_type() == node.in_port(2).get_source().get_data_type(), \ - 'The data type of the second and the third inputs must be equal for the node {}'.format(node.name) - node.out_port(0).set_data_type(node.in_port(1).get_source().get_data_type()) diff --git a/tools/mo/openvino/tools/mo/ops/shape.py b/tools/mo/openvino/tools/mo/ops/shape.py deleted file mode 100644 index e576087d0925d8..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/shape.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class Shape(Op): - op = 'ShapeOf' - enabled = True - - def __init__(self, graph: Graph, attrs: 
dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset3', - - 'output_type': np.int64, - 'infer': self.infer, - 'type_infer': self.type_infer, - - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - def backend_attrs(self): - version = self.get_opset() - if version == 'opset3': - return [ - ('output_type', lambda node: np_data_type_to_destination_type(node.output_type)), - ] - elif version == 'opset1': - return [] - else: - raise Error('Unknown opset version "{}"'.format(version)) - - - @staticmethod - def infer(node): - name = node.soft_get('name', node.id) - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - assert len(connected_in_ports) == 1, \ - 'ShapeOf operation should have exact one input node, but it has {}'.format(len(connected_in_ports)) - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None, \ - 'Input shape is undefined for ShapeOf node `{}`'.format(node.soft_get('name', node.id)) - - assert node.has_valid('output_type'), \ - '`output_type` attribute is not set for ShapeOf node `{}`'.format(name) - assert node.output_type in [np.int64, np.int32], \ - 'ShapeOf `output_type` attribute must be int32 or int64, `{}` found'.format(np.dtype(node.output_type).name) - - if node.has_and_set('stop_value_propagation'): - node.out_port(0).data.set_shape(input_shape.shape) - else: - node.out_port(0).data.set_value(shape_array(mo_array(input_shape, dtype=node.output_type))) - - @staticmethod - def type_infer(node): - node.out_port(0).set_data_type(node.output_type) diff --git a/tools/mo/openvino/tools/mo/ops/shufflechannel.py b/tools/mo/openvino/tools/mo/ops/shufflechannel.py deleted file mode 100644 index 711780801b62ee..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/shufflechannel.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op - - -class ShuffleChannels(Op): - op = 'ShuffleChannels' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset3', - - 'infer': self.infer, - - 'axis': 1, - 'group': None, - - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - def backend_attrs(self): - return ['group', 'axis'] - - @staticmethod - def infer(node: Node): - node_name = node.soft_get('name', node.id) - assert node.soft_get('group') is not None, 'The attribute "group" must be set for node {}'.format(node_name) - node.out_port(0).data.set_shape(node.in_port(0).data.get_shape()) diff --git a/tools/mo/openvino/tools/mo/ops/size.py b/tools/mo/openvino/tools/mo/ops/size.py deleted file mode 100644 index 72212bef026950..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/size.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, shape_array, dynamic_dimension_value -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class Size(Op): - op = 'Size' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - assert 'output_type' in attrs, 'Size has mandatory `output_type` attribute' - - mandatory_props = { - 'type': None, - 
'op': self.op, - - 'output_type': np.int64, - 'infer': self.infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - connected_in_ports = [port for port in node.in_ports().values() if not port.disconnected()] - assert len(connected_in_ports) == 1, \ - 'Size operation should have exact one input node, but it has {}'.format(len(connected_in_ports)) - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None, \ - 'Input shape is undefined for Size node `{}`'.format(node.soft_get('name', node.id)) - - assert node.has_valid('output_type'), \ - '`output_type` attribute is not set for Size node `{}`'.format(name) - assert node.output_type in [np.int64, np.int32], \ - 'Size `output_type` attribute must be int32 or int64, `{}` found'.format(np.dtype(node.output_type).name) - - if is_fully_defined(input_shape): - node.out_port(0).data.set_value(mo_array(np.prod(input_shape), dtype=node.output_type)) - else: - node.out_port(0).data.set_value(shape_array(dynamic_dimension_value)) diff --git a/tools/mo/openvino/tools/mo/ops/slice.py b/tools/mo/openvino/tools/mo/ops/slice.py deleted file mode 100644 index c59aba17799a82..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/slice.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import get_shape_from_slice, shape_array, \ - dynamic_dimension_value, \ - dynamic_dimension, is_dynamic_slice -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - -""" -Slicing operations have different semantic or different parameters/inputs in different frameworks. To distinguish them -several internal operations are introduced. The internal MO Slice operation behaves same as Slice in ONNX opset >= 10. -A number of transformations take place on the front phase to convert framework slicing: - - AttributedSlice, TFSlice -> Slice - - CaffeSlice -> Split -""" - - -class AttributedSlice(Op): - """ - AttributedSlice is used in old versions of ONNX models (opset version < 10). - Is replaced with internal Slice on the front phase. - """ - op = 'AttributedSlice' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'infer': None, - }, attrs) - - -class CaffeSlice(Op): - """ - Slice in Caffe is equivalent to Split operation in OpenVINO. - https://caffe.berkeleyvision.org/tutorial/layers/slice.html - Is replaced with Split from opset on the front phase. - """ - op = 'CaffeSlice' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'infer': None, - }, attrs) - - -class TFSlice(Op): - """ - TFSlice differs from Slice in ONNX, Caffe. - TFSlice has 'begin' and 'size' inputs while Slice has 'start', 'end', 'step', and 'axis' inputs. - https://www.tensorflow.org/api_docs/python/tf/slice - Is replaced with internal Slice op on the front phase. 
- """ - op = 'TFSlice' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'in_ports_count': 3, - 'out_ports_count': 1, - 'infer': None, - }, attrs) - - -def slice_infer(node: Node, steps_idx: int, axes_idx: int): - input_value = node.in_port(0).data.get_value() - input_shape = node.in_port(0).data.get_shape() - - starts = node.in_port(1).data.get_value() - ends = node.in_port(2).data.get_value() - if node.is_in_port_connected(steps_idx): - steps = node.in_port(steps_idx).data.get_value() - else: - steps = np.ones(len(starts), dtype=np.int64) - - if node.is_in_port_connected(axes_idx): - axes = node.in_port(axes_idx).data.get_value() - else: - axes = [x for x in range(len(starts))] - - if starts is None or ends is None or steps is None or axes is None: - node.out_port(0).data.set_shape(shape_array([dynamic_dimension_value] * len(input_shape))) - return - - slice_idx = [slice(0, in_shape, 1) for in_shape in input_shape] - for i in range(len(axes)): - # Ranged for output value for specified axis - slice_idx[axes[i]] = slice(starts[i], ends[i], steps[i]) - if input_value is None or any(is_dynamic_slice(s) for s in slice_idx): - output_shape = get_shape_from_slice(input_shape, slice_idx) - node.out_port(0).data.set_shape(output_shape) - else: - node.out_port(0).data.set_value(input_value[tuple(slice_idx)]) - - -class Slice(Op): - """ - Semantic of Slice is identical to Slice in ONNX opset >= 10. - It has 'starts', 'ends', 'steps', and 'axes' inputs. - SliceConverter replaces it with StridedSlice from opset. - """ - op = 'Slice' - enabled = False - - def __init__(self, graph: Graph, attrs: dict = None): - super().__init__(graph, { - 'type': None, - 'op': 'Slice', - 'in_ports_count': 5, - 'out_ports_count': 1, - 'infer': self.infer - }, attrs) - - @staticmethod - def infer(node: Node): - slice_infer(node, 4, 3) - - -class OvSlice(Op): - """ - Semantic of OvSlice is identical to Slice in Openvino opset8. - It is introduced for usage in MO IR Reader. 
- """ - op = 'OvSlice' - enabled = False - - def __init__(self, graph: Graph, attrs: dict = None): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'in_ports_count': 5, - 'out_ports_count': 1, - 'infer': self.infer - }, attrs) - - @staticmethod - def infer(node: Node): - slice_infer(node, 3, 4) diff --git a/tools/mo/openvino/tools/mo/ops/slice_like.py b/tools/mo/openvino/tools/mo/ops/slice_like.py deleted file mode 100644 index efa71efe3cfe15..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/slice_like.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, is_fully_defined -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class SliceLike(Op): - op = 'slice_like' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - assert 'axes' in attrs, 'Please set mandatory `axes` attribute for `slice_like` operation' - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': self.infer, - }, attrs) - - @staticmethod - def infer(node): - input_shape = node.in_port(0).data.get_shape() - input_value = node.in_port(0).data.get_value() - shape_like = node.in_port(1).data.get_shape() - - new_shape = shape_array(input_shape.copy()) - if node.axes is not None: - node.axes = sorted([get_canonical_axis_index(input_shape, i) for i in node.axes]) - for i in node.axes: - new_shape[i] = shape_like[i] - else: - assert input_shape.size == shape_like.size,\ - 'Input shape ranks are inconsistent: {} and {}'.format(input_shape.size, shape_like.size) - node.axes = int64_array(range(shape_like.size)) - new_shape = shape_like.copy() - node.out_port(0).data.set_shape(new_shape) - - if input_value is not None and is_fully_defined(new_shape): - out_value = np.copy(input_value) - - slice_indexes = [] - for s in out_value.shape: - slice_indexes.append(slice(0, s)) - - for axis in node.axes: - slice_indexes[axis] = slice(0, new_shape[axis]) - out_value = out_value[tuple(slice_indexes)] - node.out_port(0).data.set_value(out_value) diff --git a/tools/mo/openvino/tools/mo/ops/softmax.py b/tools/mo/openvino/tools/mo/ops/softmax.py deleted file mode 100644 index 38cdc416c1bd2d..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/softmax.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op, PermuteAttrs - - -class Softmax(Op): - op = 'SoftMax' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset8', - 'infer': self.infer, - 'axis': 1, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - def supported_attrs(self): - return ['axis'] - - @staticmethod - def infer(node: Node): - copy_shape_infer(node) - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) - - -class SoftmaxONNX(Op): - op = 'SoftMaxONNX' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'infer': None, - 'axis': 1, - 'type': None, # this operation will be replaced with 
a - # Reshape(Softmax(Flatten(x, axis), -1), x.shape) sub-graph - 'op': __class__.op, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/space_to_batch.py b/tools/mo/openvino/tools/mo/ops/space_to_batch.py deleted file mode 100644 index 97096419a4b0a8..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/space_to_batch.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, dynamic_dimension -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op - - -class SpaceToBatch(Op): - op = 'SpaceToBatch' - enabled = False - - def __init__(self, graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'in_ports_count': 3, - 'out_ports_count': 1, - 'version': 'opset2', - 'infer': self.infer, - }, attrs) - - @staticmethod - def infer(node): - """ - https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch - """ - input_shape = node.in_port(0).data.get_shape() - node_name = node.soft_get('name', node.id) - assert len(node.in_nodes()) == 4, 'Some inputs are not connected for the operation SpaceToBatch with name {}' \ - ''.format(node_name) - - block_size = node.in_port(1).data.get_value() - pads_begin = node.in_port(2).data.get_value() - pads_end = node.in_port(3).data.get_value() - assert block_size is not None and pads_begin is not None and pads_end is not None,\ - 'Some inputs are not defined for SpaceToBatch operation with name {}'.format(node_name) - - pads = pads_begin + input_shape + pads_end - - if is_fully_defined(block_size): - block_elements_count = np.prod(block_size) - else: - block_elements_count = dynamic_dimension - node.out_port(0).data.set_shape([input_shape[0] * block_elements_count, - *[x for x in (pads[1:] // block_size[1:])]]) - - # block_shape, pads_begin, pads_end should be permuted during the NHWC->NCHW layout change - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'shape') - PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'shape') - PermuteInputs().set_input_permutation(node.in_node(3), node, 'input:0', 'shape') - - -class BatchToSpace(Op): - op = 'BatchToSpace' - enabled = False - - def __init__(self, graph, attrs: dict): - super().__init__(graph, { - 'kind': 'op', - 'op': self.op, - 'type': self.op, - 'in_ports_count': 3, - 'out_ports_count': 1, - 'version': 'opset2', - 'infer': self.infer - }, attrs) - - @staticmethod - def infer(node): - input_shape = node.in_node(0).shape - if input_shape is None: - return - - if len(node.in_nodes()) != 4: - return - - block_size = node.in_port(1).data.get_value() - crops_begin = node.in_port(2).data.get_value() - crops_end = node.in_port(3).data.get_value() - if block_size is None or crops_begin is None or crops_end is None: - return - - pads = block_size * input_shape - - sizes = pads[1:] - crops_begin[1:] - crops_end[1:] - if is_fully_defined(block_size): - block_elements_count = np.prod(block_size) - else: - block_elements_count = dynamic_dimension - batch = input_shape[0] // block_elements_count - - node.out_port(0).data.set_shape([batch, *sizes]) - - # block_shape, crops_begin, crops_end values should be permuted during the NHWC->NCHW layout change - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'shape') - 
PermuteInputs().set_input_permutation(node.in_node(2), node, 'input:0', 'shape') - PermuteInputs().set_input_permutation(node.in_node(3), node, 'input:0', 'shape') diff --git a/tools/mo/openvino/tools/mo/ops/space_to_depth.py b/tools/mo/openvino/tools/mo/ops/space_to_depth.py deleted file mode 100644 index 424ee3bddc47de..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/space_to_depth.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.layout import shape_for_layout, get_height_dim, get_batch_dim, get_features_dim, get_width_dim -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, is_fully_defined -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class SpaceToDepth(Op): - op = 'SpaceToDepth' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - - 'mode': 'blocks_first', - - 'infer': self.infer, - - 'in_ports_count': 1, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return ['mode', 'block_size'] - - @staticmethod - def infer(node: Node): - in_shape = node.in_node().shape - if in_shape.size != 4: - raise Error('TensorFlow SpaceToDepth operation is supported for 4D \'NHWC\' input layout only. ' - 'Current input shape is \'{}\''.format(in_shape)) - - layout = node.graph.graph['layout'] - N = in_shape[get_batch_dim(layout, 4)] - H = in_shape[get_height_dim(layout, 4)] - W = in_shape[get_width_dim(layout, 4)] - C = in_shape[get_features_dim(layout, 4)] - - block_size = node['block_size'] - if (H is not dynamic_dimension and H % block_size) or (W is not dynamic_dimension and W % block_size): - raise Error('Spatial dimensions of input tensor of SpaceToDepth operation have to be divisible by ' - 'SpaceToDepth \'block_size\' parameter. Input tensor shape = {}. Spatial dimensions = {},{}. ' - 'block_size = {}'.format(in_shape, H, W, block_size)) - - out_shape = shape_for_layout(layout, - batch=N, - features=C * (block_size ** 2), - height=H // block_size, - width=W // block_size) - - node.out_port(0).data.set_shape(out_shape) diff --git a/tools/mo/openvino/tools/mo/ops/sparse_fill_empty_rows.py b/tools/mo/openvino/tools/mo/ops/sparse_fill_empty_rows.py deleted file mode 100644 index 53a8da6a87bace..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/sparse_fill_empty_rows.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, is_fully_defined -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class SparseFillEmptyRows(Op): - """ - The operation fills empty rows in the input 2-D sparse tensor with a default value. - For more details see https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/sparse-fill-empty-rows - - 4 inputs: - - [0, required] input indices of the sparse tensor (2D), - - [1, required] input values of the sparse tensor (1D), - - [2, required] shape of the sparse tensor. 
Value of this input is required for the Model Optimizer (1D), - - [3, required] default value to insert at rows missing from the input sparse tensor (0D), - - 3 outputs: - - [0, optional] indices of the filled sparse tensor (2D) - - [1, optional] values of the filled sparse tensor (1D) - - [2, optional] indicator of whether the dense row was missing in the input sparse tensor (1D) - """ - op = 'SparseFillEmptyRows' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'version': 'experimental', - 'infer': self.infer, - 'in_ports_count': 4, - 'out_ports_count': 3 - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [] - - @staticmethod - def infer(node: Node): - assert len(node.in_nodes()) == 4 - - # check that shape value is defined that is needed for shape inference - shape = node.in_node(2) - assert shape.value is not None and shape.value.size == 2, \ - "SparseFillEmptyRows is supported only with constant shape value" - - shape_value = int64_array(shape.value) - - # check that default value is scalar - default_value = node.in_node(3) - assert default_value.shape is not None and len(default_value.shape) == 0, \ - "Default value for SparseFillEmptyRows must be scalar" - - if node.is_out_port_connected(0): # set a shape for output indices - if is_fully_defined(shape_value): - node.out_port(0).data.set_shape([np.prod(shape_value), 2]) - else: - node.out_port(0).data.set_shape([dynamic_dimension_value, 2]) - if node.is_out_port_connected(1): # set a shape for output values - if is_fully_defined(shape_value): - node.out_port(1).data.set_shape([np.prod(shape_value)]) - else: - node.out_port(1).data.set_shape([dynamic_dimension_value]) - if node.is_out_port_connected(2): # set a shape for empty row indicator - node.out_port(2).data.set_shape([shape_value[0]]) diff --git a/tools/mo/openvino/tools/mo/ops/sparse_reshape.py b/tools/mo/openvino/tools/mo/ops/sparse_reshape.py deleted file mode 100644 index 3de474e58317a8..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/sparse_reshape.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, is_fully_defined, shape_array, strict_compare_tensors, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class SparseReshape(Op): - """ - SparseReshape operation reshapes a sparse tensor in Coordinate list (COO) format - It recomputes indices for a new dense shape. 
- """ - op = 'SparseReshape' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': __class__.op, - 'infer': self.infer, - 'in_ports_count': 3, - 'out_ports_count': 2, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [] - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - input_indices_shape = node.in_port(0).data.get_shape() - input_indices_value = node.in_port(0).data.get_value() - input_shape = node.in_port(1).data.get_value() - new_shape = node.in_port(2).data.get_value() - new_shape_shape = node.in_port(2).data.get_shape() - - assert input_shape is not None and new_shape is not None, \ - "Values for input shape and new shape must be defined" - assert len(np.argwhere(new_shape == -1)) <= 1, \ - "Value -1 occurs in new shape value more than once" - assert len(np.argwhere(new_shape < -1)) == 0, \ - "Only non-negative or -1 values are allowed" - - output_shape = np.ma.masked_array(new_shape, mask=new_shape == -1, fill_value=dynamic_dimension_value) - assert not is_fully_defined(input_shape) or not is_fully_defined(output_shape) or \ - np.prod(input_shape) == np.prod(output_shape), \ - "Number of elements in input {} and output {} of dynamic reshape node {} mismatch" \ - "".format(input_shape, output_shape, name) - - # we can deduce -1 only if input_shape is fully defined and - # there is one dynamic dimension in output_shape - if is_fully_defined(input_shape) and np.ma.count_masked(output_shape) == 1: - undefined_dim_size = np.prod(input_shape) // np.prod(output_shape) - - undefined_idx = np.where(output_shape == dynamic_dimension)[0][0] - output_shape[undefined_idx] = undefined_dim_size - output_shape.mask[undefined_idx] = False - - node.out_port(1).data.set_value(shape_array(output_shape)) - output_indices_shape = np.concatenate((input_indices_shape[0:1], new_shape_shape)) - node.out_port(0).data.set_shape(output_indices_shape) - - # TODO: implement constant value propagation for common case with scipy.sparse.coo_matrix.reshape - # instead of compatible_shapes we intentionally use np.array_equal - if strict_compare_tensors(input_shape, output_shape) and input_indices_value is not None: - node.out_port(0).data.set_value(input_indices_value) diff --git a/tools/mo/openvino/tools/mo/ops/sparse_segment_mean.py b/tools/mo/openvino/tools/mo/ops/sparse_segment_mean.py deleted file mode 100644 index 798578f525f9af..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/sparse_segment_mean.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class SparseSegmentMean(Op): - ''' The operation computes the mean along sparse segments of a tensor - For more details, see https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/sparse-segment-mean. - - Three inputs: - - [0, required] Data tensor from which rows are selected for the mean (ND), - - [1, required] Tensor of indices of selected rows from the first input tensor along 0 dimension (1D), - - [2, required] Tensor of segment IDs to which selected rows for the mean belong. - Selected rows belonging to the same segment are computed with the mean. The tensor has the same size as the second input. - Values must be sorted and can be repeated. (1D). 
- - One output: - - [0, required] The output has the same shape as the data tensor, except for dimension 0, which has a size equal to a number of segments (ND) - ''' - op = 'SparseSegmentMean' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': __class__.op, - 'version': 'experimental', - 'infer': __class__.infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [] - - @staticmethod - def infer(node: Node): - # check a number of input/output edges - assert len(node.in_nodes()) == 3 - assert len(node.out_nodes()) == 1 - - data_shape = node.in_port(0).data.get_shape() - indices_shape = node.in_port(1).data.get_shape() - segment_ids_shape = node.in_port(2).data.get_shape() - data_value = node.in_port(0).data.get_value() - indices_value = node.in_port(1).data.get_value() - segment_ids_value = node.in_port(2).data.get_value() - - # check input shapes - assert data_shape is not None, \ - "Shape for input data tensor to SparseSegmentMean must be defined" - assert indices_shape is not None and indices_shape.size == 1, \ - "SparseSegmentMean supports only 1D indices tensor" - assert segment_ids_shape is not None and segment_ids_shape.size == 1, \ - "SparseSegmentMean supports only 1D segment IDs tensor" - assert compatible_shapes(segment_ids_shape, indices_shape), \ - "Indices and segment IDs tensors must have compatible shapes" - - # computes output shape - output_shape = data_shape - output_shape[0] = segment_ids_shape[0] - node.out_port(0).data.set_shape(output_shape) - - # infer if all input is constant - if data_value is None or indices_value is None or segment_ids_value is None: - return - - # check that values in segment_ids are sorted - for i in range(1, len(segment_ids_value)): - assert segment_ids_value[i-1] <= segment_ids_value[i], \ - "Values in segment IDs are not sorted" - num_segments = int(segment_ids_value[-1]) + 1 - - # check that indices are in a range [0, data_shape[0]) - assert np.all(indices_value >= 0) and np.all(indices_value < data_shape[0]), \ - "Some value in indices tensor is out of range" - - # infer - num_adds = np.zeros(num_segments, dtype=int) - output_value = np.zeros([num_segments] + data_shape[1:].tolist(), dtype=np.float32) - output_shape = output_value.shape - for i in range(len(segment_ids_value)): - segment_id = int(segment_ids_value[i]) - indice = int(indices_value[i]) - output_value[segment_id, :] += data_value[indice, :] - num_adds[segment_id] += 1 - - for segment_id in range(num_segments): - if num_adds[segment_id] != 0: - output_value[segment_id, :] /= num_adds[segment_id] - node.out_port(0).data.set_shape(output_shape) - node.out_port(0).data.set_value(output_value) diff --git a/tools/mo/openvino/tools/mo/ops/sparse_segment_sqrtn.py b/tools/mo/openvino/tools/mo/ops/sparse_segment_sqrtn.py deleted file mode 100644 index b48df5ecda3391..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/sparse_segment_sqrtn.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class SparseSegmentSqrtN(Op): - ''' The operation computes the sum along sparse segments of a tensor and divides it by the square root of N, where N is a number of rows in a segment. 
- For more details, see https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/sparse-segment-sqrt-n. - - Three inputs: - - [0, required] Data tensor from which rows are selected for the sum divided by sqrt of N (ND), - - [1, required] Tensor of indices of selected rows from the first input tensor along 0 dimension (1D), - - [2, required] Tensor of segment IDs to which selected rows belong. - Selected rows belonging to the same segment are summed up. The tensor has the same size as the second input. - Values must be sorted and can be repeated. (1D). - - One output: - - [0, required] The output has the same shape as the data tensor, except for dimension 0, which has a size equal to a number of segments (ND). - ''' - op = 'SparseSegmentSqrtN' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': __class__.op, - 'version': 'experimental', - 'infer': __class__.infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [] - - @staticmethod - def infer(node: Node): - # check a number of input/output edges - assert len(node.in_nodes()) == 3 - assert len(node.out_nodes()) == 1 - - data_shape = node.in_port(0).data.get_shape() - indices_shape = node.in_port(1).data.get_shape() - segment_ids_shape = node.in_port(2).data.get_shape() - data_value = node.in_port(0).data.get_value() - indices_value = node.in_port(1).data.get_value() - segment_ids_value = node.in_port(2).data.get_value() - - # check input shapes - assert data_shape is not None, \ - "Shape for input data tensor to SparseSegmentSqrtN must be defined" - assert indices_shape is not None and indices_shape.size == 1, \ - "SparseSegmentSqrtN supports only 1D indices tensor" - assert segment_ids_shape is not None and segment_ids_shape.size == 1, \ - "SparseSegmentSqrtN supports only 1D segment IDs tensor" - assert compatible_shapes(segment_ids_shape, indices_shape), \ - "Indices and segment IDs tensors must have compatible shapes" - - # computes output shape - output_shape = data_shape - output_shape[0] = segment_ids_shape[0] - node.out_port(0).data.set_shape(output_shape) - - # infer if all input is constant - if data_value is None or indices_value is None or segment_ids_value is None: - return - - # check that values in segment_ids are sorted - for i in range(1, len(segment_ids_value)): - assert segment_ids_value[i-1] <= segment_ids_value[i], \ - "Values in segment IDs are not sorted" - num_segments = int(segment_ids_value[-1]) + 1 - - # check that indices are in a range [0, data_shape[0]) - assert np.all(indices_value >= 0) and np.all(indices_value < data_shape[0]), \ - "Some value in indices tensor is out of range" - - # infer - num_adds = np.zeros(num_segments, dtype=int) - output_value = np.zeros([num_segments] + data_shape[1:].tolist(), dtype=np.float32) - output_shape = output_value.shape - for i in range(len(segment_ids_value)): - segment_id = int(segment_ids_value[i]) - indice = int(indices_value[i]) - output_value[segment_id, :] += data_value[indice, :] - num_adds[segment_id] += 1 - - num_adds = np.sqrt(num_adds) - for segment_id in range(num_segments): - if num_adds[segment_id] != 0: - output_value[segment_id, :] /= num_adds[segment_id] - node.out_port(0).data.set_shape(output_shape) - node.out_port(0).data.set_value(output_value) diff --git a/tools/mo/openvino/tools/mo/ops/sparse_segment_sum.py b/tools/mo/openvino/tools/mo/ops/sparse_segment_sum.py deleted file mode 100644 index 
34af7d8dae70a6..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/sparse_segment_sum.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class SparseSegmentSum(Op): - ''' The operation computes the sum along sparse segments of a tensor. - For more details, see https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/sparse-segment-sum. - - Three inputs: - - [0, required] Data tensor from which rows are selected for the sum (ND), - - [1, required] Tensor of indices of selected rows from the first input tensor along 0 dimension (1D), - - [2, required] Tensor of segment IDs to which selected rows for the sum belong. - Selected rows belonging to the same segment are summed up. The tensor has the same size as the second input. - Values must be sorted and can be repeated. (1D). - - One output: - - [0, required] The output has the same shape as the data tensor, except for dimension 0, which has a size equal to a number of segments. (ND) - ''' - op = 'SparseSegmentSum' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': __class__.op, - 'version': 'experimental', - 'infer': __class__.infer, - 'in_ports_count': 3, - 'out_ports_count': 1, - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [] - - @staticmethod - def infer(node: Node): - # check a number of input/output edges - assert len(node.in_nodes()) == 3 - assert len(node.out_nodes()) == 1 - - data_shape = node.in_port(0).data.get_shape() - indices_shape = node.in_port(1).data.get_shape() - segment_ids_shape = node.in_port(2).data.get_shape() - data_value = node.in_port(0).data.get_value() - indices_value = node.in_port(1).data.get_value() - segment_ids_value = node.in_port(2).data.get_value() - - # check input shapes - assert data_shape is not None, \ - "Shape for input data tensor to SparseSegmentSum must be defined" - assert indices_shape is not None and indices_shape.size == 1, \ - "SparseSegmentSum supports only 1D indices tensor" - assert segment_ids_shape is not None and segment_ids_shape.size == 1, \ - "SparseSegmentSum supports only 1D segment IDs tensor" - assert compatible_shapes(segment_ids_shape, indices_shape), \ - "Indices and segment IDs tensors must have compatible shapes" - - # computes output shape - output_shape = data_shape - output_shape[0] = segment_ids_shape[0] - node.out_port(0).data.set_shape(output_shape) - - # infer if all input is constant - if data_value is None or indices_value is None or segment_ids_value is None: - return - - # check that values in segment_ids are sorted - for i in range(1, len(segment_ids_value)): - assert segment_ids_value[i-1] <= segment_ids_value[i], \ - "Values in segment IDs are not sorted" - num_segments = int(segment_ids_value[-1]) + 1 - - # check that indices are in a range [0, data_shape[0]) - assert np.all(indices_value >= 0) and np.all(indices_value < data_shape[0]), \ - "Some value in indices tensor is out of range" - - # infer - output_value = np.zeros([num_segments] + data_shape[1:].tolist(), dtype=np.float32) - output_shape = output_value.shape - for i in range(len(segment_ids_value)): - segment_id = int(segment_ids_value[i]) - indice = int(indices_value[i]) - output_value[segment_id, :] += data_value[indice, :] - 
node.out_port(0).data.set_shape(output_shape) - node.out_port(0).data.set_value(output_value) diff --git a/tools/mo/openvino/tools/mo/ops/splice.py b/tools/mo/openvino/tools/mo/ops/splice.py deleted file mode 100644 index 9457591dec71f1..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/splice.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op - - -class Splice(Op): - op = 'Splice' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': self.op, - 'const_dim': 0, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'infer': self.infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - output_shape = input_shape.copy() - output_shape[1] = node.const_dim + (input_shape[1] - node.const_dim) * len(node.context) - node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/openvino/tools/mo/ops/split.py b/tools/mo/openvino/tools/mo/ops/split.py deleted file mode 100644 index fc8b464bb8ebd8..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/split.py +++ /dev/null @@ -1,320 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, dynamic_dimension, shape_delete, \ - clarify_partial_shape, shape_array, mo_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op, PermuteAttrs - - -def delete_out_port(idx, node: Node): - for k in range(idx + 1, node.out_ports_count): - node.out_port(k).get_connection().set_source(node.out_port(k - 1)) - node.out_ports_count -= 1 - - -class VariadicSplitBase(Op): - op = None - enabled = False - - @staticmethod - def infer(node): - name = node.soft_get('name', node.id) - - op = node.soft_get('op', None) - assert op is not None and op in ['VariadicSplit', 'AttributedVariadicSplit'], \ - 'Unexpected `op`={} attribute for Split-like node {}'.format(op, name) - - num_in_ports = 1 if op == 'AttributedVariadicSplit' else 3 if op == 'VariadicSplit' else None - assert num_in_ports in [1, 3], \ - 'VariadicSplitBase supports AttributedVariadicSplit with 1 input and VariadicSplit with 3 inputs, ' \ - 'but it is {} for {} node {}'.format(num_in_ports, op, name) - - connected_inputs = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_inputs) == num_in_ports and all([i in connected_inputs for i in range(num_in_ports)]), \ - "{} should have {} connected input ports, but it doesn't for node: `{}`. 
Ports: {}" \ - "".format(op, num_in_ports, name, connected_inputs) - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None - - axis = node.in_port(1).data.get_value() if op == 'VariadicSplit' else node.soft_get('axis', None) - assert axis is not None, '{} `axis` is unknown for node {}'.format(op, name) - assert axis.ndim == 0 or (axis.ndim == 1 and axis.shape[0] == 1), \ - '{} `axis` should be scalar or tensor with shape [1], but it`s not for node {}'.format(op, name) - - split_lengths = node.in_port(2).data.get_value() if op == 'VariadicSplit' else node.soft_get('split_lengths', - None) - assert split_lengths is not None, '{} `split_lengths` is unknown for node {}'.format(op, name) - - undefined_elements = np.argwhere(split_lengths == -1).flatten() - assert undefined_elements.size <= 1, \ - '{} split_lengths=`{}` is a list with output sizes, only one of which could be -1. Node: {}' \ - ''.format(op, split_lengths, name) - - input_elements = input_shape[axis] - assert undefined_elements.size != 0 or input_elements is dynamic_dimension or \ - input_elements == np.sum(split_lengths), 'The sum of split_lengths=`{}` must match data.shape[axis]=' \ - '`{}`. Node: {}'.format(split_lengths, input_elements, name) - - assert len(split_lengths) >= len([port for i, port in node.out_ports().items() if not port.disconnected()]), \ - 'Number of split_lengths=`{}` is less than connected output ports. Node: {}'.format(split_lengths, name) - - # in split_lengths some value can be 0, in this case we will ignore it: - # * remove according branch - # * remove 0 from split_lengths - for i in reversed(range(len(split_lengths))): - if split_lengths[i] == 0: - if node.out_port(i).disconnected(): - split_lengths = shape_delete(split_lengths, i) - if op == 'VariadicSplit': - node.in_port(2).data.set_value(split_lengths) - else: - node['split_lengths'] = split_lengths - delete_out_port(i, node) - else: - log.warning("Zero dimension on {} branch after Split node {}".format(i, node.id)) - - # shape propagation - idxs, curr_pos = [], 0 - for i, piece in enumerate(split_lengths): - assert piece >= -1, 'VariadicSplit split_lengths=`{}` should be non-negative'.format(split_lengths) - out_shape = input_shape.copy() - - split_length = piece if piece > -1 else input_elements - (np.sum(split_lengths) + 1) - out_shape[axis] = split_length - curr_pos = curr_pos + split_length - idxs.append(curr_pos) - - if not node.out_port(i).disconnected(): - node.out_port(i).data.set_shape(out_shape) - - # value propagation - input_value = node.in_port(0).data.get_value() - if input_value is not None: - split = np.split(input_value, mo_array(idxs[:-1], dtype=split_lengths.dtype), axis) - for i, port in node.out_ports().items(): - if not port.disconnected(): - port.data.set_value(split[i]) - - if op == 'VariadicSplit': - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'axis') - elif op == 'AttributedVariadicSplit': - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) - - @staticmethod - def reverse_infer(node: Node): - if node.in_port(0).data.get_shape() is not None: - return - - axis = node.in_port(1).data.get_value() if node.op == 'VariadicSplit' else node.soft_get('axis', None) - assert axis is not None, '{} `axis` is unknown for node {}'.format(node.op, node.soft_get('name', node.id)) - split_lengths = node.in_port(2).data.get_value() if node.op == 'VariadicSplit' else node.soft_get('split_lengths', None) - assert split_lengths is not None, '{} `split_lengths` is 
unknown for node {}'.format(node.op, node.soft_get('name', node.id)) - - split_reverse_infer(node, len(split_lengths), axis) - - -class VariadicSplit(VariadicSplitBase): - op = 'VariadicSplit' - - def __init__(self, graph: Graph, attrs: dict): - assert 'axis' not in attrs, \ - 'Please use `AttributedVariadicSplit` instead of `VariadicSplit` operation to create node with `axis` ' \ - 'parameter set or keep using VariadicSplit operation, but express axis as a scalar second input of ' \ - 'VariadicSplit operation' - - assert 'size_splits' not in attrs, \ - 'Please use `AttributedVariadicSplit` instead of `VariadicSplit` operation to create node with ' \ - '`size_splits` parameter set or keep using VariadicSplit operation, but express size_splits as a 1D ' \ - 'third input of VariadicSplit operation' - - assert 'out_ports_count' in attrs, 'Please set `out_ports_count` attribute for VariadicSplit while creating' - - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - - 'in_ports_count': 3, - }, attrs) - - def supported_attrs(self): - return ['axis'] - - -class AttributedVariadicSplit(VariadicSplitBase): - op = 'AttributedVariadicSplit' - - def __init__(self, graph: Graph, attrs: dict): - assert 'axis' in attrs, 'AttributedVariadicSplit operation should have `axis` parameter set while creation' - assert 'size_splits' in attrs, \ - 'AttributedVariadicSplit operation should have `size_splits` parameter set while creation' - - if 'out_ports_count' not in attrs: - attrs['out_ports_count'] = len(attrs['size_splits']) - - super().__init__(graph, { - 'op': self.op, - 'type': 'VariadicSplit', - 'version': 'opset1', - - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - - 'in_ports_count': 1, - }, attrs) - - -class SplitBase(Op): - op = None - enabled = False - - @staticmethod - def infer(node): - name = node.soft_get('name', node.id) - - op = node.soft_get('op', None) - assert op is not None and op in ['Split', 'AttributedSplit'], \ - 'Unexpected `op`={} attribute for Split-like node {}'.format(op, name) - - num_in_ports = 1 if op == 'AttributedSplit' else 2 if op == 'Split' else None - assert num_in_ports in [1, 2], \ - 'SplitBase supports AttributedSplit with 1 input and Split with 2 inputs, but it is {} for {} node {}' \ - ''.format(num_in_ports, op, name) - - connected_inputs = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_inputs) == num_in_ports and all([i in connected_inputs for i in range(num_in_ports)]), \ - "{} should have {} connected input ports, but it doesn't for node: `{}`. Ports: {}" \ - "".format(op, num_in_ports, name, connected_inputs) - - input_shape = node.in_port(0).data.get_shape() - assert input_shape is not None, 'Input shape is unknown for node {}'.format(name) - assert node.has_valid('num_splits'), 'Parameter `num_splits` is unknown for node {}'.format(name) - num_splits = node.num_splits - - axis = node.in_port(1).data.get_value() if op == 'Split' else node.soft_get('axis', None) - assert axis is not None, '{} `axis` is unknown for node {}'.format(op, name) - assert axis.ndim == 0, '{} `axis` should be scalar, but it`s not for node {}'.format(op, name) - - assert not is_fully_defined(input_shape[axis]) or input_shape[axis] % num_splits == 0, \ - 'Input shape is not evenly divided by `num_splits` of {} node {}. 
`input_shape`={}, `axis`={}, ' \ - '`num_splits`={}'.format(op, name, input_shape, axis, num_splits) - - out_shape = input_shape.copy() - out_shape[axis] = input_shape[axis] // num_splits - - input_value = node.in_port(0).data.get_value() - output_value = np.split(input_value.copy(), axis=axis, indices_or_sections=num_splits) \ - if input_value is not None else None - - for idx, port in node.out_ports().items(): - if idx in node.out_nodes(): - port.data.set_shape(out_shape) - if output_value is not None: - port.data.set_value(output_value[idx]) - - if op == 'Split': - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'axis') - elif op == 'AttributedSplit': - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) - - @staticmethod - def reverse_infer(node: Node): - if node.in_port(0).data.get_shape() is not None: - return - - assert hasattr(node, 'num_splits') - axis = node.in_port(1).data.get_value() if node.op == 'Split' else node.soft_get('axis', None) - assert axis is not None, '{} `axis` is unknown for node {}'.format(node.op, node.soft_get('name', node.id)) - split_reverse_infer(node, node['num_splits'], axis) - - -class Split(SplitBase): - op = 'Split' - - def __init__(self, graph: Graph, attrs: dict): - assert 'num_splits' in attrs, 'Split operation should have `num_splits` while creation' - if 'out_ports_count' not in attrs: - attrs['out_ports_count'] = attrs['num_splits'] - - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - - 'in_ports_count': 2, - }, attrs) - - assert 'axis' not in self.attrs, \ - 'Please use `AttributedSplit` instead of `Split` operation to create node with `axis` parameter set or' \ - ' keep using Split operation, but express axis as a scalar second input of Split operation' - - def supported_attrs(self): - return ['num_splits'] - - -class AttributedSplit(SplitBase): - op = 'AttributedSplit' - - def __init__(self, graph: Graph, attrs: dict): - assert 'num_splits' in attrs, 'AttributedSplit operation should have `num_splits` while creation' - if 'out_ports_count' not in attrs: - attrs['out_ports_count'] = attrs['num_splits'] - - super().__init__(graph, { - 'op': self.op, - 'type': 'Split', - 'version': 'opset1', - - 'axis': 1, - - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - - 'in_ports_count': 1, - }, attrs) - - assert 'axis' in self.attrs, 'AttributedSplit operation should have `axis` parameter set while creation' - - def supported_attrs(self): - return ['num_splits', 'axis'] - - -def split_reverse_infer(node: Node, num_splits: int, axis: int): - aggregated_size_along_axis = 0 - shapes = [] - for i in range(num_splits): - shape = node.out_port(i).data.get_shape() if not node.out_port(i).disconnected() else None - if shape is not None: - # if out_shape_1 = [dyn, 4, 3], out_shape_2 = [7, dyn, 3], axis = 2 - # to get the original source shape [7, 4, 6] - # dimensions along axis must be summed while - # for other dimensions clarify_partial_shape should be called - aggregated_size_along_axis += shape[axis] - # in order to be able to call clarify_partial_shape axis dimension is masked into dynamic - shape[axis] = dynamic_dimension - shapes.append(shape_array(shape)) - else: - # if at least one output shape is None/undefined - # set value of shape along axis into dynamic - # dynamic_dimension + static_value = dynamic_dimension - aggregated_size_along_axis = dynamic_dimension - continue - - if len(shapes) == 0: - 
return - - res_partial_shape = clarify_partial_shape(shapes) - res_partial_shape[axis] = aggregated_size_along_axis - node.in_port(0).data.set_shape(res_partial_shape) diff --git a/tools/mo/openvino/tools/mo/ops/squeeze.py b/tools/mo/openvino/tools/mo/ops/squeeze.py deleted file mode 100644 index bca11bb8cf03ee..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/squeeze.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import get_canonical_axis_index -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension, shape_delete, is_fully_defined, \ - undefined_shape_of_rank -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class Squeeze(Op): - op = 'Squeeze' - enabled = False - - def __init__(self, graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - 'squeeze_dims': None, - 'reinterp_shape': True, - 'keep_at_least_1d': 0, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - }, attrs) - - @staticmethod - def infer(node: Node): - real_squeeze_dims = int64_array([]) - input_shape = node.in_port(0).data.get_shape() - node_name = node.soft_get('name', node.id) - if input_shape is None: - raise Error('Input shape is not defined for node {}'.format(node_name)) - - output_shape = input_shape.copy() - assert len(node.in_nodes()) == 2, 'The Squeeze node {} must have 2 inputs'.format(node_name) - - # TODO remove the following 'if' statement when OV start support 0D tensors - squeeze_dims = node.in_port(1).data.get_value() - if squeeze_dims.ndim == 0: - squeeze_dims = squeeze_dims.reshape([1]) - - for dim in squeeze_dims: - if output_shape[dim] == 1 or output_shape[dim] is dynamic_dimension: - real_squeeze_dims = np.ma.append(real_squeeze_dims, get_canonical_axis_index(output_shape, dim)) - else: - raise Error('Trying to squeeze dimension not equal to 1 for node "{}"'.format(node_name)) - - # if squeeze_dims empty then all 1s should be removed (tf specification of Squeeze op) - if squeeze_dims.size == 0: - for i in range(output_shape.size): - if output_shape[i] == 1: - real_squeeze_dims = np.ma.append(real_squeeze_dims, get_canonical_axis_index(output_shape, i)) - - assert is_fully_defined(real_squeeze_dims), 'Squeeze dimension(s) is not defined for op "{}"'.format(node_name) - output_shape = shape_delete(output_shape, real_squeeze_dims) - node.out_port(0).data.set_shape(output_shape) - - # make dimensions positive to correctly translate from NHWC to NCHW layout - if node.in_port(1).get_source().node.op == 'Const': - node.in_port(1).data.set_value(real_squeeze_dims) - - if node.in_port(0).data.get_value() is not None: - node.out_port(0).data.set_value(node.in_port(0).data.get_value().reshape(output_shape)) - - # the squeeze_dim attribute will be converted to the second input in the end of the Middle phase - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'axis') - - @staticmethod - def reverse_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - output_shape = node.out_port(0).data.get_shape() - squeeze_dims = node.in_port(1).data.get_value() - if input_shape is None and output_shape is not None and squeeze_dims is not 
None: - num_squeeze_dims = 1 if int64_array(squeeze_dims).ndim == 0 else len(squeeze_dims) - shape = undefined_shape_of_rank(len(output_shape) + num_squeeze_dims) - node.in_port(0).data.set_shape(shape) diff --git a/tools/mo/openvino/tools/mo/ops/stop_gradient.py b/tools/mo/openvino/tools/mo/ops/stop_gradient.py deleted file mode 100644 index 507c420fdd2223..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/stop_gradient.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class StopGradientOp(Op): - op = 'StopGradient' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'identity': True, - 'in_ports_count': 1, - 'out_ports_count': 1, - 'infer': copy_shape_infer - }, attrs) - diff --git a/tools/mo/openvino/tools/mo/ops/strided_slice.py b/tools/mo/openvino/tools/mo/ops/strided_slice.py deleted file mode 100644 index c113df3d4fab97..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/strided_slice.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from typing import List, Tuple - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import get_shape_from_slice, dynamic_dimension, dynamic_dimension_value, \ - is_dynamic_slice -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.utils import array_to_str - - -class StridedSlice(Op): - op = 'StridedSlice' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': 'StridedSlice', - 'version': 'opset1', - 'in_ports_count': 4, - 'out_ports_count': 1, - 'infer': self.infer - }, attrs) - for mask_name in StridedSlice.get_mask_names(): - assert mask_name in attrs, 'Attribute {} of the StridedSlice node is not given.'.format(mask_name) - - @staticmethod - def get_mask_names(): - return ['begin_mask', 'end_mask', 'new_axis_mask', 'shrink_axis_mask', 'ellipsis_mask'] - - def backend_attrs(self): - al = list() - - def convert(attr): - return lambda node: array_to_str(node, attr) - - for a in StridedSlice.get_mask_names(): - al.append((a, convert(a))) - return al - - @staticmethod - def infer(node: Node): - data_shape = node.in_port(0).data.get_shape() - data_value = node.in_port(0).data.get_value() - slices = StridedSlice.get_slices(node, data_shape) - - if data_value is not None and dynamic_dimension_value not in slices and \ - all(not is_dynamic_slice(s) for s in slices): - node.out_port(0).data.set_value(data_value[tuple(slices)]) - else: - node.out_port(0).data.set_shape(get_shape_from_slice(data_shape, slices)) - - node['slices'] = slices - node['force_precision_in_ports'] = {port: 'int64' for port in range(1, len(node.in_nodes()))} - - # StridedSliceNormalizer inserts nodes that change original begin, end, and strides data nodes - # and since input permutations are stored in data nodes we end up having permutations - # in the wrong place of the graph. - # Therefore, PermuteInputs will be set after StridedSliceNormalizer. 
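The mask semantics used by infer() above (and by get_slices() below) can be pictured with a minimal, self-contained numpy sketch. This is an illustration only, assuming plain numpy: slices_from_masks is an invented name, ellipsis_mask handling is omitted, and none of it is MO or OpenVINO API.

import numpy as np

def slices_from_masks(begin, end, strides, begin_mask, end_mask, new_axis_mask, shrink_axis_mask):
    # Build numpy-style indices from a StridedSlice spec (no ellipsis for brevity).
    indices = []
    for i in range(len(begin)):
        if new_axis_mask[i]:            # insert a new dimension of size 1
            indices.append(np.newaxis)
        elif shrink_axis_mask[i]:       # take a single element, dropping the dimension
            indices.append(int(begin[i]))
        else:                           # a zero mask bit means "take the full range" on that side
            start = begin[i] if begin_mask[i] else None
            stop = end[i] if end_mask[i] else None
            indices.append(slice(start, stop, strides[i]))
    return tuple(indices)

# Static shape inference then reduces to indexing a dummy tensor of the input shape:
data = np.zeros((1, 8, 8, 3))
idx = slices_from_masks(begin=[0, 2, 0, 0], end=[0, 6, 8, 0], strides=[1, 2, 1, 1],
                        begin_mask=[0, 1, 1, 0], end_mask=[0, 1, 1, 0],
                        new_axis_mask=[0, 0, 0, 0], shrink_axis_mask=[1, 0, 0, 0])
print(data[idx].shape)  # (2, 8, 3): axis 0 shrunk, axis 1 sliced 2:6:2, the rest kept whole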
- - @staticmethod - def get_slices(node: Node, data_shape: Tuple) -> List: - input_rank = len(data_shape) - slice_rank = node.in_port(1).data.get_shape()[0] - begin = node.in_port(1).data.get_value() - end = node.in_port(2).data.get_value() - strides = node.in_port(3).data.get_value() if node.is_in_port_connected(3) else \ - np.ones([slice_rank], dtype=np.int64) - - # from now slices are without ellipsis - slices = [[]] * slice_rank - in_idx = 0 # index along input tensor shapes, note that input_rank not necessary is equal to slice_rank - for i in range(slice_rank): - if i < len(node.new_axis_mask) and node.new_axis_mask[i]: - slices[i] = np.newaxis - elif i < len(node.shrink_axis_mask) and node.shrink_axis_mask[i]: - if begin is not None and begin[i] is not dynamic_dimension: - slices[i] = int(begin[i]) - # the normalization is needed for the ConvertGroupedStridedSlice transformation - if slices[i] < 0 and data_shape[in_idx] is not dynamic_dimension: - slices[i] += int(data_shape[in_idx]) - else: - slices[i] = dynamic_dimension_value - elif i < len(node.ellipsis_mask) and node.ellipsis_mask[i]: - slices[i] = ... - in_idx += input_rank - slice_rank + np.count_nonzero(node.new_axis_mask) - else: - if begin is not None and end is not None and strides is not None: - start, stop = begin[i], end[i] - if i < len(node.begin_mask) and not node.begin_mask[i]: # if begin, and end are not specified take the whole range - start = None - if i < len(node.end_mask) and not node.end_mask[i]: - stop = None - slices[i] = slice(start, stop, strides[i]) - else: - slices[i] = dynamic_dimension_value - in_idx += 1 if i < len(node.new_axis_mask) and not node.new_axis_mask[i] else 0 - return slices - - @staticmethod - def align_mask_with_slice_rank(node: Node, slice_rank: int): - # align masks sizes with slice_rank (not confuse with extending, mask_alignment != mask_extending) - for mask_name in StridedSlice.get_mask_names(): - num_insertations = slice_rank - len(node[mask_name]) - val = 0 if mask_name not in ['begin_mask', 'end_mask'] else 1 # extend with ones only for begin and end - node[mask_name] = np.append(node[mask_name], [val] * num_insertations).astype(int) - diff --git a/tools/mo/openvino/tools/mo/ops/swapaxis.py b/tools/mo/openvino/tools/mo/ops/swapaxis.py deleted file mode 100644 index 5a34a5613649ac..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/swapaxis.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import PermuteAttrs, Op - - -class SwapAxis(Op): - op = 'SwapAxis' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node: Node): - node['order'] = list(range(node.in_node().shape.size)) - node.order[node.dim2], node.order[node.dim1] = node.order[node.dim1], node.order[node.dim2] - - input_shape = node.in_port(0).data.get_shape().copy() - node.out_port(0).data.set_shape(input_shape[node.order]) - if node.in_port(0).data.get_value() is not None: - node.out_port(0).data.set_value(np.transpose(node.in_port(0).data.get_value(), axes=node.order)) - - PermuteAttrs.create_permute_attrs(node, attrs=[('order', 'input:0')]) - - @staticmethod - 
def reverse_infer(node: Node): - output_shape = node.out_port(0).data.get_shape() - if node.in_port(0).data.get_shape() is None and output_shape is not None: - input_shape = output_shape.data.copy() - input_shape[node.dim2], input_shape[node.dim1] = input_shape[node.dim1], input_shape[node.dim2] - node.in_port(0).data.set_shape(shape_array(input_shape)) diff --git a/tools/mo/openvino/tools/mo/ops/switch.py b/tools/mo/openvino/tools/mo/ops/switch.py deleted file mode 100644 index 43c27cc534e18f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/switch.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, is_fully_defined -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class Switch(Op): - op = 'Switch' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'infer': self.infer, - 'cf_infer': self.control_flow_infer - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def infer(node: Node): - assert len(node.in_nodes()) == 2 - tensor = node.in_node(0) - port_id = node.in_node(1) - - output_shape = shape_array(tensor.shape) - for out_port_id in range(2): - if node.is_out_port_connected(out_port_id): - node.out_port(out_port_id).data.set_shape(output_shape) - - if port_id.has_valid('value'): - output_value = tensor.value - if output_value is not None: - for out_port_id in range(2): - if node.is_out_port_connected(out_port_id): - node.out_port(out_port_id).data.set_value(output_value.copy()) - - @staticmethod - def control_flow_infer(node: Node, is_executable: bool, mark_executability: callable): - """ - Infers control flow through switch operation node. 
It marks output data nodes executability according to - executability of current node and switch data value - :param node: Node instance to infer control flow through - :param is_executable: if current node is executable - :param mark_executability: function to mark executability of node - """ - out_data_nodes = node.out_nodes(control_flow=True) - node_with_switch_value = node.in_node(1) - - switch_data_0_port_node_id = [out_data_nodes[0].id] if 0 in out_data_nodes else [] - switch_data_1_port_node_id = [out_data_nodes[1].id] if 1 in out_data_nodes else [] - assert 1 <= len(switch_data_0_port_node_id) + len(switch_data_1_port_node_id) <= 2 - - if not node_with_switch_value.has_valid('value') or not is_fully_defined(node_with_switch_value.value): - # Mark both ports as executable - resulting_switch_data_node_ids = switch_data_0_port_node_id + switch_data_1_port_node_id - for n in resulting_switch_data_node_ids: - mark_executability(n, True) - else: - switch_value = node_with_switch_value.value.item(0) - resulting_switch_data_node_ids = [switch_data_0_port_node_id, switch_data_1_port_node_id] - - for n in resulting_switch_data_node_ids[not switch_value]: - mark_executability(n, False) - for n in resulting_switch_data_node_ids[switch_value]: - mark_executability(n, is_executable) diff --git a/tools/mo/openvino/tools/mo/ops/tdnncomponent.py b/tools/mo/openvino/tools/mo/ops/tdnncomponent.py deleted file mode 100644 index 415ab7f6b584bc..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/tdnncomponent.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class TdnnComponent(Op): - op = 'tdnncomponent' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'infer': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/tensor_iterator.py b/tools/mo/openvino/tools/mo/ops/tensor_iterator.py deleted file mode 100644 index 899971bb917130..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/tensor_iterator.py +++ /dev/null @@ -1,435 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -from copy import copy, deepcopy -from math import ceil - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, is_fully_defined, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node, dict_includes, Graph -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.utils.error import Error - - -class TensorIterator(Op): - """ - Loop layer that iterates over tensors and execute embedded sub-graph. - """ - - op = 'TensorIterator' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'input_port_map': [], # a list of dicts with such attrs as external_port_id, etc. - 'output_port_map': [], # a list of dicts with such attrs as external_port_id, etc. - 'back_edges': [], # a list of dicts with such attrs as from_layer, from_port, etc. 
- 'body': None, # an Graph object with a body sub-graph - 'sub_graphs': ['body'], # built-in attribute with all sub-graph - 'infer': self.infer, - 'type_infer': self.ti_type_infer, - } - super().__init__(graph, mandatory_props, attrs) - - @staticmethod - def cover_body_input_data_nodes_with_parameter_ops(ti: Node): - body = ti.body - - op_port_map = [] - for record in ti.input_port_map: - operation_node = get_internal_node_by_layer_id(ti, record['internal_layer_id']) - real_in_port = TensorIterator.special_port_to_real_port(operation_node, copy(record['internal_port_id'])) - op_port_map.append((operation_node, real_in_port)) - - for operation_node, in_port in op_port_map: - data_node = operation_node.in_node(in_port) - - attrs = deepcopy(body.get_edge_data(data_node.id, operation_node.id)[0]) - body.remove_edge(data_node.id, operation_node.id) - - assert data_node.has_valid('shape'), \ - 'Data node should have `shape` attribute set, but it`s not for node {}'.format(data_node.id) - shape = data_node['shape'].copy() - parameter_data_node = Parameter(body, {'shape': shape_array(shape)}).create_node_with_data() - - body.create_edge(src_node=parameter_data_node, dst_node=operation_node, - out_port=0, in_port=in_port, edge_attrs=attrs) - del body.get_edge_data(parameter_data_node.id, operation_node.id)[0]['out'] - - @staticmethod - def cover_body_constant_data_nodes_with_const_ops(ti: Node): - body = ti.body - for data_node in body.get_data_nodes(): - if len(data_node.in_nodes()) == 0 and len(data_node.out_nodes()) != 0: - assert data_node.has_valid('shape'), \ - 'Data node should have `shape` attribute set, but it`s not for node {}'.format(data_node.id) - assert data_node.has_valid('value'), \ - 'Data node should have `value` attribute set, but it`s not for node {}'.format(data_node.id) - shape = data_node['shape'].copy() - value = data_node['value'].copy() - const_node = Const(body, {'shape': shape, 'value': value}).create_node() - body.create_edge(src_node=const_node, dst_node=data_node, out_port=0, in_port=0) - - @staticmethod - def special_port_to_real_port(node: Node, special_port_id: int, direction: str = 'in'): - assert node.kind == 'op' - assert direction in ['in', 'out'] - - port_type = 'external_port_id' if node.has_valid('body') else 'internal_port_id' - - if direction == 'in': - edges = node.in_edges() - else: - edges = node.out_edges() - - suitable_edges = {} - for idx, attrs in edges.items(): - if port_type in attrs and attrs[port_type] == special_port_id: - suitable_edges[idx] = attrs - assert len(suitable_edges) == 1 - return list(suitable_edges.keys())[0] - - @staticmethod - def set_internal_layer_id_for_nodes(ti: Node, nodes: list): - max_internal_layer_id_used = max([n.soft_get('internal_layer_id', 0) for n in ti.body.get_op_nodes()]) - - for node in nodes: - if not node.has_valid('internal_layer_id'): - node['internal_layer_id'] = max_internal_layer_id_used = max_internal_layer_id_used + 1 - - @staticmethod - def update_back_edge_map(ti, direction, old_layer_id, old_port_id, new_layer_id, new_port_id=None): - assert direction in ['from', 'to'] - layer_attr_name = direction + '_layer' - port_attr_name = direction + '_port' - - for record in ti.back_edges: - if record[layer_attr_name] != old_layer_id: - continue - if (port_attr_name in record and record[port_attr_name] == old_port_id) or new_port_id is not None: - record[layer_attr_name] = new_layer_id - if new_port_id is None: - del record[port_attr_name] - else: - record[port_attr_name] = new_port_id - - @staticmethod - def 
validate_maps(ti): - def check_by_attribute(port_map, appropriate_attribute, inappropriate_attribute, node_type): - for record in port_map: - node = get_internal_node_by_layer_id(ti, record[appropriate_attribute]) - assert node.soft_get('type') == node_type - assert inappropriate_attribute not in record, record[inappropriate_attribute] - - check_by_attribute(ti.input_port_map, 'internal_layer_id', 'internal_port_id', 'Parameter') - check_by_attribute(ti.output_port_map, 'internal_layer_id', 'internal_port_id', 'Result') - check_by_attribute(ti.back_edges, 'from_layer', 'from_port', 'Result') - check_by_attribute(ti.back_edges, 'to_layer', 'to_port', 'Parameter') - - @staticmethod - def normalize_internal_ids(ti): - assert ti.has_valid('input_port_map') - assert ti.has_valid('output_port_map') - assert ti.has_valid('back_edges') - - body = ti.body - - TensorIterator.set_internal_layer_id_for_nodes(ti, body.get_op_nodes(type='Parameter')) - TensorIterator.set_internal_layer_id_for_nodes(ti, body.get_op_nodes(type='Result')) - - node_map = {copy(node.internal_layer_id): node for node in body.get_op_nodes() if - node.has_valid('internal_layer_id')} - - for record in ti.input_port_map: - assert 'internal_layer_id' in record - assert 'internal_port_id' in record - assert 'external_port_id' in record - - internal_node_id = copy(record['internal_layer_id']) - assert internal_node_id in node_map - internal_node = node_map[internal_node_id] - - in_port = TensorIterator.special_port_to_real_port(internal_node, copy(record['internal_port_id'])) - assert in_port in internal_node.in_ports() and not internal_node.in_port(in_port).disconnected() - - internal_input_node = internal_node.in_port(in_port).get_source().node - assert internal_input_node.soft_get('type') == 'Parameter' - - TensorIterator.update_back_edge_map(ti=ti, direction='to', old_layer_id=internal_node_id, - old_port_id=record['internal_port_id'], - new_layer_id=internal_input_node.internal_layer_id) - del record['internal_port_id'] - record['internal_layer_id'] = internal_input_node['internal_layer_id'] - - for record in ti.output_port_map: - assert 'internal_layer_id' in record - assert 'internal_port_id' in record - assert 'external_port_id' in record - - internal_node_id = copy(record['internal_layer_id']) - assert internal_node_id in node_map - internal_node = node_map[internal_node_id] - - out_port = TensorIterator.special_port_to_real_port(internal_node, copy(record['internal_port_id']), 'out') - assert out_port in internal_node.out_ports() and not internal_node.out_port(out_port).disconnected() - - assert len(internal_node.out_port(out_port).get_destinations()) >= 1 - - internal_output_node = None - for dst in internal_node.out_port(out_port).get_destinations(): - possible_output_node = dst.node - if possible_output_node.soft_get('type') == 'Result': - assert internal_output_node is None, 'Several Result operations on the same output port of {}'.format( - internal_node) - internal_output_node = possible_output_node - assert internal_output_node is not None - TensorIterator.update_back_edge_map(ti=ti, direction='from', old_layer_id=internal_node_id, - old_port_id=record['internal_port_id'], - new_layer_id=internal_output_node.internal_layer_id) - - del record['internal_port_id'] - record['internal_layer_id'] = internal_output_node.internal_layer_id - - for record in ti.back_edges: - assert 'from_layer' in record - assert 'to_layer' in record - - internal_node_id = record['from_layer'] - assert internal_node_id in node_map - 
internal_node = node_map[internal_node_id] - - if internal_node.soft_get('type') != 'Result': - # this output won't get out of the body, but it is still Result and needed on non first iterations of TI - assert 'from_port' in record - out_port = TensorIterator.special_port_to_real_port(internal_node, record['from_port'], 'out') - assert out_port in internal_node.out_ports() and not internal_node.out_port(out_port).disconnected() - assert len(internal_node.out_port(out_port).get_destinations()) >= 1 - - internal_output_node = None - for dst in internal_node.out_port(out_port).get_destinations(): - possible_output_node = dst.node - if possible_output_node.soft_get('type') == 'Result': - assert internal_output_node is None, 'Several Result operations on the same output port of {}' \ - ''.format(internal_node) - internal_output_node = possible_output_node - assert internal_output_node is not None - TensorIterator.update_back_edge_map(ti=ti, direction='from', old_layer_id=internal_node_id, - old_port_id=record['from_port'], - new_layer_id=internal_output_node.internal_layer_id) - - TensorIterator.validate_maps(ti) - - def port_map_attrs(self): - return [ - 'external_port_id', - 'internal_layer_id', - 'internal_port_id', - 'axis', - 'start', - 'stride', - 'end', - 'part_size', - ] - - def substitute_ie_attrs(self, new_attrs: dict): - """ - Replace standard list of attribute in layer/data by attributes - delivered by backend_attrs - """ - - port_map_attrs = self.port_map_attrs() - - back_edges_attrs = [ - ('from-layer', 'from_layer'), - ('to-layer', 'to_layer'), - ] - - new_attrs.update({ - 'IE': [( - 'layer', - [('id', lambda node: node.node), 'name', 'type', 'version'], - [ - ('data', self.backend_attrs() + self.default_backend_attrs, []), - '@ports', - ('port_map', [], [ - ('@list', lambda node: self.generate_port_map(node, node.input_port_map, 'in'), - ('input', port_map_attrs, [])), - ('@list', lambda node: self.generate_port_map(node, node.output_port_map, 'out'), - ('output', port_map_attrs, [])), - ]), - ('back_edges', [], [ - ('@list', lambda node: self.generate_back_edges(node), ('edge', back_edges_attrs, [])), - ]), - ('body', [], [('@network', 'body')]), - ])] - }) - - @staticmethod - def find_port_id(node: Node, virtual_id: str, attr: str): - attrs = node.edge({attr: virtual_id})[2] - assert bool('in' in attrs) != bool('out' in attrs), attrs - return attrs['in' if 'in' in attrs else 'out'] - - @staticmethod - def find_internal_layer_id(graph: Graph, virtual_id): - internal_nodes = list( - filter(lambda d: dict_includes(d[1], {'internal_layer_id': virtual_id}), graph.nodes(data=True))) - assert len(internal_nodes) == 1, 'Nodes: {}, virtual_id: {}'.format(internal_nodes, virtual_id) - return internal_nodes[0][0] - - @staticmethod - def generate_port_map(node: Node, src_port_map, dir: str): - """ Extract port_map attributes from node and node.body attributes. - - It iterates over src_port_map and substitute external_port_id, internal_port_id and - internal_layer_id by real values queried from node ports and node.body attributes. 
- """ - result_list = [] - for map_item in src_port_map: - result = dict(map_item) - assert result is not map_item - result['external_port_id'] = __class__.find_port_id(node, result['external_port_id'], 'external_port_id') - result['internal_layer_id'] = __class__.find_internal_layer_id(node.body, result['internal_layer_id']) - result_list.append(result) - return result_list - - @staticmethod - def generate_back_edges(node: Node): - ''' Extract back_edges attributes from node and node.body attributes. ''' - result_list = [] - for back_edge in node.back_edges: - result = dict(back_edge) - assert result is not back_edge - result['from_layer'] = __class__.find_internal_layer_id(node.body, result['from_layer']) - result['to_layer'] = __class__.find_internal_layer_id(node.body, result['to_layer']) - result_list.append(result) - return result_list - - @staticmethod - def infer(node: Node): - return - raise Error('TensorIterator.infer is not implemented. ' - 'Do not insert TensorIterator before middle-end in Model Optimizer') - - @staticmethod - def ti_type_infer(node): - from openvino.tools.mo.middle.passes.infer import type_infer - ti_graph = node.body - - for record in node.input_port_map: - internal_node = get_internal_node_by_layer_id(node, record['internal_layer_id']) - assert internal_node.soft_get('type') == 'Parameter', internal_node.soft_get('type') - - real_external_port_idx = TensorIterator.special_port_to_real_port(node, record['external_port_id']) - external_data_type = node.in_port(real_external_port_idx).get_connection().get_source().get_data_type() - internal_node.data_type = external_data_type - - fake_input_const_nodes = [] - # create fake const node to make type inference work correctly for all TI input nodes - for data_node in ti_graph.get_data_nodes(has_value=True): - if len(data_node.in_nodes()) == 0: - const_node = Const(ti_graph, {'name': 'const_', 'value': data_node.value}).create_node() - fake_input_const_nodes.append(const_node) - ti_graph.create_edge(const_node, data_node) - - type_infer(ti_graph) - - # propagate data types to the TI output ports - for record in node.output_port_map: - internal_node = get_internal_node_by_layer_id(node, record['internal_layer_id']) - assert internal_node.soft_get('type') == 'Result', internal_node.soft_get('type') - - internal_data_type = internal_node.in_port(0).get_data_type() - real_external_port_idx = TensorIterator.special_port_to_real_port(node, record['external_port_id'], 'out') - node.out_port(real_external_port_idx).set_data_type(internal_data_type) - - ti_graph.remove_nodes_from([node.id for node in fake_input_const_nodes]) - - @staticmethod - def find_iterations_count_for_output(ti_node): - def check_field(record, field): - return field in record and record[field] is not None - iterations_count = dynamic_dimension_value - # find out iterations count from inputs. - # If no input contains 'axis' attribute then no slicing is in TI and it has only one iteration - # If several inputs have axis attribute with different iterations count then we use maximum value. 
- for in_rec in ti_node.input_port_map: - if not check_field(in_rec, 'axis'): - continue - assert check_field(in_rec, 'external_port_id'), "external_port_id not set for input of {} node".format(ti_node.id) - in_shape = ti_node.in_port(in_rec['external_port_id']).data.get_shape() - if check_field(in_rec, 'end') and in_rec['end'] >= 0 and \ - check_field(in_rec, 'start') and in_rec['start'] >= 0: - in_rec_end = in_rec['end'] - in_rec_start = in_rec['start'] - elif check_field(in_rec, 'end') and in_rec['end'] >= 0: - in_rec_end = in_rec['end'] - in_rec_start = in_shape[in_rec['axis']] if not check_field(in_rec, 'start') else \ - in_shape[in_rec['axis']] + 1 + in_rec['start'] - elif check_field(in_rec, 'start') and in_rec['start'] >= 0: - in_rec_end = in_shape[in_rec['axis']] if not check_field(in_rec, 'end') else \ - in_shape[in_rec['axis']] + 1 + in_rec['end'] - in_rec_start = in_rec['start'] - elif check_field(in_rec, 'end') and in_rec['end'] < 0 and \ - check_field(in_rec, 'start') and in_rec['start'] < 0: - in_rec_end = in_rec['end'] - in_rec_start = in_rec['start'] - else: - in_rec_end = ti_node.in_port(in_rec['external_port_id']).data.get_shape()[in_rec['axis']] - in_rec_start = 0 - - if check_field(in_rec, 'stride'): - in_rec_stride = in_rec['stride'] - else: - in_rec_stride = 1 - - # in case of dynamic iterations count don't continue any calculations on this iteration - if not is_fully_defined(in_rec_end) or not is_fully_defined(in_rec_start): - continue - - if iterations_count is not dynamic_dimension_value and \ - ceil((in_rec_end - in_rec_start) / in_rec_stride) != iterations_count: - raise Error("TensorIterator node {} have inputs with different iterations count".format(ti_node.id)) - iterations_count = ceil((in_rec_end - in_rec_start) / in_rec_stride) - - return iterations_count - - -def get_internal_node_by_layer_id(ti, internal_layer_id): - suitable_nodes = ti.body.get_op_nodes(internal_layer_id=internal_layer_id) - assert len(suitable_nodes) == 1, \ - 'Expected 1 node with `internal_layer_id`={}, {} found'.format(internal_layer_id, len(suitable_nodes)) - return suitable_nodes[0] - - -# Some utils for TI -def _get_internal_idxs_to_names_dict(graph: Graph, ports_type='in'): - """ - Create mapping from (internal_layer_id, internal_port_id) to layer id in body of TensorIterator. 
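The arithmetic in find_iterations_count_for_output above boils down to ceil((end - start) / stride) along the sliced axis once the bounds are resolved against the input shape. A minimal standalone sketch, assuming plain Python, non-negative bounds and a positive stride (the MO code above additionally normalizes negative start/end and handles dynamic dimensions):

    from math import ceil

    def sketch_iteration_count(dim_size, start=0, end=None, stride=1):
        # Simplified model of the iteration-count formula; not the MO implementation.
        end = dim_size if end is None else end
        return ceil((end - start) / stride)

    assert sketch_iteration_count(10) == 10              # slice the whole axis, stride 1
    assert sketch_iteration_count(10, stride=2) == 5
    assert sketch_iteration_count(10, start=2, end=8, stride=3) == 2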
- """ - mapping = {} - ordered_nodes = graph.pseudo_topological_sort() - for node in ordered_nodes: - if node.kind == 'op' and node.has_valid('internal_layer_id'): - mapping[node.internal_layer_id] = node.id - return mapping - - -def _get_internal_output_node_id(graph: Graph, ti_node_id: str, external_port: int): - node = Node(graph, ti_node_id) - outputs = node['output_port_map'] - mapping = _get_internal_idxs_to_names_dict(node['body'], 'out') - for out in outputs: - if out['external_port_id'] == external_port: - return mapping[out['internal_layer_id']] - - -def _get_internal_input_node_id(graph: Graph, ti_node_id: str, external_port: int): - node = Node(graph, ti_node_id) - inputs = node['input_port_map'] - mapping = _get_internal_idxs_to_names_dict(node['body'], 'in') - for inp in inputs: - if inp['external_port_id'] == external_port: - return mapping[inp['internal_layer_id']] diff --git a/tools/mo/openvino/tools/mo/ops/tile.py b/tools/mo/openvino/tools/mo/ops/tile.py deleted file mode 100644 index 7c05da33cb49e1..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/tile.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value, dynamic_dimension, \ - is_fully_defined, shape_array, shape_insert -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op, PermuteAttrs - - -class Tile(Op): - op = 'Tile' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - - 'infer': self.infer, - - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node: Node): - name = node.soft_get('name', node.id) - - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_in_ports) == 2 and 0 in connected_in_ports and 1 in connected_in_ports, \ - "Tile should have 2 connected input port, but it doesn't for node: `{}`. 
Ports: {}" \ - "".format(name, connected_in_ports) - - shape = node.in_port(0).data.get_shape() - assert shape is not None, "Undefined input shape for Tile node '{}'.".format(name) - tile_array = node.in_port(1).data.get_value() - assert tile_array is not None, "Undefined `repeats` (1st port input value) of Tile node '{}'".format(name) - - # align ranks of the tile_array tensor and input shape node - if shape.size < tile_array.size: - shape = shape_insert(shape, 0, [1] * (tile_array.size - shape.size)) - elif shape.size > tile_array.size: - tile_array = shape_insert(tile_array, 0, [1] * (shape.size - tile_array.size)) - - input_value = node.in_port(0).data.get_value() - if input_value is not None and is_fully_defined(shape) and is_fully_defined(tile_array): - node.out_port(0).data.set_value(np.tile(input_value.reshape(shape), tile_array)) - else: - node.out_port(0).data.set_shape(shape * tile_array) - - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'shape') - - -class AttributedTile(Op): - op = 'AttributedTile' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': 'Tile', - 'version': 'opset1', - - 'infer': self.infer, - - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) - - assert 'axis' in self.attrs - assert 'tiles' in self.attrs - - def supported_attrs(self): - return ['axis', 'tiles'] - - @staticmethod - def infer(node): - name = node.soft_get('name', node.id) - - connected_in_ports = {idx: port for idx, port in node.in_ports().items() if not port.disconnected()} - assert len(connected_in_ports) == 1 and 0 in connected_in_ports, \ - "AttributedTile should have 1 connected input port, but it doesn't for node: `{}`. Ports: {}" \ - "".format(name, connected_in_ports) - - shape = node.in_port(0).data.get_shape() - assert shape is not None, "Undefined input shape for AttributedTile node '{}'.".format(name) - axis = node.soft_get('axis', None) - assert axis is not None - tiles = node.soft_get('tiles', None) - assert tiles is not None, "Undefined `tiles` attribute of Tile node '{}'".format(name) - - tile_array = int64_array(np.ones(shape.size)) - tile_array[node.axis] = node.tiles - - node.out_port(0).data.set_shape(shape * tile_array) - if node.in_port(0).data.get_value() is not None: - node.out_port(0).data.set_value(np.tile(node.in_port(0).data.get_value(), tile_array)) - - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) diff --git a/tools/mo/openvino/tools/mo/ops/timeheightconvolution.py b/tools/mo/openvino/tools/mo/ops/timeheightconvolution.py deleted file mode 100644 index 16fdabb5e27580..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/timeheightconvolution.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.ops.op import Op - - -class TimeHeightConvolutionComponent(Op): - op = 'timeheightconvolutioncomponent' - enabled = False - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': None, - 'op': self.op, - 'infer': None, - 'in_ports_count': 1, - 'out_ports_count': 1, - }, attrs) diff --git a/tools/mo/openvino/tools/mo/ops/topk.py b/tools/mo/openvino/tools/mo/ops/topk.py deleted file mode 100644 index 3e62c44cd8854f..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/topk.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 
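As a quick illustration of the Tile/AttributedTile shape inference deleted above (ranks are aligned by prepending 1s, then the output shape is the element-wise product of shape and repeats), here is a small numpy-only sketch with arbitrary values:

    import numpy as np

    shape = np.array([2, 3], dtype=np.int64)
    repeats = np.array([4, 1, 2], dtype=np.int64)
    # align ranks by prepending 1s to the shorter array
    if shape.size < repeats.size:
        shape = np.concatenate([np.ones(repeats.size - shape.size, dtype=np.int64), shape])
    elif repeats.size < shape.size:
        repeats = np.concatenate([np.ones(shape.size - repeats.size, dtype=np.int64), repeats])
    print(shape * repeats)                                # [4 2 6]
    print(np.tile(np.zeros((2, 3)), (4, 1, 2)).shape)     # (4, 2, 6) -- matches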
- -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type -from openvino.tools.mo.ops.op import Op, PermuteAttrs -from openvino.tools.mo.utils.error import Error - - -class TopK(Op): - op = 'TopK' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset3', - 'infer': self.infer, - 'type_infer': self.type_infer, - - 'index_element_type': np.int32, - 'axis': None, - 'mode': 'max', - 'sort': 'none', - 'force_precision_in_ports': { - 1: 'int32'}, - 'in_ports_count': 3, - 'out_ports_count': 2, - }, attrs) - - def backend_attrs(self): - version = self.get_opset() - if version in 'opset11': - return ['axis', 'mode', 'sort', 'stable', - ('index_element_type', lambda node: np_data_type_to_destination_type(node.index_element_type))] - elif version in 'opset3': - return ['axis', 'mode', 'sort', - ('index_element_type', lambda node: np_data_type_to_destination_type(node.index_element_type))] - elif version == 'opset1': - return ['axis', 'mode', 'sort'] - else: - raise Error('Unknown opset version "{}"'.format(version)) - - @staticmethod - def infer(node): - in_ports = node.in_ports() - connected_ports = [port for port in in_ports.values() if not port.disconnected()] - assert len(connected_ports) == 2, 'The number of inputs to the TopK layer name "{}" must be equal to 2.' \ - ''.format(node.soft_get('name')) - - k = node.in_port(1).data.get_value() - if k is None: - k = dynamic_dimension - assert node.has_valid('axis'), 'The "axis" attribute is not defined for node {}'.format(node.name) - - input_shape = node.in_port(0).data.get_shape() - node.axis = len(input_shape) + node.axis if node.axis < 0 else node.axis - output_shape = input_shape.copy() - output_shape[node.axis] = k - - PermuteAttrs.create_permute_attrs(node, attrs=[('axis', 'input:0')]) - - # setting shape and value if applicable - if not node.out_port(0).disconnected(): - node.out_port(0).data.set_shape(output_shape) - if not node.out_port(1).disconnected(): - node.out_port(1).data.set_shape(output_shape) - if node.in_port(0).data.get_value() is not None: - # TODO implement value propagation - pass - - @staticmethod - def type_infer(node): - node.out_port(0).set_data_type(node.in_port(0).get_data_type()) - if node.get_opset() in ['opset3', 'opset11']: - node.out_port(1).set_data_type(node.index_element_type) - else: - node.out_port(1).set_data_type(np.int32) diff --git a/tools/mo/openvino/tools/mo/ops/topkrois_onnx.py b/tools/mo/openvino/tools/mo/ops/topkrois_onnx.py deleted file mode 100644 index 6054182940f27e..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/topkrois_onnx.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, set_input_shapes -from openvino.tools.mo.ops.op import Op - - -class ExperimentalDetectronTopKROIs(Op): - op = 'ExperimentalDetectronTopKROIs' - - def __init__(self, graph, attrs): - mandatory_props = dict( - type=self.op, - op=self.op, - version='experimental', - reverse_infer=self.reverse_infer, - infer=self.infer - ) - super().__init__(graph, mandatory_props, attrs) - - def backend_attrs(self): - return ['max_rois', ] - - @staticmethod - def infer(node): - 
node.out_port(0).data.set_shape([node.max_rois, 4]) - - @staticmethod - def reverse_infer(node): - set_input_shapes(node, shape_array([dynamic_dimension_value, 4]), shape_array([dynamic_dimension_value])) diff --git a/tools/mo/openvino/tools/mo/ops/transpose.py b/tools/mo/openvino/tools/mo/ops/transpose.py deleted file mode 100644 index 0430f90cd12e81..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/transpose.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.op import Op - - -class Transpose(Op): - op = 'Transpose' - enabled = True - - def __init__(self, graph: Graph, attrs: dict): - super().__init__(graph, { - 'type': self.op, - 'op': self.op, - 'version': 'opset1', - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - 'force_precision_in_ports': {1: 'int64'}, - 'in_ports_count': 2, - 'out_ports_count': 1, - }, attrs) - - @staticmethod - def infer(node: Node): - # order parameter calculation and checks - in_ports = node.in_ports() - connected_ports = [port for port in in_ports.values() if not port.disconnected()] - input_shape = node.in_port(0).data.get_shape() - - if node.has_and_set('reverse_order'): - assert len(connected_ports) == 1 and 0 in in_ports, \ - 'Cannot infer `{}` due to both order and reverse_order was set'.format(node.soft_get('name')) - order = np.arange(len(input_shape))[::-1] # Reverse order - else: - # we import PermuteInputs locally because it uses Transpose inside and we have recursive imports - from openvino.tools.mo.graph.perm_inputs import PermuteInputs - assert len(connected_ports) == 2 and 0 in in_ports and 1 in in_ports, \ - "{} node `{}` should have 2 input ports, where 0-input is a data input and 1-input represents " \ - "Transpose `order`".format(node.op, node.id) - order = node.in_port(1).data.get_value() - assert order is not None, 'Cannot infer `{}` because order is None'.format(node.soft_get('name')) - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'order') - - # setting shape and value if applicable - if node.in_port(0).data.get_value() is not None: - node.out_port(0).data.set_value(np.transpose(node.in_port(0).data.get_value(), axes=order)) - else: - node.out_port(0).data.set_shape(input_shape[order]) - - @staticmethod - def reverse_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - output_shape = node.out_port(0).data.get_shape() - order = node.in_port(1).data.get_value() - - if input_shape is None and output_shape is not None and order is not None: - output_shape_unmasked = output_shape.data.copy() - input_shape_unmasked = output_shape.data.copy() - for curr_out_size, order_axis in zip(output_shape_unmasked, order): - input_shape_unmasked[order_axis] = curr_out_size - input_shape = shape_array(input_shape_unmasked) - node.in_port(0).data.set_shape(input_shape) diff --git a/tools/mo/openvino/tools/mo/ops/unique.py b/tools/mo/openvino/tools/mo/ops/unique.py deleted file mode 100644 index 7cbed7ce2cd052..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/unique.py +++ /dev/null @@ -1,157 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import mo_array, int64_array -from openvino.tools.mo.graph.graph import Node, Graph -from 
openvino.tools.mo.ops.op import Op - - -class Unique(Op): - ''' The operation finds unique elements in 1-D tensor. - For more details see https://www.tensorflow.org/api_docs/python/tf/unique - - attributes: - - sorted, indicates whether to sort the unique elements in ascending order or - to return in the same order as they occur in the input - - return_inverse, indicates whether to output indices - - return_counts, indicates whether to output the counts of each unique element - - 1 input: - - [0, required] input tensor (1D) - - 2 outputs: - - [0, required] tensor containing all of the unique elements of the input - and sorted in the same order as in the input (1D) - - [1, optional] tensor of indices for each value of the input - in the tensor of unique elements (1D) - - [2, optional] tensor with a number of occurrences for each unique element - in the input (1D) - ''' - op = 'Unique' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': None, - 'op': __class__.op, - 'version': 'experimental', - 'infer': __class__.infer, - 'in_ports_count': 1, - 'out_ports_count': 3 - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'sorted', - 'return_inverse', - 'return_counts', - ] - - @staticmethod - def infer(node: Node): - # check that all required attributes are set - assert node.has('sorted') and node.sorted in ['true', 'false'], \ - "Unique does not have valid sorted attribute" - assert node.has('return_inverse') and node.return_inverse in ['true', 'false'], \ - "Unique does not have valid return_inverse attribute" - assert node.has('return_counts') and node.return_counts in ['true', 'false'], \ - "Unique does not have valid return_counts attribute" - - # check a number of input and output nodes - assert len(node.in_nodes()) == 1, "Unique must have one input" - assert len(node.out_nodes()) <= 3, "Unique must have less or equal to 3 outputs" - - # compute maximum number of outputs if no output port is pruned - max_num_outputs = 1 - if node.return_inverse == 'true': - max_num_outputs += 1 - if node.return_counts == 'true': - max_num_outputs += 1 - - # check a number of outputs - assert len(node.out_nodes()) <= max_num_outputs, \ - "The number of outputs in IR Unique layer must be less or equal to framework graph one" - - # check that the output with unique elements remains in a graph after pruning - # since this is required output - assert 0 in node.out_nodes(), \ - "The output with unique elements must remain in a graph" - - # check if outputs with indices and counts remain in a graph after pruning - # and update attributes - if len(node.out_nodes()) == 1: - node.return_inverse = 'false' - node.return_counts = 'false' - if len(node.out_nodes()) == 2 and 1 in node.out_nodes() \ - and node.return_inverse == 'true' and node.return_counts == 'true': - node.return_counts = 'false' - if len(node.out_nodes()) == 2 and 2 in node.out_nodes() \ - and node.return_inverse == 'true' and node.return_counts == 'true': - node.return_inverse = 'false' - - # check that input is 1-D tensor - input_shape = node.in_node(0).shape - assert input_shape is not None and input_shape.size == 1, \ - "Unique accepts only 1-D input" - - # determine a shape for each output - for out_node_ind in node.out_nodes(): - assert (out_node_ind < max_num_outputs), "Unique has three outputs at most" - # all outputs have the same shape equal to the input shape - node.out_node(out_node_ind).shape = input_shape - - input_value = node.in_node(0).value - if input_value is 
None: - return - - # check that input value is 1-D - assert len(input_value.shape) == 1, \ - "Unique accepts only 1-D input" - - is_sorted = (node.sorted == 'true') - return_inverse = (node.return_inverse == 'true') - return_counts = (node.return_counts == 'true') - - # infer if the input is constant - if is_sorted: - unique_output = np.unique(input_value, return_inverse = return_inverse, - return_counts = return_counts, return_index = False) - if not return_inverse and not return_counts: - unique_output = [unique_output] - else: - # np.unique can only return unique elements in sorted order - # so this case should be handled separately - sorted_uniques, sorted_index, sorted_inverse, sorted_counts = np.unique(input_value, return_index = True, - return_inverse = True, return_counts = True) - # compute uniques that are in the same order as they occur in the input, - # indices of input values in uniques, counts for each unique element - uniques = [] - inverse = [] - counts = [] - old_ind_by_elem = dict(zip(sorted_uniques, range(len(sorted_index)))) - new_ind_by_elem = dict() - new_ind = 0 - for ind in np.sort(sorted_index): - uniques.append(input_value[ind]) - old_ind = old_ind_by_elem[input_value[ind]] - counts.append(sorted_counts[old_ind]) - new_ind_by_elem[input_value[ind]] = new_ind - new_ind += 1 - inverse = [new_ind_by_elem[input_value[ind]] for ind in range(len(input_value))] - - # pack unique_output - unique_output = [] - unique_output.append(uniques) - if return_inverse: - unique_output.append(inverse) - if return_counts: - unique_output.append(counts) - - # write result to output nodes - j = 0 - for out_node_ind in node.out_nodes(): - node.out_node(out_node_ind).value = mo_array(unique_output[j], dtype=float) - node.out_node(out_node_ind).shape = int64_array(node.out_node(out_node_ind).value.shape) - j += 1 diff --git a/tools/mo/openvino/tools/mo/ops/unsqueeze.py b/tools/mo/openvino/tools/mo/ops/unsqueeze.py deleted file mode 100644 index 21dbcce5920dd3..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/unsqueeze.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, is_fully_defined, shape_insert, undefined_shape_of_rank -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.graph.perm_inputs import PermuteInputs -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.error import Error - - -class Unsqueeze(Op): - """ - The operation that inserts dimensions of size one into specific positions of the input layer. The dimensions are - specified in the second input. 
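The order-preserving branch of Unique.infer above works around the fact that np.unique always returns its values sorted; the same reindexing can be expressed compactly with return_index and argsort. A standalone sketch, not the MO implementation:

    import numpy as np

    x = np.array([9, 4, 9, 7, 4])
    vals, first_idx, inverse, counts = np.unique(x, return_index=True,
                                                 return_inverse=True, return_counts=True)
    order = np.argsort(first_idx)                  # sorted uniques -> first-occurrence order
    uniques = vals[order]                          # [9 4 7]
    remap = np.empty_like(order)
    remap[order] = np.arange(order.size)           # old (sorted) index -> new index
    print(uniques, remap[inverse], counts[order])  # [9 4 7] [0 1 0 2 1] [2 2 1]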
- """ - op = 'Unsqueeze' - enabled = False - - def __init__(self, graph, attrs: dict): - super().__init__(graph, { - 'op': self.op, - 'type': self.op, - 'version': 'opset1', - 'unsqueeze_dims': None, - 'reinterp_shape': True, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': self.infer, - 'reverse_infer': self.reverse_infer, - }, attrs) - - @staticmethod - def infer(node): - if len(node.in_nodes()) <= 1: - raise Error('There is no input with unsqueeze dims for the node {}'.format(node.soft_get('name'))) - unsqueeze_dims = node.in_port(1).data.get_value() - if unsqueeze_dims is None: - raise Error('The dimensions to unsqueeze are not defined for the node {}'.format(node.soft_get('name'))) - unsqueeze_dims = int64_array(unsqueeze_dims) - - input_value = node.in_port(0).data.get_value() - input_shape = node.in_port(0).data.get_shape() - - # TODO remove the following line when the OpenVINO plugins support 0D tensors - if unsqueeze_dims.ndim == 0: - unsqueeze_dims = int64_array([unsqueeze_dims.item()]) - - # make dimensions positive to correctly translate from NHWC to NCHW layout - unsqueeze_dims = int64_array([dim + len(node.in_port(0).data.get_shape()) + 1 if dim < 0 else dim - for dim in unsqueeze_dims]) - if node.in_port(1).get_source().node.op == 'Const': - node.in_port(1).data.set_value(unsqueeze_dims) - - output_shape = input_shape.copy() - for dim in unsqueeze_dims: - output_shape = shape_insert(output_shape, dim, 1) - - if input_value is not None and is_fully_defined(output_shape): - node.out_port(0).data.set_value(input_value.reshape(output_shape)) - else: - node.out_port(0).data.set_shape(output_shape) - - PermuteInputs().set_input_permutation(node.in_node(1), node, 'input:0', 'axis') - - @staticmethod - def reverse_infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - output_shape = node.out_port(0).data.get_shape() - unsqueeze_dims = node.in_port(1).data.get_value() - if input_shape is None and output_shape is not None and unsqueeze_dims is not None: - num_unsqueeze_dims = 1 if int64_array(unsqueeze_dims).ndim == 0 else len(unsqueeze_dims) - shape = undefined_shape_of_rank(len(output_shape) - num_unsqueeze_dims) - node.in_port(0).data.set_shape(shape) diff --git a/tools/mo/openvino/tools/mo/ops/upsample.py b/tools/mo/openvino/tools/mo/ops/upsample.py deleted file mode 100644 index 2afa8d4375409c..00000000000000 --- a/tools/mo/openvino/tools/mo/ops/upsample.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import math - -from openvino.tools.mo.front.common.layout import get_batch_dim, get_features_dim, get_height_dim, get_width_dim, shape_for_layout -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, shape_array, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class UpsampleOp(Op): - op = 'Upsample' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'op': self.op, - 'in_ports_count': 2, - 'out_ports_count': 1, - 'infer': UpsampleOp.upsample_infer - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'height_scale', - 'width_scale', - 'mode', - ] - - @staticmethod - def upsample_infer(node: Node): - node_name = node.soft_get('name', node.id) - layout = node.graph.graph['layout'] - assert len(layout) == 4, 'Input tensor rank must be equal to 4 for node "{}"'.format(node_name) - - input_shape = 
node.in_port(0).data.get_shape() - - if len(node.in_nodes()) == 1: - in_height = input_shape[get_height_dim(layout, 4)] - in_width = input_shape[get_width_dim(layout, 4)] - assert node.has('width_scale') is not None and node.has('height_scale') is not None - if in_height is not dynamic_dimension: - out_height = math.floor(in_height * node.height_scale) - else: - out_height = dynamic_dimension - if in_width is not dynamic_dimension: - out_width = math.floor(in_width * node.width_scale) - else: - out_width = dynamic_dimension - node.out_port(0).data.set_shape(shape_for_layout(layout, - batch=input_shape[get_batch_dim(layout, 4)], - features=input_shape[get_features_dim(layout, 4)], - height=out_height, - width=out_width)) - else: - scales = node.in_port(1).data.get_value() - assert scales is not None, 'The input with scales for node "{}" is not constant'.format(node_name) - eps = 1e-5 # This is to make rounding in case of very close number to round to closest instead of down - # generic output shape calculation to support 5D input shape case - output_shape = shape_array([dynamic_dimension for _ in range(len(input_shape))]) - for idx in range(len(output_shape)): - if input_shape[idx] is not dynamic_dimension: - output_shape[idx] = int((input_shape[idx] + eps) * scales[idx]) - else: - output_shape[idx] = dynamic_dimension_value - node.out_port(0).data.set_shape(output_shape) diff --git a/tools/mo/openvino/tools/mo/pipeline/__init__.py b/tools/mo/openvino/tools/mo/pipeline/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/pipeline/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/pipeline/common.py b/tools/mo/openvino/tools/mo/pipeline/common.py deleted file mode 100644 index cae68d40042475..00000000000000 --- a/tools/mo/openvino/tools/mo/pipeline/common.py +++ /dev/null @@ -1,277 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import logging as log -import os -from operator import itemgetter - -import networkx as nx -import numpy as np - -from openvino.tools.mo.back.RemoveUselessConvert import RemoveUselessConvert -from openvino.tools.mo.back.ResultRename import ResultRename -from openvino.tools.mo.back.ie_ir_ver_2.emitter import port_renumber, serialize_constants, generate_ie_ir, \ - serialize_mean_image -from openvino.tools.mo.back.op_versioning import OpVersioning -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes import tensor_names, convert_data_type -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_np -from openvino.tools.mo.middle.passes.infer import type_infer -from openvino.tools.mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.utils.error import Error - - -def determined_sort(outputs: list): - op_order = [] - data_order = [] - stack = list(outputs) - visited = set() - while len(stack) != 0: - node = stack.pop(0) - node_id = node.id - visited.add(node_id) - has_child = False - in_names = [n for n, d in node.get_inputs()] - for in_node_name in in_names: - if in_node_name not in visited: - stack.insert(0, node) - stack.insert(0, Node(node.graph, in_node_name)) - has_child = True - break - if not has_child: - if node.kind == 'op': - op_order.append(node_id) - if node.kind == 'data': - 
data_order.append(node_id) - return op_order, data_order - - -def get_fw_tensor_debug_info(node: Node): - while not node.has_valid('fw_tensor_debug_info') and not node.has_valid('output_sort_order') \ - and len(node.in_nodes()): - try: - node = node.in_node() - except Exception as e: - log.warning('Was not able to determine tensor debug info for node {}'.format(node.name)) - return "dummy_node_name" - if node.has_valid('output_sort_order'): - return node.soft_get('output_sort_order') - return node.soft_get('fw_tensor_debug_info') - - -def get_sorted_outputs(graph: Graph): - outputs = [] - outputs_for_sort = {} - for node in graph.nodes(): - if len(graph.out_edges(node)) == 0: - outputs.append(Node(graph, node)) - if len(outputs) == 1: - return outputs - for node in outputs: - debug_info = get_fw_tensor_debug_info(node) - if isinstance(debug_info, str): - outputs_for_sort[node.id] = debug_info - elif isinstance(debug_info, list): - outputs_for_sort[node.id] = debug_info[0][0] + '_' + str(debug_info[0][1]) - else: - raise Error('Unsupported type of the variable with debug information used to sort output nodes') - if len(outputs_for_sort) != len(set(outputs_for_sort.values())): - log.warning('There are at least two output nodes with the same key used to sort the outputs. This means that ' - 'IRs with different order of nodes may be generated between Model Optimizer runs. The dictionary ' - 'with outputs is: {}'.format(outputs_for_sort)) - return [Node(graph, key) for key, value in sorted(outputs_for_sort.items(), key=itemgetter(1))] - - -def collect_sub_graphs(graph: Graph): - """ Go over all nodes and sub_graphs in the graph recursively; returns all found sub-graphs. """ - result = [] - for node in graph.nodes(): - node = Node(graph, node) - if node.has_valid('sub_graphs'): - for sub_graph in node.sub_graphs: - result.append(node[sub_graph]) - result += collect_sub_graphs(node[sub_graph]) - return result - - -def relabel_nodes_inplace_safe(graph: Graph, new_labels: dict): - """ Safely relabels graph in-place without graph copy. - - Safety in this place means that it is guaranteed that - there won't be collisions during relabeling process. 
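relabel_nodes_inplace_safe (whose body follows just below) avoids collisions by routing every node through a unique temporary label before applying the final mapping. The idea in isolation, using networkx only and toy labels that deliberately overlap:

    import networkx as nx

    g = nx.DiGraph([(1, 2), (2, 3)])
    new_labels = {1: 2, 2: 3, 3: 4}                # old and new labels overlap on purpose
    # stage 1: move every node to a unique temporary name
    tmp = {n: '__relabel__{}__'.format(i) for i, n in enumerate(g.nodes())}
    nx.relabel_nodes(g, tmp, copy=False)
    # stage 2: move temporary names to the final labels -- no collision is possible now
    nx.relabel_nodes(g, {t: new_labels[n] for n, t in tmp.items()}, copy=False)
    print(sorted(g.nodes()))                       # [2, 3, 4]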
- """ - # Relabel nodes in two stages - intermediate_map = {node: graph.unique_id('__relabel__{}__'.format(str(i))) for i, node in enumerate(graph.nodes())} - final_map = {dst: new_labels[src] for src, dst in intermediate_map.items()} - assert len(set(intermediate_map.keys()).intersection(set(intermediate_map.values()))) == 0 - assert len(set(final_map.keys()).intersection(set(final_map.values()))) == 0 - nx.relabel_nodes(graph, intermediate_map, copy=False) - nx.relabel_nodes(graph, final_map, copy=False) - - -def convert_const_node_value_type(const_node: Node, np_data_type): - assert const_node.type == 'Const' - log.warning('Converting type of Const node "{}" to "{}"'.format(const_node.name, np_data_type)) - const_node.value = const_node.value.astype(np_data_type) - const_node.data_type = np_data_type - const_node.infer(const_node) - const_node.type_infer(const_node) - - # if the Const node has an input data node then need to update it also - if len(const_node.in_nodes()) == 1: - input_data = const_node.in_node(0) - assert input_data.kind == 'data' - input_data.value = input_data.value.astype(const_node.data_type) - input_data.data_type = const_node.data_type - - -def convert_inputs_of_specific_ops(graph: Graph): - type_port = {'Broadcast': {1: 'int64', 2: 'int64'}, - 'ConvolutionBackpropData': {2: 'int64'}, - 'Deconvolution': {2: 'int64'}, - 'Gather': {2: 'int64'}, - 'GroupConvolutionBackpropData': {2: 'int64'}, - 'Interpolate': {1: 'int64'}, - 'LRN': {1: 'int64'}, - 'NonMaxSuppression': {2: 'int64'}, - 'NormalizeL2': {1: 'int64'}, - 'OneHot': {1: 'int64'}, - 'Pad': {1: 'int64', 2: 'int64'}, - 'PriorBox': {0: 'int64', 1: 'int64'}, - 'PriorBoxClustered': {0: 'int64', 1: 'int64'}, - 'ReduceLogicalAnd': {1: 'int64'}, - 'ReduceLogicalOr': {1: 'int64'}, - 'ReduceMax': {1: 'int64'}, - 'ReduceMean': {1: 'int64'}, - 'ReduceMin': {1: 'int64'}, - 'ReduceProd': {1: 'int64'}, - 'ReduceSum': {1: 'int64'}, - 'Reshape': {1: 'int64'}, - 'Squeeze': {1: 'int64'}, - 'StridedSlice': {1: 'int64', 2: 'int64', 3: 'int64'}, - 'Split': {1: 'int64'}, - 'Tile': {1: 'int64'}, - 'Transpose': {1: 'int64'}, - 'Unsqueeze': {1: 'int64'}, - 'VariadicSplit': {1: 'int64', 2: 'int64'}, - } - - for node in graph.get_op_nodes(): - if node.soft_get('version') != "opset11": - # opset11 cannot be produced by legacy MO frontends, it can only be read by MO IR Reader - if node.soft_get('type') in type_port: - ports_to_update = type_port[node.soft_get('type')] - for port_id, precision in ports_to_update.items(): - if port_id in node.in_ports() and not node.in_port(port_id).disconnected(): - log.debug('Converting value for the input port "{}" of op "{}" to "{}".' 
- ''.format(port_id, node.soft_get('name', node.id), precision)) - in_port = node.in_port(port_id) - np_type = data_type_str_to_np(precision) - in_node = node.in_port(port_id).get_source().node - in_type = in_node.out_port(0).get_data_type() - - if in_node.type == 'Const': - if np.issubdtype(in_type, np.integer) and np.issubdtype(np_type, np.integer): - # do not convert Constant value if both source and destination types are of integer types - # otherwise, it affects compatibility of MO IR Engine and TF FE - # TF FE intents to use original model type for layers if it is possible - continue - convert_const_node_value_type(in_node, np_type) - else: - allowed_int_types = [np.int32, np.int64, np.uint32, np.uint64] - if in_type in allowed_int_types and np_type in allowed_int_types: - # do not convert if both source and destination types are within the set of - # int32/int64/uint32/uint64. It prevents from getting different IRs from the original - # cpp serializer and from the legacy serialized when restored with ir_reader_utils - continue - in_port.get_connection().insert_node(Cast(graph, {'dst_type': np_type}).create_node()) - - -def set_default_tensor_names_for_parameters_results(graph: Graph): - for node in graph.get_op_nodes(): - if node.soft_get('type') == 'Result' and node.is_in_port_connected(0): - port = node.in_port(0).get_connection().get_source() - elif node.soft_get('type') == 'Parameter' and node.is_out_port_connected(0): - port = node.out_port(0) - else: - continue - if node.has_and_set('keep_output_port'): - continue - - tensors = port.get_tensor_names() - if tensors is not None and isinstance(tensors, list) and len(tensors) > 0: - continue - new_tensor_name = port.get_default_tensor_name() - op_name = port.node.soft_get('name') - port.add_tensor_names([new_tensor_name, op_name]) - - -def prepare_emit_ir(graph: Graph, data_type: str, output_dir: str, output_model_name: str, - mean_data: [list, None] = None, input_names: list = None, meta_info: dict = None, - use_temporary_path=False, convert_types=False, rename_results=True): - if input_names is None: - input_names = [] - if meta_info is None: - meta_info = {} - graph.strict_mode = False - - if convert_types: - # convert Parameter data types - convert_data_type.convert_parameters_data_type(graph, data_type) - # convert blobs (usually weights and biases) - for sub_graph in [graph] + collect_sub_graphs(graph): - convert_data_type.convert_blobs(sub_graph, data_type) - - # restore data type for specific inputs/outputs of specific ops to the data types required by nGraph - for_graph_and_each_sub_graph_recursively(graph, convert_inputs_of_specific_ops) - - for_graph_and_each_sub_graph_recursively(graph, OpVersioning().find_and_replace_pattern) - - # do not run the type inference in sub-graphs. 
It will be called automatically as part of the type inference of - # the TensorIterator nodes - type_infer(graph) - - for_graph_and_each_sub_graph_recursively(graph, RemoveUselessConvert().find_and_replace_pattern) - - if rename_results: - ResultRename().find_and_replace_pattern(graph) - set_default_tensor_names_for_parameters_results(graph) - - for sub_graph in [graph] + collect_sub_graphs(graph): - op_order, data_order = determined_sort(get_sorted_outputs(sub_graph)) - mapping = {v: u for u, v in enumerate(op_order)} - mapping.update({v: u for u, v in enumerate(data_order, start=len(sub_graph))}) - relabel_nodes_inplace_safe(sub_graph, mapping) - port_renumber(sub_graph) - - tensor_names.propagate_op_name_to_tensor(graph) - - ir_path_suffix = "_tmp" if use_temporary_path else "" - - bin_file = os.path.join(output_dir, '{}{}.bin'.format(output_model_name, ir_path_suffix)) - serialize_constants(graph, bin_file) - - mean_offset = None - mean_size = None - if mean_data: - mean_offset, mean_size = serialize_mean_image(bin_file, mean_data=mean_data) - - generate_ie_ir(graph=graph, - file_name=os.path.join(output_dir, '{}{}.xml'.format(output_model_name, ir_path_suffix)), - input_names=input_names, - mean_offset=mean_offset, - mean_size=mean_size, - meta_info=meta_info) - tensor_names.output_tensor_names_map(graph, os.path.join(output_dir, - '{}{}.mapping'.format(output_model_name, ir_path_suffix))) - - -def get_ir_version(argv: argparse.Namespace): - """ - Determine IR version based on command line arguments and the default version. - :param argv: the parsed command line arguments - :return: the IR version - """ - return 11 diff --git a/tools/mo/openvino/tools/mo/pipeline/unified.py b/tools/mo/openvino/tools/mo/pipeline/unified.py deleted file mode 100644 index 619e579ea527ce..00000000000000 --- a/tools/mo/openvino/tools/mo/pipeline/unified.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.pipeline.common import get_ir_version -from openvino.tools.mo.utils import class_registration - - -def unified_pipeline(argv: argparse.Namespace): - graph = Graph(cmd_params=argv, name=argv.model_name, ir_version=get_ir_version(argv)) - class_registration.apply_replacements(graph, [ - class_registration.ClassType.LOADER, - class_registration.ClassType.FRONT_REPLACER, - class_registration.ClassType.MIDDLE_REPLACER, - class_registration.ClassType.BACK_REPLACER - ]) - return graph diff --git a/tools/mo/openvino/tools/mo/subprocess_main.py b/tools/mo/openvino/tools/mo/subprocess_main.py deleted file mode 100644 index 0b8eb5069e000f..00000000000000 --- a/tools/mo/openvino/tools/mo/subprocess_main.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import os -import subprocess # nosec -import sys - - -def log_ie_not_found(): - log.error("Could not find the OpenVINO or Python API.\n" - "Consider building the OpenVINO and Python APIs" - " from sources or try to install OpenVINO (TM) Toolkit using pip \npip install openvino") - - -def log_mo_root_dir_not_found(): - log.error("Could not find the ModelOptimizer root module directory.\n" - "Consider setting PYTHONPATH to the openvino tools folder (usually openvino/tools/mo)") - - -def setup_env(): - mo_root_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir) - - # Check that MO root directory 
already set to the PYTHONPATH - def is_mo_imported(): - try: - status = subprocess.run([sys.executable, os.path.join(mo_root_path, 'openvino/tools/mo/utils/check_mo_import.py')], - env=os.environ) - return status.returncode == 0 - except: - return False - - if not is_mo_imported(): - # If no, we try to set it manually based on relative path - python_path_key = 'PYTHONPATH' - if python_path_key not in os.environ: - os.environ[python_path_key] = mo_root_path - else: - os.environ[python_path_key] = os.pathsep.join([os.environ[python_path_key], mo_root_path]) - - sys.path.append(mo_root_path) - - if not is_mo_imported(): - log_mo_root_dir_not_found() - sys.exit(1) - - ie_found = True - try: - from openvino.tools.mo.utils.find_ie_version import find_ie_version # pylint: disable=no-name-in-module - ie_found = find_ie_version(silent=True) - except Exception as e: - log.error(e) - ie_found = False - - if not ie_found: - log_ie_not_found() - sys.exit(1) - - return True - - -def subprocess_main(framework=None): - """ - Please keep this file compatible with python2 in order to check user python version. - - This function checks that OpenVINO Python API available and working as expected - and then in sub-process it executes main_.py files. Due to some OSs specifics we can't - just add paths to Python modules and libraries into current env. So to make OpenVINO - Python API to be available inside MO we need to use subprocess with new env. - """ - setup_env() - - path_to_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), - 'main_{}.py'.format(framework) if framework else 'main.py') - - # python2 compatible code. Do not remove. - args = [sys.executable, path_to_main] - - for arg in sys.argv[1:]: - args.append(arg) - status = subprocess.run(args, env=os.environ) - sys.exit(status.returncode) diff --git a/tools/mo/openvino/tools/mo/utils/__init__.py b/tools/mo/openvino/tools/mo/utils/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/utils/broadcasting.py b/tools/mo/openvino/tools/mo/utils/broadcasting.py deleted file mode 100644 index b93fea86f66a75..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/broadcasting.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, shape_array, shape_insert, is_fully_defined, \ - dynamic_dimension_value -from openvino.tools.mo.front.common.partial_infer.utils import mo_array - - -def make_equal_rank(shape_1: np.ndarray, shape_2: np.ndarray): - """ - Prepend shape with smaller length with 1. 
Return updates shapes - :param shape_1: first shape - :param shape_2: second shape - :return: tuple with updated shapes - """ - while len(shape_1) < len(shape_2): - shape_1 = shape_insert(shape_1, 0, 1) - - while len(shape_2) < len(shape_1): - shape_2 = shape_insert(shape_2, 0, 1) - - return shape_1, shape_2 - - -def uni_directional_shape_broadcasting(input_shape: np.array, target_shape: np.array): - """ - Uni-directional broadcasting of two shapes following the numpy semantic - :param input_shape: input shape to broadcast - :param target_shape: target shape - :return: broadcasted shape or None if broadcasting cannot be performed - """ - input = input_shape.copy() - - # in one-directional broadcasting the target shape rank can be higher or equal than input shape - if len(input_shape) > len(target_shape): - log.debug('The shape "{}" cannot be broadcasted to "{}"'.format(input_shape, target_shape)) - return None - - # prepend input shape with 1s - input, target_shape = make_equal_rank(input, target_shape) - result_shape = [] - for left, right in zip(input, target_shape): - if left != right and left != 1 and right is not dynamic_dimension: - log.debug('The shape "{}" cannot be broadcasted to "{}"'.format(input_shape, target_shape)) - return None - if right is dynamic_dimension and left is not dynamic_dimension and left != 1: - result_shape.append(left) - else: - result_shape.append(right) - return shape_array(result_shape) - - -def bi_directional_shape_broadcasting(input_shape_1: np.array, input_shape_2: np.array): - """ - Bi-directional broadcasting of two shapes following numpy semantic - :param input_shape_1: first shape to broadcast - :param input_shape_2: second shape to broadcast - :return: broadcasted shape or None if broadcasting cannot be performed - """ - shape_1 = input_shape_1.copy() - shape_2 = input_shape_2.copy() - shape_1, shape_2 = make_equal_rank(shape_1, shape_2) - result = list() - - for left, right in zip(shape_1, shape_2): - if left != right and left != 1 and right != 1 and left is not dynamic_dimension and \ - right is not dynamic_dimension: - log.debug('The shape "{}" cannot be broadcasted to "{}"'.format(input_shape_1, input_shape_2)) - return None - if left is not dynamic_dimension and right is not dynamic_dimension: - result.append(max(left, right)) - elif left is not dynamic_dimension and left != 1: - result.append(left) - elif right is not dynamic_dimension and right != 1: - result.append(right) - else: - result.append(dynamic_dimension_value) - - return shape_array(result) - - -def explicit_shape_broadcasting(input_shape: np.array, target_shape: np.array, axes_mapping: np.array) -> [np.array, np.array]: - """ - Explicit shape broadcasting of input tensor. Function only asserts that values are correct and normalizes axes. - Resulting shape is equal to target_shape. 
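For static (fully defined) shapes the two helpers above agree with numpy's broadcasting rules; a small check using np.broadcast_shapes (numpy >= 1.20), leaving out the dynamic-dimension handling that the MO versions add on top:

    import numpy as np

    def uni_ok(input_shape, target_shape):
        # uni-directional: only the input may be stretched (its 1s expand to match target)
        try:
            return np.broadcast_shapes(input_shape, target_shape) == tuple(target_shape)
        except ValueError:
            return False

    print(uni_ok((1, 3), (2, 3)))                   # True
    print(uni_ok((2, 3), (2, 1)))                   # False: target cannot shrink the input
    print(np.broadcast_shapes((2, 1, 3), (5, 3)))   # bi-directional result: (2, 5, 3)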
- :param input_shape: input value to broadcast - :param target_shape: target shape - :param axes_mapping: a list of axis indices, each index maps an axis from the input_value to axis in the output - :return: broadcasted shape and normalized axes - """ - assert np.all(np.diff(axes_mapping) >= 0), "axes_mapping is not sorted" - assert len(axes_mapping) == len(input_shape), "size of axes_mapping does not match to rank of input" - axes_mapping = mo_array(list(map(lambda axis: axis + len(target_shape) if axis < 0 else axis, axes_mapping))) - - res = target_shape.copy() - for i, axis in enumerate(axes_mapping): - assert 0 <= axis < len(res), "axis value from axes_mapping exceeds rank of target_shape" - assert res[axis] == input_shape[i], "specified mapping axis in target_shape differs from axis in input_shape" - return res, axes_mapping - - -def uni_directional_broadcasting(input_value: np.array, target_shape: np.array): - """ - Uni-directional broadcasting of input tensor to target shape following the numpy semantic - :param input_value: input value to broadcast - :param target_shape: target shape - :return: broadcasted value - """ - assert is_fully_defined(target_shape) - assert uni_directional_shape_broadcasting(shape_array(input_value.shape), target_shape) is not None, \ - 'The tensor of shape "{}" cannot be uni-directionally broadcasted to shape "{}"'.format(input_value.shape, - target_shape) - return input_value * np.ones(target_shape).astype(input_value.dtype) - - -def bi_directional_broadcasting(input_value: np.array, second_shape: np.array): - """ - Bi-directional broadcasting of input tensor to target shape following the numpy semantic - :param input_value: input value to broadcast - :param second_shape: second tensor shape - :return: broadcasted value - """ - output_shape = bi_directional_shape_broadcasting(shape_array(input_value.shape), second_shape) - assert output_shape is not None, 'The tensor of shape "{}" cannot be bi-directionally broadcasted to shape "{}"' \ - ''.format(input_value.shape, second_shape) - assert is_fully_defined(output_shape) - return input_value * np.ones(second_shape).astype(input_value.dtype) - - -def explicit_broadcasting(input_value: np.array, target_shape: np.array, axes_mapping: np.array) -> np.array: - """ - Explicit broadcasting of input tensor. Resulting shape is equal to target_shape except for axes specified in axes_mapping - :param input_value: input value to broadcast - :param target_shape: target shape - :param axes_mapping: a list of axis indices, each index maps an axis from the input_value to axis in the output - :return: broadcasted value - """ - res_shape, normalized_axes_mapping = explicit_shape_broadcasting(input_value.shape, target_shape, axes_mapping) - #TODO: Function 'expand_dims' should be replaced with 'numpy.expand_dims' if numpy version will be >=18.x in requirements. 
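explicit_broadcasting (its body continues just below) pins each input axis to a named output axis, inserts size-1 dims for the remaining output axes and then broadcasts. The same steps in plain numpy, with made-up shapes:

    import numpy as np

    value = np.arange(6).reshape(2, 3)               # input shape (2, 3)
    target_shape, axes_mapping = (4, 2, 3), (1, 2)   # input axis 0 -> output axis 1, 1 -> 2
    expanded = value
    for axis in range(len(target_shape)):
        if axis not in axes_mapping:                 # every unmapped output axis becomes size 1
            expanded = np.expand_dims(expanded, axis)
    print(np.broadcast_to(expanded, target_shape).shape)   # (4, 2, 3)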
- expand_dim_axis = set(np.arange(len(target_shape))) - set(normalized_axes_mapping) - input_expanded = input_value.copy() - - for axis in sorted(list(expand_dim_axis)): - input_expanded = np.expand_dims(input_expanded, axis) - return np.broadcast_to(input_expanded, res_shape) diff --git a/tools/mo/openvino/tools/mo/utils/check_ie_bindings.py b/tools/mo/openvino/tools/mo/utils/check_ie_bindings.py deleted file mode 100644 index ae10b3e617def7..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/check_ie_bindings.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import os -import platform -import sys - -try: - import openvino.tools.mo - execution_type = "mo" -except ModuleNotFoundError: - mo_root_path = os.path.normpath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)) - sys.path.insert(0, mo_root_path) - execution_type = "install_prerequisites.{}".format("bat" if platform.system() == "Windows" else "sh") - -import openvino.tools.mo.utils.version as v -try: - import openvino_telemetry as tm # pylint: disable=import-error,no-name-in-module - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm -from openvino.tools.mo.utils.error import classify_error_type -from openvino.tools.mo.utils.telemetry_utils import init_mo_telemetry - - -def send_telemetry(mo_version: str, message: str, event_type: str): - t = init_mo_telemetry('Version Checker') - # do not trigger new session if we are executing from the check from within the MO because it is actually not model - # conversion run which we want to send - if execution_type != 'mo': - t.start_session(execution_type) - t.send_event(execution_type, event_type, message) - if execution_type != "mo": - t.end_session(execution_type) - t.force_shutdown(1.0) - - -def import_core_modules(silent: bool, path_to_module: str): - """ - This function checks that OpenVINO Python API is available - and necessary python modules exists. So the next list of imports - must contain all IE/NG Python API imports that are used inside MO. 
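The body of import_core_modules (continued just below) flags a mismatch when the commit hashes embedded in the Model Optimizer and runtime version strings differ after truncation to a common length. The comparison itself, on hypothetical version strings (the real ones come from get_version() and the version helpers imported above):

    mo_version = '2024.0.0-12345-abcdef012345'        # hypothetical
    ie_version = '2024.0.0-12345-abcdef0123456789'    # hypothetical
    mo_hash, ie_hash = mo_version.split('-')[-1], ie_version.split('-')[-1]
    n = min(len(mo_hash), len(ie_hash))
    versions_mismatch = mo_hash[:n] != ie_hash[:n]
    print(versions_mismatch)                          # False -> treated as matching builds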
- - :param silent: enables or disables logs printing to stdout - :param path_to_module: path where python API modules were found - :return: True if all imports were successful and False otherwise - """ - try: - from openvino._offline_transformations import apply_moc_transformations, apply_moc_legacy_transformations,\ - apply_low_latency_transformation, apply_fused_names_cleanup # pylint: disable=import-error,no-name-in-module - from openvino._offline_transformations import apply_make_stateful_transformation # pylint: disable=import-error,no-name-in-module - - from openvino.runtime import Model, serialize, get_version # pylint: disable=import-error,no-name-in-module - from openvino.runtime.op import Parameter # pylint: disable=import-error,no-name-in-module - from openvino.runtime import PartialShape, Dimension # pylint: disable=import-error,no-name-in-module - from openvino.frontend import FrontEndManager, FrontEnd # pylint: disable=no-name-in-module,import-error - - import openvino.frontend # pylint: disable=import-error,no-name-in-module - - if silent: - return True - - ie_version = str(get_version()) - mo_version = str(v.get_version()) # pylint: disable=no-member,no-name-in-module - - print("{}: \t{}".format("OpenVINO runtime found in", os.path.dirname(openvino.__file__))) - print("{}: \t{}".format("OpenVINO runtime version", ie_version)) - print("{}: \t{}".format("Model Optimizer version", mo_version)) - - versions_mismatch = False - - mo_hash = v.extract_hash_from_version(mo_version) - ie_hash = v.extract_hash_from_version(ie_version) - - if mo_hash is not None and ie_hash is not None: - min_length = min(len(mo_hash), len(ie_hash)) - mo_hash = mo_hash[:min_length] - ie_hash = ie_hash[:min_length] - - if mo_hash != ie_hash or mo_hash is None or ie_hash is None: - versions_mismatch = True - extracted_mo_release_version = v.extract_release_version(mo_version) - mo_is_custom = extracted_mo_release_version == (None, None) - - print("[ WARNING ] Model Optimizer and OpenVINO runtime versions do not match.") - print("[ WARNING ] Consider building the OpenVINO Python API from sources or reinstall OpenVINO " - "(TM) toolkit using", end=" ") - if mo_is_custom: - print("\"pip install openvino\" (may be incompatible with the current Model Optimizer version)") - else: - print("\"pip install openvino=={}.{}\"".format(*extracted_mo_release_version)) - - simplified_mo_version = v.get_simplified_mo_version() - message = str(dict({ - "platform": platform.system(), - "mo_version": simplified_mo_version, - "ie_version": v.get_simplified_ie_version(version=ie_version), - "versions_mismatch": versions_mismatch, - })) - send_telemetry(simplified_mo_version, message, 'ie_version_check') - - return True - except Exception as e: - # Do not print a warning if module wasn't found or silent mode is on - if "No module named 'openvino" not in str(e): - print("[ WARNING ] Failed to import OpenVINO Python API in: {}".format(path_to_module)) - print("[ WARNING ] {}".format(e)) - - # Send telemetry message about warning - simplified_mo_version = v.get_simplified_mo_version() - message = str(dict({ - "platform": platform.system(), - "mo_version": simplified_mo_version, - "ie_version": v.get_simplified_ie_version(env=os.environ), - "python_version": sys.version, - "error_type": classify_error_type(e), - })) - send_telemetry(simplified_mo_version, message, 'ie_import_failed') - - return False - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.add_argument("--silent", action="store_true") - 
parser.add_argument("--path_to_module") - args = parser.parse_args() - - if not import_core_modules(args.silent, args.path_to_module): - exit(1) diff --git a/tools/mo/openvino/tools/mo/utils/check_mo_import.py b/tools/mo/openvino/tools/mo/utils/check_mo_import.py deleted file mode 100644 index beba4447baa214..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/check_mo_import.py +++ /dev/null @@ -1,7 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -try: - import openvino.tools.mo -except: - exit(1) diff --git a/tools/mo/openvino/tools/mo/utils/class_registration.py b/tools/mo/openvino/tools/mo/utils/class_registration.py deleted file mode 100644 index a232a953742af0..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/class_registration.py +++ /dev/null @@ -1,343 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import os -from enum import Enum - -import networkx as nx - -from openvino.tools.mo.front.common.custom_replacement_registry import CustomReplacementRegistry -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.eliminate import shape_inference -from openvino.tools.mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively -from openvino.tools.mo.utils.error import Error, InternalError, FrameworkError -from openvino.tools.mo.utils.logger import progress_bar # pylint: disable=no-name-in-module,import-error - -_registered_classes_dict = {} - - -def _check_unique_ids(): - """ - Check that idxs is unique for all registered replacements. - """ - unique_idxs = set() - for class_type, classes_set in _registered_classes_dict.items(): - for cls in classes_set: - replacers = [c for c in cls.registered_cls if not hasattr(c, 'op')] + \ - [c for op, c in cls.registered_ops.items() if c] - for replacer_cls in replacers: - if hasattr(replacer_cls, 'id'): - id_cls = getattr(replacer_cls, 'id') - - if id_cls in unique_idxs: - raise Error('Found replacer {} with not unique id!'.format(replacer_cls)) - unique_idxs.add(id_cls) - log.debug("All replacers has unique idxs.") - - -def get_enabled_and_disabled_transforms(): - """ - :return: tuple of lists with force enabled and disabled id of transformations. 
- """ - disabled_transforms = os.environ['MO_DISABLED_TRANSFORMS'] if 'MO_DISABLED_TRANSFORMS' in os.environ else '' - enabled_transforms = os.environ['MO_ENABLED_TRANSFORMS'] if 'MO_ENABLED_TRANSFORMS' in os.environ else '' - - assert isinstance(enabled_transforms, str) - assert isinstance(disabled_transforms, str) - - disabled_transforms = disabled_transforms.split(',') - enabled_transforms = enabled_transforms.split(',') - - return enabled_transforms, disabled_transforms - - -class ClassType(Enum): - EXTRACTOR = 0 - OP = 1 - FRONT_REPLACER = 2 - MIDDLE_REPLACER = 3 - BACK_REPLACER = 4 - IR_READER_EXTENDER = 5 - LOADER = 6 - - -def _update(cls, registered_list: list, registered_dict: dict, key: str, enabled_transforms: list, - disabled_transforms: list, exclude_modules: set): - new_keys = {} # maps a custom name to class - new_keys_lower = {} # translates lowered custom name to its original form - # print('Registering new subclasses for', cls) - - for c in cls.__subclasses__(): - if need_exclude_class(c, exclude_modules): - continue - # Force enabling operations - if hasattr(c, 'id') and c.id in enabled_transforms or \ - ".".join([c.__module__, c.__name__]) in enabled_transforms: - setattr(c, 'enabled', True) - - # Force disabling operations - if hasattr(c, 'id') and c.id in disabled_transforms or \ - ".".join([c.__module__, c.__name__]) in disabled_transforms: - setattr(c, 'enabled', False) - - if c not in registered_list: - if hasattr(cls, 'excluded_classes') and c in cls.excluded_classes: - continue - registered_list.append(c) - log.info('New subclass: {}'.format(c)) - if hasattr(c, key) and getattr(c, key) is not None: - k = getattr(c, key) - if k.lower() in new_keys_lower: - # log.warning('Attempt to register of custom name {} for the second time as class {}. ' - # 'Note that custom names are case-insensitive. ' + refer_to_faq_msg(55), k, c) - continue - else: - new_keys_lower[k.lower()] = k - new_keys[k] = c - log.info('Registered a new subclass with key: {}'.format(k)) - else: - log.warning('Skipped {} registration because it was already registered or it was disabled. 
'.format(c)) - registered_dict.update(new_keys) - - -def update_registration(classes: list, enabled_transforms: list, disabled_transforms: list, exclude_modules: set): - for cls in classes: - _update(cls, cls.registered_cls, cls.registered_ops, 'op', enabled_transforms, disabled_transforms, exclude_modules) - _registered_classes_dict.setdefault(cls.class_type(), set()).add(cls) - - -class DependencyGraph(Graph): - def __init__(self, data=None, **attr): - super().__init__(data, **attr) - - def dump_graph_for_graphviz(self, node_attrs: list = [], edge_attrs: list = [], nodes_to_dump: list = None, - save_to_svg=False, highlight_nodes: list = None): - log.debug("---- GRAPHVIZ OUTPUT STARTS ----") - if nodes_to_dump is None: - nodes_to_dump = self.nodes() - string = '\ndigraph {\n' - string += 'node [color=lightblue2, style=filled, shape=box];\n' - - for node in nodes_to_dump: - attrs = "" - if hasattr(node, 'enabled') and not node.enabled: - attrs += "color=gray70," - string += '"{}" [{}];\n'.format(node, attrs) - - visited_nodes = set() - for src_node_name, dst_node_name, attrs in self.edges(data=True): - visited_nodes.add(src_node_name) - visited_nodes.add(dst_node_name) - if src_node_name not in nodes_to_dump or dst_node_name not in nodes_to_dump: - continue - src_node = self.node[src_node_name] - dst_node = self.node[dst_node_name] - src_node_string = str(src_node_name) + '\\n'.join( - [str(key) + '=' + str(src_node.get(key, 'None')) for key in node_attrs if key in src_node]) - dst_node_string = str(dst_node_name) + '\\n'.join( - [str(key) + '=' + str(dst_node.get(key, 'None')) for key in node_attrs if key in dst_node]) - edge_string = ' '.join([str(key) + '=' + str(attrs.get(key, 'None')) for key in edge_attrs if key in attrs]) - string += '"{}" -> "{}" [label = "{}"];\n'.format(src_node_string, dst_node_string, edge_string) - - for node in nodes_to_dump: - if node not in visited_nodes: - string += '"{}";\n'.format(node) - visited_nodes.add(node) - - string += '}' - log.debug(string) - log.debug("---- GRAPHVIZ OUTPUT ENDS ----") - - if save_to_svg: - try: - import graphviz - import os - file_name = "{}_{}.txt".format(self.name.replace('/', '_'), 0) - id = 1 - while os.path.exists(file_name): - file_name = "{}_{}.txt".format(self.name.replace('/', '_'), id) - id += 1 - with open(file_name, "w") as f: - f.write(string) - graphviz.render('dot', 'svg', file_name) - print('Graph was saved to {}.{}'.format(file_name, 'svg')) - except ImportError: - raise ImportError('Can\'t import graphviz') - except Exception as e: - raise Error('Can\'t save graph to svg') from e - - return string - - def cycle_check(self): - try: - list(nx.topological_sort(self)) - except nx.NetworkXUnfeasible as exception: - cycles = nx.simple_cycles(self) - raise Error( - 'There is(are) cyclic dependency(ies) between replacers. 
One of the cycles is the following: {}', - ' -> '.join([str(node) for node in list(cycles)[0]])) from exception - - def repeated_cls_names_check(self): - name_to_class_map = {} - for transform_class in self.node: - transform_name = transform_class.__name__ - assert transform_name not in name_to_class_map, \ - 'Transform name `{}` is not unique: at least {} and {} exist' \ - ''.format(transform_name, transform_class, name_to_class_map[transform_name]) - name_to_class_map[transform_name] = transform_class - - def sort_util(self, v, visited, stack): - visited.append(v) - for i in sorted([child for _, child in self.out_edges(v)], key=lambda x: x.__name__): - if i not in visited: - self.sort_util(i, visited, stack) - stack.insert(0, v) - - def determined_sort(self): - self.cycle_check() - self.repeated_cls_names_check() - transforms = sorted([cls for cls in self.nodes() if len(self.in_edges(cls)) == 0], key=lambda x: x.__name__) - order, visited = [], [] - for transform in transforms: - self.sort_util(transform, visited, order) - - graph_copy = self.copy() - for i in range(len(order) - 1): - graph_copy.add_edge(order[i], order[i + 1]) - try: - nx_order = list(nx.topological_sort(graph_copy)) - except Exception as e: - raise InternalError( - "Internal DependencyGraph determined_sort function behaves unexpectedly: cycle found") from e - assert nx_order == order, \ - "Internal DependencyGraph determined_sort function behaves unexpectedly: nx_order != order" - return order - - -def need_exclude_class(class_type, excluded_frameworks): - for framework in excluded_frameworks: - if "." + framework + "." in str(class_type): - return True - return False - - -def get_replacers_order(transform_types: list): - """ - Gets all transforms that do not have 'op'. - If two or more classes replaces the same op (both have op class attribute and values match), such - pattern is not applied (while registration it will warn user that we have a conflict). 
- """ - dependency_graph = DependencyGraph(name="UnifiedPipeline" if len(transform_types) != 1 else transform_types[0].name) - - replacers = [] - for class_type, classes_set in _registered_classes_dict.items(): - if class_type in transform_types: - for cls in classes_set: - cur_cls_replacers = [c for c in cls.registered_cls if not hasattr(c, 'op')] + \ - [c for op, c in cls.registered_ops.items() if c] - replacers.extend( - [replacer for replacer in cur_cls_replacers if replacer not in cls.excluded_replacers]) - - for replacer_cls in replacers: - dependency_graph.add_node(replacer_cls) - - for i, replacer_cls in enumerate(replacers): - for cls_after in replacer_cls().run_before(): - if cls_after in replacers: - dependency_graph.add_edge(replacer_cls, cls_after) - for cls_before in replacer_cls().run_after(): - if cls_before in replacers: - dependency_graph.add_edge(cls_before, replacer_cls) - - replacers_order = dependency_graph.determined_sort() - - debug_msg_list = ['| id | enabled | class '] - for i, replacer_cls in enumerate(replacers_order): - debug_msg_list.append('|{:5} |{:^9}| {}'.format(i, str(getattr(replacer_cls, 'enabled', None)), replacer_cls)) - log.debug('Replacers execution order: \n{}'.format('\n'.join(debug_msg_list))) - - return replacers_order - - -@progress_bar -def apply_transform(graph: Graph, replacer_cls, **kwargs): - """ - Safely executes transform if it should be and validates graph after transform execution - """ - replacer = replacer_cls() - replacement_id = 'REPLACEMENT_ID' - if hasattr(replacer, 'replacement_id'): - replacement_id = replacer.replacement_id - - if hasattr(replacer, 'enabled') and not replacer.enabled: - log.info("Skip replacer {} (enabled = False)".format(replacer_cls)) - return - - if hasattr(replacer, 'graph_condition') and \ - not all([condition(graph) for condition in replacer.graph_condition]): - log.info("Skip replacer {} (graph_condition not satisfied)".format(replacer_cls)) - return - - log.debug("Run replacer {}".format(replacer_cls)) - - try: - if hasattr(replacer, 'run_not_recursively') and replacer.run_not_recursively: - replacer.find_and_replace_pattern(graph) - else: - for_graph_and_each_sub_graph_recursively(graph, replacer.find_and_replace_pattern) - - if hasattr(replacer, 'force_clean_up') and replacer.force_clean_up: - for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up()) - - if hasattr(replacer, 'force_shape_inference') and replacer.force_shape_inference: - shape_inference(graph) - - if hasattr(replacer, 'run_not_recursively') and replacer.run_not_recursively: - graph.check_empty_graph(replacer_cls) - graph.check_shapes_consistency() - else: - for_graph_and_each_sub_graph_recursively(graph, lambda _: graph.check_empty_graph(replacer_cls)) - for_graph_and_each_sub_graph_recursively(graph, lambda _: graph.check_shapes_consistency()) - - except Error as err: - raise Error('Exception occurred during running replacer "{}" ({}): {}'.format( - replacement_id, - replacer_cls, - str(err).replace('[REPLACEMENT_ID]', replacement_id), - )) from err - except FrameworkError as err: - raise FrameworkError('{}'.format(str(err))) from err - except Exception as err: - raise Exception('Exception occurred during running replacer "{} ({})": {}'.format( - replacement_id, - replacer_cls, - str(err).replace('[REPLACEMENT_ID]', replacement_id), - )) from err - - -def apply_replacements_list(graph: Graph, replacers_order: list): - """ - Apply all transformations from replacers_order - """ - for i, replacer_cls in 
enumerate(replacers_order): - apply_transform( - graph=graph, - replacer_cls=replacer_cls, - curr_transform_num=i, - num_transforms=len(replacers_order)) - - -def apply_replacements(graph: Graph, replacements_type: list): - """ - Apply all patterns that do not have 'op' first, then apply patterns from registered_ops. - If two or more classes replaces the same op (both have op class attribute and values match), such - pattern is not applied (while registration it will warn user that we have a conflict). - """ - replacers_order = get_replacers_order(replacements_type) - apply_replacements_list(graph, replacers_order) - - -def clear_registered_classes_dict(): - CustomReplacementRegistry.registry = {} - _registered_classes_dict.clear() diff --git a/tools/mo/openvino/tools/mo/utils/cli_parser.py b/tools/mo/openvino/tools/mo/utils/cli_parser.py deleted file mode 100644 index edea30d077878b..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/cli_parser.py +++ /dev/null @@ -1,2030 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import ast -import logging as log -import os -import re -from collections import OrderedDict, namedtuple -from itertools import zip_longest -from pathlib import Path -from operator import xor -from typing import List, Union -import numbers -import inspect - -import numpy as np -from openvino.runtime import Layout, PartialShape, Dimension, Shape, Type - -import openvino -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg, get_mo_root_dir -from openvino.tools.mo.utils.help import get_convert_model_help_specifics, get_to_string_methods_for_params - - -def strtobool(value): - """ - Converts a string representation to true or false. - - :param value: a string which will be converted to bool. - :return: boolean value of the input string. - """ - value = value.lower() - if value in ('y', 'yes', 't', 'true', 'on', '1'): - return 1 - elif value in ('n', 'no', 'f', 'false', 'off', '0'): - return 0 - else: - raise ValueError(f"Invalid truth value: {value}.") - - -def extension_path_to_str_or_extensions_class(extension): - if isinstance(extension, str): - return extension - elif isinstance(extension, Path): - return str(extension) - else: - # Return unknown object as is. 
- # The type of the object will be checked by frontend.add_extension() method - return extension - - -def transformations_config_to_str(value): - if value is None: - return value - return extension_path_to_str_or_extensions_class(value) - - -def extensions_to_str_or_extensions_class(extensions): - if extensions is None: - return None - extensions_list = [] - if isinstance(extensions, str): - extensions_list = extensions.split(',') - elif isinstance(extensions, list): - for ext in extensions: - ext = extension_path_to_str_or_extensions_class(ext) - extensions_list.append(ext) - else: - extensions_list = [extension_path_to_str_or_extensions_class(extensions)] - - for ext in extensions_list: - if isinstance(ext, str): - readable_file_or_dir(ext) - return extensions_list - - -def path_to_str(path): - if path is None: - return None - if isinstance(path, str): - return path - elif isinstance(path, Path): - return str(path) - else: - raise Exception("Incorrect type of {} expected str or Path, got {}".format(path, type(path))) - - -def path_to_str_or_object(value): - if value is None or isinstance(value, str): - return value - elif isinstance(value, Path): - return str(value) - else: - return value - - -def paths_to_str(paths): - if paths is None: - return None - if isinstance(paths, list): - paths_str = [] - for path in paths: - paths_str.append(path_to_str(path)) - return ','.join(paths_str) - else: - path_to_str(paths) - - -def str_list_to_str(values): - if values is None: - return None - if isinstance(values, str): - return values - elif isinstance(values, list): - for value in values: - if not isinstance(value, str): - raise Error("Incorrect argument. {} expected to string, got type {}.".format(value, type(value))) - return ','.join(values) - else: - raise Error("Incorrect argument. {} expected to string or list of strings, got type {}.".format(values, type(values))) - - -def is_shape_type(value): - if isinstance(value, PartialShape): - return True - if isinstance(value, Shape): - return True - if isinstance(value, list) or isinstance(value, tuple): - for dim in value: - if not (isinstance(dim, Dimension) or isinstance(dim, int)): - return False - return True - return False - - -def value_to_str(value, separator): - if isinstance(value, np.ndarray): - values = [] - for x in np.nditer(value): - values.append(str(x)) - return "[" + separator.join(values) + "]" - if isinstance(value, list): - values = [] - for x in value: - if not isinstance(x, numbers.Number): - raise Exception("Incorrect value type. Expected numeric value, got {}".format(type(x))) - values.append(str(x)) - return "[" + separator.join(values) + "]" - if isinstance(value, bool): - return "True" if value else "False" - raise Exception("Incorrect value type. Expected np.ndarray or list, got {}".format(type(value))) - - -def single_input_to_input_cut_info(input: [str, tuple, list, PartialShape, Type, type]): - """ - Parses parameters of single input to InputCutInfo. 
- :param input: input cut parameters of single input - :return: InputCutInfo - """ - if isinstance(input, str): - # Parse params from string - node_name, shape, value, data_type = parse_input_value(input) - return openvino.tools.mo.InputCutInfo(node_name, - PartialShape(shape) if shape is not None else None, - data_type, - value) - if isinstance(input, openvino.tools.mo.InputCutInfo): - # Wrap input.shape to PartialShape if possible and wrap to InputCutInfo - return openvino.tools.mo.InputCutInfo(input.name, - PartialShape(input.shape) if input.shape is not None else None, - input.type, - input.value) - if isinstance(input, (tuple, list, PartialShape)): - # If input represents list with shape, wrap it to list. Single PartialShape also goes to this condition. - # Check of all dimensions will be in is_shape_type(val) method below - if len(input) > 0 and isinstance(input[0], (int, Dimension)): - input = [input] - - # Check values of tuple or list and collect to InputCutInfo - name = None - inp_type = None - shape = None - for val in input: - if isinstance(val, str): - if name is not None: - raise Exception("More than one input name provided: {}".format(input)) - name = val - elif isinstance(val, (type, Type)): - if inp_type is not None: - raise Exception("More than one input type provided: {}".format(input)) - inp_type = val - elif is_shape_type(val): - if shape is not None: - raise Exception("More than one input shape provided: {}".format(input)) - shape = PartialShape(val) - else: - raise Exception("Incorrect input parameters provided. Expected tuple with input name, " - "input type or input shape. Got unknown object: {}".format(val)) - return openvino.tools.mo.InputCutInfo(name, - PartialShape(shape) if shape is not None else None, - inp_type, - None) - # Case when only type is set - if isinstance(input, (type, Type)): - return openvino.tools.mo.InputCutInfo(None, None, input, None) - - # We don't expect here single unnamed value. If list of int is set it is considered as shape. - # Setting of value is expected only using InputCutInfo or string analog. - - raise Exception("Unexpected object provided for input. Expected openvino.toos.mo.InputCutInfo " - "or tuple or str. Got {}".format(type(input))) - - -def input_to_input_cut_info(input: [str, tuple, list]): - """ - Parses 'input' to list of InputCutInfo. - :param input: input cut parameters passed by user - :return: list of InputCutInfo with input cut parameters - """ - if input is None: - return [] - if isinstance(input, str): - inputs = [] - # Split to list of string - for input_value in split_inputs(input): - - # Parse string with parameters for single input - node_name, shape, value, data_type = parse_input_value(input_value) - inputs.append(openvino.tools.mo.InputCutInfo(node_name, - PartialShape(shape) if shape is not None else None, - data_type, - value)) - return inputs - if isinstance(input, openvino.tools.mo.InputCutInfo): - # Wrap to list and return - return [input] - if isinstance(input, tuple): - # Case when input is single shape set in tuple - if len(input) > 0 and isinstance(input[0], (int, Dimension)): - input = [input] - # Case when input is set as tuple. Expected that it is always single input. - return [single_input_to_input_cut_info(input)] - if isinstance(input, list): - # Case when input is single shape set in list - if len(input) > 0 and isinstance(input[0], (int, Dimension)): - input = [input] - inputs = [] - # Case when input is set as list. Expected that it is list of params for different inputs. 
- for inp in input: - inputs.append(single_input_to_input_cut_info(inp)) - return inputs - # Case when single type or value is set, or unknown object - return [single_input_to_input_cut_info(input)] - - -def input_shape_to_input_cut_info(input_shape: [str, Shape, PartialShape, list, tuple], inputs: list): - """ - Parses 'input_shape' to list of PartialShape and updates 'inputs'. - :param input_shape: input shapes passed by user - :param inputs: list of InputCutInfo with information from 'input' parameter - """ - if input_shape is None: - return - if isinstance(input_shape, str): - # Split input_shape to list of string - input_shape = split_shapes(input_shape) - if isinstance(input_shape, (Shape, PartialShape)): - # Whap single shape to list - input_shape = [input_shape] - if isinstance(input_shape, (list, tuple)): - # Check case when single shape is passed as list or tuple - if len(input_shape) > 0 and isinstance(input_shape[0], (int, Dimension)): - input_shape = [input_shape] - - if len(inputs) > 0 and len(input_shape) > 0: - assert len(inputs) == len(input_shape), "Different numbers of inputs were specified in \"input\" parameter " \ - "and \"input_shapes\". \"input\" has {} items, \"input_shape\" has {} item.".format(len(inputs), len(input_shape)) - - # Update inputs with information from 'input_shape' - if len(inputs) > 0: - for idx, shape in enumerate(input_shape): - shape = PartialShape(shape) - assert inputs[idx].shape is None, "Shape was set in both \"input\" and in \"input_shape\" parameter." \ - "Please use either \"input\" or \"input_shape\" for shape setting." - inputs[idx] = openvino.tools.mo.InputCutInfo(inputs[idx].name, shape, inputs[idx].type, inputs[idx].value) - - else: - for shape in input_shape: - inputs.append(openvino.tools.mo.InputCutInfo(None, PartialShape(shape), None, None)) - return - - raise Exception("Unexpected object provided for input_shape. Expected PartialShape, Shape, tuple, list or str. " - "Got {}".format(type(input_shape))) - - -def freeze_placeholder_to_input_cut_info(argv_freeze_placeholder_with_value: str, inputs: list): - """ - Parses 'argv_freeze_placeholder_with_value' to dictionary and collects unnamed inputs from 'inputs' to list. - :param argv_freeze_placeholder_with_value: string set by user. - As it was planned to be deprecated no Python analogs were made. 
- :param inputs: list of InputCutInfo with information from 'input' parameter - :returns (placeholder_values, unnamed_placeholder_values), where - placeholder_values - dictionary where key is node name, value is node value, - unnamed_placeholder_values - list with unnamed node values - """ - # Parse argv_freeze_placeholder_with_value to dictionary with names and values - placeholder_values = parse_freeze_placeholder_values(argv_freeze_placeholder_with_value) - unnamed_placeholder_values = [] - - # Collect values for freezing from 'inputs' - if inputs is not None and len(inputs) > 0: - for input in inputs: - node_name = input.name - value = input.value - if value is None: - continue - # Check for value conflict - if node_name in placeholder_values and placeholder_values[node_name] != value: - raise Error("Overriding replacement value of the placeholder with name '{}': old value = {}, new value = {}" - ".".format(node_name, placeholder_values[node_name], value)) - if node_name is not None: - # Named input case, add to dictionary - placeholder_values[node_name] = value - else: - # Unnamed input case, add to list - unnamed_placeholder_values.append(value) - - return placeholder_values, unnamed_placeholder_values - - -def mean_scale_value_to_str(value): - # default empty value - if isinstance(value, tuple) and len(value) == 0: - return value - - if isinstance(value, str): - return value - if isinstance(value, dict): - values_str = [] - for op_name, val in value.items(): - if not isinstance(op_name, str): - raise Exception("Incorrect operation name type. Expected string, got {}".format(type(op_name))) - values_str.append(op_name + value_to_str(val, ",")) - return ",".join(values_str) - if isinstance(value, list) or isinstance(value, tuple): - list_of_lists = False - for val in value: - if isinstance(val, list) or isinstance(val, tuple): - list_of_lists = True - break - if list_of_lists: - values_str = [] - for val in value: - values_str.append(value_to_str(val, ",")) - return ",".join(values_str) - else: - return value_to_str(value, ",") - return value_to_str(value, ",") - - -def layout_to_str(layout): - if isinstance(layout, str): - return layout - if isinstance(layout, Layout): - return layout.to_string() - raise Exception("Incorrect layout type. Expected Layout or string or dictionary, " - "where key is operation name and value is layout or list of layouts, got {}".format(type(layout))) - - -def source_target_layout_to_str(value): - # default empty value - if isinstance(value, tuple) and len(value) == 0: - return value - - if isinstance(value, str): - return value - if isinstance(value, dict): - values_str = [] - for op_name, layout in value.items(): - if not isinstance(op_name, str): - raise Exception("Incorrect operation name type. Expected string, got {}".format(type(op_name))) - values_str.append(op_name + "(" + layout_to_str(layout) + ")") - return ",".join(values_str) - - return layout_to_str(value) - - -def layoutmap_to_str(value): - if isinstance(value, str): - return value - if isinstance(value, openvino.tools.mo.LayoutMap): - assert value.source_layout is not None, "Incorrect layout map. 'source_layout' should be set." 
- source_layout = layout_to_str(value.source_layout) - if value.target_layout is not None: - target_layout = layout_to_str(value.target_layout) - source_layout += "->" + target_layout - return source_layout - return layout_to_str(value) - - -def layout_param_to_str(value): - # default empty value - if isinstance(value, tuple) and len(value) == 0: - return value - - if isinstance(value, str): - return value - - if isinstance(value, dict): - values_str = [] - for op_name, layout in value.items(): - if not isinstance(op_name, str): - raise Exception("Incorrect operation name type. Expected string, got {}".format(type(op_name))) - values_str.append(op_name + "(" + layoutmap_to_str(layout) + ")") - return ",".join(values_str) - if isinstance(value, openvino.tools.mo.LayoutMap): - return layoutmap_to_str(value) - if isinstance(value, list) or isinstance(value, tuple): - values_str = [] - for layout in value: - values_str.append(layoutmap_to_str(layout)) - return ",".join(values_str) - - return layoutmap_to_str(value) - - -def batch_to_int(value): - if value is None or isinstance(value, int): - return value - if isinstance(value, Dimension): - if not value.is_static: - # TODO: Ticket 88676 - raise Exception("Dynamic batch for \"batch\" parameter is not supported.") - else: - return value.get_length() - raise Exception("Incorrect batch value. Expected int, got {}.".format(type(value))) - - -def transform_param_value_to_str(value): - # This function supports parsing of parameters of MakeStateful, LowLatency2, Pruning. - # If available transforms list is extended this method should be extended for new transforms. - if isinstance(value, str): - return value - if isinstance(value, bool): - return str(value) - if isinstance(value, dict): - # param_res_names dictionary for MakeStateful transform - values_str = [] - for input_name, output_name in value.items(): - assert isinstance(input_name, str), "Incorrect input name. " \ - "Expected string, got {}".format(type(input_name)) - assert isinstance(output_name, str), "Incorrect output name. " \ - "Expected string, got {}".format(type(output_name)) - values_str.append("\'{}\':\'{}\'".format(input_name, output_name)) - return "{" + ','.join(values_str) + "}" - raise Exception("Unknown parameter type.") - - -def transform_to_str(value): - from openvino.tools.mo.back.offline_transformations import get_available_transformations # pylint: disable=no-name-in-module,import-error - - if isinstance(value, str): - return value - - if isinstance(value, tuple): - assert 1 <= len(value) <= 2, "Incorrect definition of transformation in transform argument: " \ - "expected two elements in tuple, provided {}. " \ - "Supported transforms are: {}".format( - len(value), - list(get_available_transformations().keys())) - transform_name = value[0] - assert isinstance(transform_name, str), "Incorrect transform name type. " \ - "Expected string, got {}".format(type(transform_name)) - if len(value) == 2: - params = value[1] - assert isinstance(params, dict), "Incorrect transform params type. " \ - "Expected dictionary, got {}".format(type(params)) - params_str_list = [] - for param_name, val in params.items(): - assert isinstance(param_name, str), "Incorrect transform parameter name type. " \ - "Expected string, got {}".format(type(param_name)) - val_str = transform_param_value_to_str(val) - params_str_list.append(param_name + "=" + val_str) - transform_name += '[' + ','.join(params_str_list) + ']' - return transform_name - raise Exception("Incorrect transform type. 
Expected tuple with transform name and " - "dictionary with transform parameters. Got object of type {}".format(type(value))) - - -def transform_param_to_str(value): - if value is None or isinstance(value, str): - return value - if isinstance(value, list): - transforms_str = [] - for transform in value: - transforms_str.append(transform_to_str(transform)) - return ','.join(transforms_str) - return transform_to_str(value) - - -ParamDescription = namedtuple("ParamData", - ["description", "cli_tool_description", "to_string"]) - - -def get_mo_convert_params(): - mo_convert_docs = openvino.tools.mo.convert_model.__doc__ - mo_convert_params = {} - group = "Optional parameters:" - mo_convert_params[group] = {} - - mo_convert_docs = mo_convert_docs[:mo_convert_docs.find('Returns:')] - - while len(mo_convert_docs) > 0: - param_idx1 = mo_convert_docs.find(":param") - if param_idx1 == -1: - break - param_idx2 = mo_convert_docs.find(":", param_idx1+1) - param_name = mo_convert_docs[param_idx1+len(':param '):param_idx2] - - param_description_idx = mo_convert_docs.find(":param", param_idx2+1) - param_description = mo_convert_docs[param_idx2+1: param_description_idx] - - group_name_idx = param_description.rfind('\n\n') - group_name = '' - if group_name_idx != -1: - group_name = param_description[group_name_idx:].strip() - - param_description = param_description[:group_name_idx] - param_description = param_description.strip() - - mo_convert_params[group][param_name] = ParamDescription(param_description, "", None) - - mo_convert_docs = mo_convert_docs[param_description_idx:] - - if group_name != '': - mo_convert_params[group_name] = {} - group = group_name - - # TODO: remove this when internal converting of params to string is removed - params_converted_to_string = get_to_string_methods_for_params() - - params_with_paths = get_params_with_paths_list() - cli_tool_specific_descriptions = get_convert_model_help_specifics() - - for group_name, param_group in mo_convert_params.items(): - for param_name, d in param_group.items(): - to_str_method = None - if param_name in params_converted_to_string: - to_str_method = params_converted_to_string[param_name] - elif param_name in params_with_paths: - to_str_method = path_to_str - - cli_tool_description = None - if param_name in cli_tool_specific_descriptions: - cli_tool_description = cli_tool_specific_descriptions[param_name] - - desc = ParamDescription(d.description, - cli_tool_description, - to_str_method) - mo_convert_params[group_name][param_name] = desc - - return mo_convert_params - - -class DeprecatedStoreTrue(argparse.Action): - def __init__(self, nargs=0, **kw): - super().__init__(nargs=nargs, **kw) - - def __call__(self, parser, namespace, values, option_string=None): - dep_msg = "Use of deprecated cli option {} detected. Option use in the following releases will be fatal. ".format(option_string) - if 'fusing' in option_string: - dep_msg += 'Please use --finegrain_fusing cli option instead' - log.error(dep_msg, extra={'is_warning': True}) - setattr(namespace, self.dest, True) - - -class DeprecatedOptionCommon(argparse.Action): - def __call__(self, parser, args, values, option_string): - dep_msg = "Use of deprecated cli option {} detected. Option use in the following releases will be fatal. 
".format(option_string) - log.error(dep_msg, extra={'is_warning': True}) - setattr(args, self.dest, values) - - -class IgnoredAction(argparse.Action): - def __init__(self, nargs=0, **kw): - super().__init__(nargs=nargs, **kw) - - def __call__(self, parser, namespace, values, option_string=None): - dep_msg = "Use of removed cli option '{}' detected. The option is ignored. ".format(option_string) - log.error(dep_msg, extra={'is_warning': True}) - setattr(namespace, self.dest, True) - - -def canonicalize_and_check_paths(values: Union[str, List[str]], param_name, - try_mo_root=False, check_existence=True) -> List[str]: - if values is not None: - list_of_values = list() - if isinstance(values, str): - if values != "": - list_of_values = values.split(',') - elif isinstance(values, list): - list_of_values = values - else: - raise Error('Unsupported type of command line parameter "{}" value'.format(param_name)) - - if not check_existence: - return [get_absolute_path(path) for path in list_of_values] - - for idx, val in enumerate(list_of_values): - list_of_values[idx] = val - - error_msg = 'The value for command line parameter "{}" must be existing file/directory, ' \ - 'but "{}" does not exist.'.format(param_name, val) - if os.path.exists(val): - continue - elif not try_mo_root or val == '': - raise Error(error_msg) - elif try_mo_root: - path_from_mo_root = get_mo_root_dir() + '/mo/' + val - list_of_values[idx] = path_from_mo_root - if not os.path.exists(path_from_mo_root): - raise Error(error_msg) - - return [get_absolute_path(path) for path in list_of_values] - - -class CanonicalizePathAction(argparse.Action): - """ - Expand user home directory paths and convert relative-paths to absolute. - """ - - def __call__(self, parser, namespace, values, option_string=None): - list_of_paths = canonicalize_and_check_paths(values, param_name=option_string, - try_mo_root=False, check_existence=False) - setattr(namespace, self.dest, ','.join(list_of_paths)) - - -class CanonicalizeTransformationPathCheckExistenceAction(argparse.Action): - """ - Convert relative to the current and relative to mo root paths to absolute - and check specified file or directory existence. - """ - - def __call__(self, parser, namespace, values, option_string=None): - list_of_paths = canonicalize_and_check_paths(values, param_name=option_string, - try_mo_root=True, check_existence=True) - setattr(namespace, self.dest, ','.join(list_of_paths)) - - -class CanonicalizePathCheckExistenceAction(argparse.Action): - """ - Expand user home directory paths and convert relative-paths to absolute and check specified file or directory - existence. - """ - - def __call__(self, parser, namespace, values, option_string=None): - list_of_paths = canonicalize_and_check_paths(values, param_name=option_string, - try_mo_root=False, check_existence=True) - setattr(namespace, self.dest, ','.join(list_of_paths)) - - -class CanonicalizeExtensionsPathCheckExistenceAction(argparse.Action): - """ - Expand user home directory paths and convert relative-paths to absolute and check specified file or directory - existence. 
- """ - - def __call__(self, parser, namespace, values, option_string=None): - list_of_paths = canonicalize_and_check_paths(values, param_name=option_string, - try_mo_root=False, check_existence=True) - # Extensions paths are needed to be stored as list - setattr(namespace, self.dest, list_of_paths) - - -class CanonicalizePathCheckExistenceIfNeededAction(CanonicalizePathCheckExistenceAction): - - def __call__(self, parser, namespace, values, option_string=None): - if values is not None: - if isinstance(values, str): - if values != "": - super().__call__(parser, namespace, values, option_string) - else: - setattr(namespace, self.dest, values) - - -class DeprecatedCanonicalizePathCheckExistenceAction(CanonicalizePathCheckExistenceAction): - def __call__(self, parser, namespace, values, option_string=None): - dep_msg = "Use of deprecated cli option {} detected. Option use in the following releases will be fatal. ".format( - option_string) - log.error(dep_msg, extra={'is_warning': True}) - super().__call__(parser, namespace, values, option_string) - - -def readable_file(path: str): - """ - Check that specified path is a readable file. - :param path: path to check - :return: path if the file is readable - """ - if not os.path.isfile(path): - raise Error('The "{}" is not existing file'.format(path)) - elif not os.access(path, os.R_OK): - raise Error('The "{}" is not readable'.format(path)) - else: - return path - - -def readable_file_or_dir(path: str): - """ - Check that specified path is a readable file or directory. - :param path: path to check - :return: path if the file/directory is readable - """ - if not os.path.isfile(path) and not os.path.isdir(path): - raise Error('The "{}" is not existing file or directory'.format(path)) - elif not os.access(path, os.R_OK): - raise Error('The "{}" is not readable'.format(path)) - else: - return path - - -def readable_dirs(paths: str): - """ - Checks that comma separated list of paths are readable directories. - :param paths: comma separated list of paths. - :return: comma separated list of paths. - """ - paths_list = [readable_dir(path) for path in paths.split(',')] - return ','.join(paths_list) - - -def readable_dirs_or_empty(paths: str): - """ - Checks that comma separated list of paths are readable directories of if it is empty. - :param paths: comma separated list of paths. - :return: comma separated list of paths. - """ - if paths: - return readable_dirs(paths) - return paths - - -def readable_dirs_or_files_or_empty(paths: str): - """ - Checks that comma separated list of paths are readable directories, files or a provided path is empty. - :param paths: comma separated list of paths. - :return: comma separated list of paths. - """ - if paths: - paths_list = [readable_file_or_dir(path) for path in paths.split(',')] - return ','.join(paths_list) - return paths - - -def readable_dir(path: str): - """ - Check that specified path is a readable directory. - :param path: path to check - :return: path if the directory is readable - """ - if not os.path.isdir(path): - raise Error('The "{}" is not existing directory'.format(path)) - elif not os.access(path, os.R_OK): - raise Error('The "{}" is not readable'.format(path)) - else: - return path - - -def writable_dir(path: str): - """ - Checks that specified directory is writable. The directory may not exist but it's parent or grandparent must exist. - :param path: path to check that it is writable. 
- :return: path if it is writable - """ - if path is None: - raise Error('The directory parameter is None') - if os.path.exists(path): - if os.path.isdir(path): - if os.access(path, os.W_OK): - return path - else: - raise Error('The directory "{}" is not writable'.format(path)) - else: - raise Error('The "{}" is not a directory'.format(path)) - else: - cur_path = path - while os.path.dirname(cur_path) != cur_path: - if os.path.exists(cur_path): - break - cur_path = os.path.dirname(cur_path) - if cur_path == '': - cur_path = os.path.curdir - if os.access(cur_path, os.W_OK): - return path - else: - raise Error('The directory "{}" is not writable'.format(cur_path)) - - -def add_args_by_description(args_group, params_description): - signature = inspect.signature(openvino.tools.mo.convert_model) - filepath_args = get_params_with_paths_list() - cli_tool_specific_descriptions = get_convert_model_help_specifics() - for param_name, param_description in params_description.items(): - if param_name == 'help': - continue - cli_param_name = "--"+param_name - if cli_param_name not in args_group._option_string_actions: - # Get parameter specifics - param_specifics = cli_tool_specific_descriptions[param_name] if param_name in \ - cli_tool_specific_descriptions else {} - help_text = param_specifics['description'] if 'description' in param_specifics \ - else param_description.description - action = param_specifics['action'] if 'action' in param_specifics else None - param_type = param_specifics['type'] if 'type' in param_specifics else None - param_alias = param_specifics['aliases'] if 'aliases' in param_specifics else {} - param_version = param_specifics['version'] if 'version' in param_specifics else None - param_choices = param_specifics['choices'] if 'choices' in param_specifics else None - - # Bool params common setting - if signature.parameters[param_name].annotation == bool and param_name != 'version': - default_flag = signature.parameters[param_name].default - # tools.mo.convert_model by default does not compress, - # but if we convert from cli we need to compress_to_fp16 if user did not specify otherwise - if param_name == 'compress_to_fp16': - default_flag = True - args_group.add_argument( - cli_param_name, *param_alias, - type=check_bool if param_type is None else param_type, - nargs="?", - const=True, - help=help_text, - default=default_flag) - # File paths common setting - elif param_name in filepath_args: - action = action if action is not None else CanonicalizePathCheckExistenceAction - args_group.add_argument( - cli_param_name, *param_alias, - type=str if param_type is None else param_type, - action=action, - help=help_text, - default=signature.parameters[param_name].default) - # Other params - else: - additional_params = {} - if param_version is not None: - additional_params['version'] = param_version - if param_type is not None: - additional_params['type'] = param_type - if param_choices is not None: - additional_params['choices'] = param_choices - args_group.add_argument( - cli_param_name, *param_alias, - help=help_text, - default=signature.parameters[param_name].default, - action=action, - **additional_params - ) - - -def get_common_cli_parser(parser: argparse.ArgumentParser = None): - if not parser: - parser = argparse.ArgumentParser() - common_group = parser.add_argument_group('Framework-agnostic parameters') - mo_convert_params = get_mo_convert_params() - mo_convert_params_common = mo_convert_params['Framework-agnostic parameters:'] - - # Command line tool specific params - 
common_group.add_argument('--model_name', '-n', - help='Model_name parameter passed to the final create_ir transform. ' + - 'This parameter is used to name ' + - 'a network in a generated IR and output .xml/.bin files.') - common_group.add_argument('--output_dir', '-o', - help='Directory that stores the generated IR. ' + - 'By default, it is the directory from where the Model Conversion is launched.', - default=get_absolute_path('.'), - action=CanonicalizePathAction, - type=writable_dir) - - # Deprecated params - common_group.add_argument('--freeze_placeholder_with_value', - help='Replaces input layer with constant node with ' - 'provided value, for example: "node_name->True". ' - 'It will be DEPRECATED in future releases. ' - 'Use "input" option to specify a value for freezing.', - default=None) - common_group.add_argument('--static_shape', - help='Enables IR generation for fixed input shape (folding `ShapeOf` operations and ' - 'shape-calculating sub-graphs to `Constant`). Changing model input shape using ' - 'the OpenVINO Runtime API in runtime may fail for such an IR.', - action='store_true', default=False) - common_group.add_argument("--use_new_frontend", - help='Force the usage of new Frontend for model conversion into IR. ' - 'The new Frontend is C++ based and is available for ONNX* and PaddlePaddle* models. ' - 'Model Conversion API uses new Frontend for ONNX* and PaddlePaddle* by default that means ' - '`use_new_frontend` and `use_legacy_frontend` options are not specified.', - action='store_true', default=False) - common_group.add_argument("--use_legacy_frontend", - help='Force the usage of legacy Frontend for model conversion into IR. ' - 'The legacy Frontend is Python based and is available for TensorFlow*, ONNX*, ' - 'Caffe*, and Kaldi* models.', - action='store_true', default=False) - add_args_by_description(common_group, mo_convert_params_common) - return parser - - -def get_common_cli_options(model_name): - d = OrderedDict() - d['input_model'] = '- Path to the Input Model' - d['output_dir'] = ['- Path for generated IR', lambda x: x if x != '.' 
else os.getcwd()] - d['model_name'] = ['- IR output name', lambda x: x if x else model_name] - d['log_level'] = '- Log level' - d['batch'] = ['- Batch', lambda x: x if x else 'Not specified, inherited from the model'] - d['input'] = ['- Input layers', lambda x: x if x else 'Not specified, inherited from the model'] - d['output'] = ['- Output layers', lambda x: x if x else 'Not specified, inherited from the model'] - d['input_shape'] = ['- Input shapes', lambda x: x if x else 'Not specified, inherited from the model'] - d['source_layout'] = ['- Source layout', lambda x: x if x else 'Not specified'] - d['target_layout'] = ['- Target layout', lambda x: x if x else 'Not specified'] - d['layout'] = ['- Layout', lambda x: x if x else 'Not specified'] - d['mean_values'] = ['- Mean values', lambda x: x if x else 'Not specified'] - d['scale_values'] = ['- Scale values', lambda x: x if x else 'Not specified'] - d['scale'] = ['- Scale factor', lambda x: x if x else 'Not specified'] - d['transform'] = ['- User transformations', lambda x: x if x else 'Not specified'] - d['reverse_input_channels'] = '- Reverse input channels' - d['static_shape'] = '- Enable IR generation for fixed input shape' - d['transformations_config'] = '- Use the transformations config file' - return d - - -def get_advanced_cli_options(): - d = OrderedDict() - d['use_legacy_frontend'] = '- Force the usage of legacy Frontend for model conversion into IR' - d['use_new_frontend'] = '- Force the usage of new Frontend for model conversion into IR' - return d - - -def get_caffe_cli_options(): - d = { - 'input_proto': ['- Path to the Input prototxt', lambda x: x], - 'caffe_parser_path': ['- Path to Python Caffe* parser generated from caffe.proto', lambda x: x], - 'k': '- Path to CustomLayersMapping.xml', - } - - return OrderedDict(sorted(d.items(), key=lambda t: t[0])) - - -def get_tf_cli_options(): - d = { - 'input_model_is_text': '- Input model in text protobuf format', - 'tensorflow_custom_operations_config_update': '- Update the configuration file with input/output node names', - 'tensorflow_object_detection_api_pipeline_config': '- Use configuration file used to generate the model with ' - 'Object Detection API', - 'tensorflow_custom_layer_libraries': '- List of shared libraries with TensorFlow custom layers implementation', - 'tensorboard_logdir': '- Path to model dump for TensorBoard' - } - - return OrderedDict(sorted(d.items(), key=lambda t: t[0])) - - -def get_kaldi_cli_options(): - d = { - 'counts': '- A file name with full path to the counts file or empty string if you want to use counts from model', - 'remove_output_softmax': '- Removes the SoftMax layer that is the output layer', - 'remove_memory': '- Removes the Memory layer and use additional inputs and outputs instead' - } - - return OrderedDict(sorted(d.items(), key=lambda t: t[0])) - - -def get_onnx_cli_options(): - d = { - } - - return OrderedDict(sorted(d.items(), key=lambda t: t[0])) - - -def get_params_with_paths_list(): - return ['input_model', 'output_dir', 'caffe_parser_path', 'extensions', 'k', 'output_dir', - 'input_checkpoint', 'input_meta_graph', 'input_proto', 'input_symbol', - 'pretrained_model_name', 'saved_model_dir', 'tensorboard_logdir', - 'tensorflow_custom_layer_libraries', 'tensorflow_custom_operations_config_update', - 'tensorflow_object_detection_api_pipeline_config', - 'transformations_config'] - - -def get_caffe_cli_parser(parser: argparse.ArgumentParser = None): - """ - Specifies cli arguments for Model Conversion for Caffe* - - Returns - 
------- - ArgumentParser instance - """ - if not parser: - parser = argparse.ArgumentParser(usage='%(prog)s [options]') - get_common_cli_parser(parser=parser) - - caffe_group = parser.add_argument_group('Caffe*-specific parameters') - mo_convert_params_caffe = get_mo_convert_params()['Caffe*-specific parameters:'] - add_args_by_description(caffe_group, mo_convert_params_caffe) - return parser - - -def get_tf_cli_parser(parser: argparse.ArgumentParser = None): - """ - Specifies cli arguments for Model Conversion for TF - - Returns - ------- - ArgumentParser instance - """ - if not parser: - parser = argparse.ArgumentParser(usage='%(prog)s [options]') - get_common_cli_parser(parser=parser) - mo_convert_params_tf = get_mo_convert_params()['TensorFlow*-specific parameters:'] - - tf_group = parser.add_argument_group('TensorFlow*-specific parameters') - add_args_by_description(tf_group, mo_convert_params_tf) - return parser - - -def get_kaldi_cli_parser(parser: argparse.ArgumentParser = None): - """ - Specifies cli arguments for Model Conversion for Kaldi* - - Returns - ------- - ArgumentParser instance - """ - if not parser: - parser = argparse.ArgumentParser(usage='%(prog)s [options]') - get_common_cli_parser(parser=parser) - - kaldi_group = parser.add_argument_group('Kaldi-specific parameters') - mo_convert_params_kaldi = get_mo_convert_params()['Kaldi-specific parameters:'] - add_args_by_description(kaldi_group, mo_convert_params_kaldi) - return parser - - -def get_onnx_cli_parser(parser: argparse.ArgumentParser = None): - """ - Specifies cli arguments for Model Conversion for ONNX - - Returns - ------- - ArgumentParser instance - """ - if not parser: - parser = argparse.ArgumentParser(usage='%(prog)s [options]') - get_common_cli_parser(parser=parser) - - return parser - - -def get_all_cli_parser(): - """ - Specifies cli arguments for Model Conversion - - Returns - ------- - ArgumentParser instance - """ - parser = argparse.ArgumentParser(usage='%(prog)s [options]') - mo_convert_params_optional = get_mo_convert_params()['Optional parameters:'] - add_args_by_description(parser, mo_convert_params_optional) - - get_common_cli_parser(parser=parser) - get_tf_cli_parser(parser=parser) - get_caffe_cli_parser(parser=parser) - get_kaldi_cli_parser(parser=parser) - get_onnx_cli_parser(parser=parser) - - return parser - - -def remove_data_type_from_input_value(input_value: str): - """ - Removes the type specification from the input string. The type specification is a string enclosed with curly braces. - :param input_value: string passed as input to the "input" command line parameter - :return: string without type specification - """ - return re.sub(r'\{.*\}', '', input_value) - - -def get_data_type_from_input_value(input_value: str): - """ - Returns the numpy data type corresponding to the data type specified in the input value string - :param input_value: string passed as input to the "input" command line parameter - :return: the corresponding numpy data type and None if the data type is not specified in the input value - """ - data_type_match = re.match(r'.*\{(.*)\}.*', input_value) - return destination_type_to_np_data_type(data_type_match.group(1)) if data_type_match is not None else None - - -def remove_shape_from_input_value(input_value: str): - """ - Removes the shape specification from the input string. The shape specification is a string enclosed with square - brackets. 
- :param input_value: string passed as input to the "input" command line parameter - :return: string without shape specification - """ - assert '->' not in input_value, 'The function should not be called for input_value with constant value specified' - return re.sub(r'[(\[]([0-9\.?, -]*)[)\]]', '', input_value) - - -def get_shape_from_input_value(input_value: str): - """ - Returns PartialShape corresponding to the shape specified in the input value string - :param input_value: string passed as input to the "input" command line parameter - :return: the corresponding shape and None if the shape is not specified in the input value - """ - # remove the tensor value from the input_value first - input_value = input_value.split('->')[0] - - # parse shape - shape = re.findall(r'[(\[]([0-9\.\?, -]*)[)\]]', input_value) - if len(shape) == 0: - shape = None - elif len(shape) == 1 and shape[0] in ['', ' ']: - # this shape corresponds to scalar - shape = PartialShape([]) - elif len(shape) == 1: - dims = re.split(r', *| +', shape[0]) - dims = list(filter(None, dims)) - shape = PartialShape([Dimension(dim) for dim in dims]) - else: - raise Error("Wrong syntax to specify shape. Use \"input\" " - "\"node_name[shape]->value\"") - return shape - - -def get_node_name_with_port_from_input_value(input_value: str): - """ - Returns the node name (optionally with input/output port) from the input value - :param input_value: string passed as input to the "input" command line parameter - :return: the corresponding node name with input/output port - """ - return remove_shape_from_input_value(remove_data_type_from_input_value(input_value.split('->')[0])) - - -def get_value_from_input_value(input_value: str): - """ - Returns the value from the input value string - :param input_value: string passed as input to the "input" command line parameter - :return: the corresponding value or None if it is not specified - """ - parts = input_value.split('->') - value = None - if len(parts) == 2: - value = parts[1] - if value[0] == '[' and value[-1] != ']' or value[0] != '[' and value[-1] == ']': - raise Error("Wrong syntax to specify value. Use \"input\"=\"node_name[shape]->value\"") - if '[' in value.strip(' '): - value = value.replace('[', '').replace(']', '') - if ',' in value: - value = value.replace(' ', '') - value = value.split(',') - else: - value = value.split(' ') - if not isinstance(value, list): - value = ast.literal_eval(value) - elif len(parts) > 2: - raise Error("Wrong syntax to specify value. Use \"input\"=\"node_name[shape]->value\"") - return value - - -def partial_shape_prod(shape: [PartialShape, tuple]): - assert not (isinstance(shape, PartialShape) and shape.is_dynamic), \ - "Unable to calculate prod for dynamic shape {}.".format(shape) - - prod = 1 - for dim in shape: - prod *= dim.get_min_length() - return prod - - -def parse_input_value(input_value: str): - """ - Parses a value of the "input" command line parameter and gets a node name, shape and value. - The node name includes a port if it is specified. - Shape and value is equal to None if they are not specified. - Parameters - ---------- - input_value - string with a specified node name, shape, value and data_type. - E.g. 'node_name:0[4]{fp32}->[1.0 2.0 3.0 4.0]' - - Returns - ------- - Node name, shape, value, data type - E.g. 
'node_name:0', '4', [1.0 2.0 3.0 4.0], np.float32 - """ - data_type = get_data_type_from_input_value(input_value) - node_name = get_node_name_with_port_from_input_value(input_value) - value = get_value_from_input_value(input_value) - shape = get_shape_from_input_value(input_value) - value_size = np.prod(len(value)) if isinstance(value, list) else 1 - - if value is not None and shape is not None: - for dim in shape: - if isinstance(dim, Dimension) and dim.is_dynamic: - raise Error("Cannot freeze input with dynamic shape: {}".format(shape)) - - if shape is not None and value is not None and partial_shape_prod(shape) != value_size: - raise Error("The shape '{}' of the input node '{}' does not correspond to the number of elements '{}' in the " - "value: {}".format(shape, node_name, value_size, value)) - return node_name, shape, value, data_type - - -def split_str_avoiding_square_brackets(s: str) -> list: - """ - Splits a string by comma, but skips commas inside square brackets. - :param s: string to split - :return: list of strings split by comma - """ - res = list() - skipping = 0 - last_idx = 0 - for i, c in enumerate(s): - if c == '[': - skipping += 1 - elif c == ']': - skipping -= 1 - elif c == ',' and skipping == 0: - res.append(s[last_idx:i]) - last_idx = i + 1 - res.append(s[last_idx:]) - return res - - -def split_layouts_by_arrow(s: str) -> tuple: - """ - Splits a layout string by first arrow (->). - :param s: string to split - :return: tuple containing source and target layouts - """ - arrow = s.find('->') - if arrow != -1: - source_layout = s[:arrow] - target_layout = s[arrow + 2:] - if source_layout == '': - source_layout = None - if target_layout == '': - target_layout = None - return source_layout, target_layout - else: - return s, None - - -def validate_layout(layout: str): - """ - Checks if layout is of valid format. - :param layout: string containing layout - :raises: if layout is incorrect - """ - error_msg = 'Invalid layout parsed: {}'.format(layout) - if layout: - incorrect_brackets = xor(layout[0] == '[', layout[-1] == ']') - if incorrect_brackets or layout[-1] == '-': - error_msg += ', did you forget quotes?' - else: - valid_layout_re = re.compile(r'\[?[^\[\]\(\)\-\s]*\]?') - if valid_layout_re.fullmatch(layout): - return - raise Error(error_msg) - - -def write_found_layout(name: str, found_layout: str, parsed: dict, dest: str = None): - """ - Writes found layout data to the 'parsed' dict. - :param name: name of the node to add layout - :param found_layout: string containing layout for the node - :param parsed: dict where result will be stored - :param dest: type of the command line: - * 'source' is "source_layout" - * 'target' is "target_layout" - * None is "layout" - """ - s_layout = None - t_layout = None - if name in parsed: - s_layout = parsed[name]['source_layout'] - t_layout = parsed[name]['target_layout'] - if dest == 'source': - s_layout = found_layout - elif dest == 'target': - t_layout = found_layout - else: - s_layout, t_layout = split_layouts_by_arrow(found_layout) - validate_layout(s_layout) - validate_layout(t_layout) - parsed[name] = {'source_layout': s_layout, 'target_layout': t_layout} - - -def write_found_layout_list(idx: int, found_layout: str, parsed: list, dest: str = None): - """ - Writes found layout data to the 'parsed' dict. 
- :param idx: idx of of the node to add layout - :param found_layout: string containing layout for the node - :param parsed: list where result will be stored - :param dest: type of the command line: - * 'source' is "source_layout" - * 'target' is "target_layout" - * None is "layout" - """ - s_layout = None - t_layout = None - if idx < len(parsed): - s_layout = parsed[idx]['source_layout'] - t_layout = parsed[idx]['target_layout'] - if dest == 'source': - s_layout = found_layout - elif dest == 'target': - t_layout = found_layout - else: - s_layout, t_layout = split_layouts_by_arrow(found_layout) - validate_layout(s_layout) - validate_layout(t_layout) - - if idx < len(parsed): - parsed[idx] = {'source_layout': s_layout, 'target_layout': t_layout} - else: - parsed.append({'source_layout': s_layout, 'target_layout': t_layout}) - - -def parse_layouts_by_destination(s: str, parsed: dict, parsed_list: list, dest: str = None) -> None: - """ - Parses layout command line to get all names and layouts from it. Adds all found data in the 'parsed' dict. - :param s: string to parse - :param parsed: dict where result will be stored - :param dest: type of the command line: - * 'source' is "source_layout" - * 'target' is "target_layout" - * None is "layout" - """ - list_s = split_str_avoiding_square_brackets(s) - if len(list_s) == 1 and (list_s[0][-1] not in ')]' or (list_s[0][0] == '[' and list_s[0][-1] == ']')): - # single layout case - write_found_layout('', list_s[0], parsed, dest) - else: - for idx, layout_str in enumerate(list_s): - # case for: "name1(nhwc->[n,c,h,w])" - p1 = re.compile(r'([^\[\]\(\)]*)\((\S+)\)') - m1 = p1.match(layout_str) - # case for: "name1[n,h,w,c]->[n,c,h,w]" - p2 = re.compile(r'([^\[\]\(\)]*)(\[\S*\])') - m2 = p2.match(layout_str) - if m1: - found_g = m1.groups() - elif m2: - found_g = m2.groups() - else: - # case for layout without name - write_found_layout_list(idx, layout_str, parsed_list, dest) - continue - if len(found_g[0]) > 0: - write_found_layout(found_g[0], found_g[1], parsed, dest) - else: - write_found_layout_list(idx, found_g[1], parsed_list, dest) - - -def get_layout_values(argv_layout: str = '', argv_source_layout: str = '', argv_target_layout: str = ''): - """ - Parses layout string. - :param argv_layout: string with a list of layouts passed as a "layout". - :param argv_source_layout: string with a list of layouts passed as a "source_layout". - :param argv_target_layout: string with a list of layouts passed as a "target_layout". - :return: dict with names and layouts associated - """ - if argv_layout and (argv_source_layout or argv_target_layout): - raise Error("\"layout\" is used as well as \"source_layout\" and/or \"target_layout\" which is not allowed, please " - "use one of them.") - res = {} - res_list = [] - if argv_layout: - parse_layouts_by_destination(argv_layout, res, res_list) - if argv_source_layout: - parse_layouts_by_destination(argv_source_layout, res, res_list, 'source') - if argv_target_layout: - parse_layouts_by_destination(argv_target_layout, res, res_list, 'target') - if len(res) > 0 and len(res_list) > 0: - raise Error("Some layout values are provided with names, and some without names. " - "Please provide ether all layouts with names or all layouts without names.") - if len(res) > 0: - return res - else: - return res_list - - -def parse_freeze_placeholder_values(argv_freeze_placeholder_with_value: str): - """ - Parses parse_freeze_placeholder_values string. 
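# A short sketch of get_layout_values() defined above, assuming the same legacy
# openvino.tools.mo.utils.cli_parser import as in the previous sketch. The layout
# strings follow the "name(src->dst)" and "name[layout]" forms from the comments above.
from openvino.tools.mo.utils.cli_parser import get_layout_values

layouts = get_layout_values(argv_layout='name1(nhwc->[n,c,h,w]),name2[n,h,w,c]')
# layouts -> {'name1': {'source_layout': 'nhwc',      'target_layout': '[n,c,h,w]'},
#             'name2': {'source_layout': '[n,h,w,c]', 'target_layout': None}}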
- :param argv_freeze_placeholder_with_value: string information on freezing placeholders - :return: dictionary where key is node name, value is node value. - """ - placeholder_values = {} - if argv_freeze_placeholder_with_value is not None: - for plh_with_value in argv_freeze_placeholder_with_value.split(','): - plh_with_value = plh_with_value.split('->') - if len(plh_with_value) != 2: - raise Error("Wrong replacement syntax. Use \"freeze_placeholder_with_value\" " - "\"node1_name->value1,node2_name->value2\"") - node_name = plh_with_value[0] - value = plh_with_value[1] - if node_name in placeholder_values and placeholder_values[node_name] != value: - raise Error("Overriding replacement value of the placeholder with name '{}': old value = {}, new value = {}" - ".".format(node_name, placeholder_values[node_name], value)) - if '[' in value.strip(' '): - value = value.replace('[', '').replace(']', '').split(' ') - placeholder_values[node_name] = value - return placeholder_values - - -def get_freeze_placeholder_values(argv_input: str, argv_freeze_placeholder_with_value: str): - """ - Parses values for placeholder freezing and input node names - - Parameters - ---------- - argv_input - string with a list of input layers: either an empty string, or strings separated with comma. - 'node_name1[shape1]->value1,node_name2[shape2]->value2,...' - argv_freeze_placeholder_with_value - string with a list of input shapes: either an empty string, or tuples separated with comma. - 'placeholder_name1->value1, placeholder_name2->value2,...' - - Returns - ------- - parsed placeholders with values for freezing - input nodes cleaned from shape info - """ - placeholder_values = parse_freeze_placeholder_values(argv_freeze_placeholder_with_value) - input_node_names = None - - if argv_input is not None: - input_node_names = '' - # walkthrough all input values and save values for freezing - for input_value in split_inputs(argv_input): - node_name, _, value, _ = parse_input_value(input_value) - input_node_names = input_node_names + ',' + node_name if input_node_names != '' else node_name - if value is None: # no value is specified for freezing - continue - if node_name in placeholder_values and placeholder_values[node_name] != value: - raise Error("Overriding replacement value of the placeholder with name '{}': old value = {}, new value = {}" - ".".format(node_name, placeholder_values[node_name], value)) - placeholder_values[node_name] = value - - return placeholder_values, input_node_names - - -def split_inputs(input_str): - brakets_count = 0 - inputs = [] - while input_str: - idx = 0 - for c in input_str: - if c == '[': - brakets_count += 1 - if c == ']': - brakets_count -= 1 - if c == ',': - if brakets_count != 0: - idx += 1 - continue - else: - break - idx += 1 - if idx >= len(input_str)-1: - inputs.append(input_str) - break - inputs.append(input_str[:idx]) - input_str = input_str[idx+1:] - return inputs - - - -def split_shapes(argv_input_shape: str): - range_reg = r'([0-9]*\.\.[0-9]*)' - first_digit_reg = r'([0-9 ]+|-1|\?|{})'.format(range_reg) - next_digits_reg = r'(,{})*'.format(first_digit_reg) - tuple_reg = r'((\({}{}\))|(\[{}{}\]))'.format(first_digit_reg, next_digits_reg, - first_digit_reg, next_digits_reg) - - full_reg = r'^{}(\s*,\s*{})*$|^$'.format(tuple_reg, tuple_reg) - if not re.match(full_reg, argv_input_shape): - raise Error('Input shape "{}" cannot be parsed. ' + refer_to_faq_msg(57), argv_input_shape) - return re.findall(r'[(\[]([0-9,\.\? 
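# A quick sketch of the bracket-aware splitting done by split_inputs() above: commas
# inside [...] do not start a new input. Same legacy cli_parser import assumption.
from openvino.tools.mo.utils.cli_parser import split_inputs

print(split_inputs('node1[1,3]->[0.0 1.0 2.0],node2[2,2]'))
# ['node1[1,3]->[0.0 1.0 2.0]', 'node2[2,2]']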
-]+)[)\]]', argv_input_shape) - -def get_placeholder_shapes(argv_input: str, argv_input_shape: str, argv_batch=None): - """ - Parses input layers names and input shapes from the cli and returns the parsed object. - All shapes are specified only through one command line option either "input" or "input_shape". - - Parameters - ---------- - argv_input - string with a list of input layers: either an empty string, or strings separated with comma. - E.g. 'inp1,inp2', 'node_name1[shape1]->value1,node_name2[shape2]->value2' - argv_input_shape - string with a list of input shapes: either an empty string, or tuples separated with comma. - E.g. '[1,2],[3,4]'. - Only positive integers are accepted. - '?' marks dynamic dimension. - Partial shape is specified with ellipsis. E.g. '[1..10,2,3]' - argv_batch - integer that overrides batch size in input shape - - Returns - ------- - parsed shapes in form of {'name of input':tuple} if names of inputs are provided with shapes - parsed shapes in form of {'name of input':None} if names of inputs are provided without shapes - tuple if only one shape is provided and no input name - None if neither shape nor input were provided - """ - if argv_input_shape and argv_batch: - raise Error("Both \"input_shape\" and \"batch\" were provided. Please provide only one of them. " + - refer_to_faq_msg(56)) - - # attempt to extract shapes from "input" parameters - placeholder_shapes = dict() - placeholder_data_types = dict() - are_shapes_specified_through_input = False - inputs_list = list() - if argv_input: - for input_value in split_inputs(argv_input): - node_name, shape, _, data_type = parse_input_value(input_value) - placeholder_shapes[node_name] = shape - inputs_list.append(node_name) - if data_type is not None: - placeholder_data_types[node_name] = data_type - if shape is not None: - are_shapes_specified_through_input = True - - if argv_input_shape and are_shapes_specified_through_input: - raise Error("Shapes are specified using both \"input\" and \"input_shape\" command-line parameters, but only one " - "parameter is allowed.") - - if argv_batch and are_shapes_specified_through_input: - raise Error("Shapes are specified using both \"input\" and \"batch\" command-line parameters, but only one " - "parameter is allowed.") - - if are_shapes_specified_through_input: - return inputs_list, placeholder_shapes, placeholder_data_types - - shapes = list() - inputs = list() - inputs_list = list() - placeholder_shapes = None - - - if argv_input_shape: - shapes = split_shapes(argv_input_shape) - - if argv_input: - inputs = split_inputs(argv_input) - inputs = [remove_data_type_from_input_value(inp) for inp in inputs] - - # check number of shapes with no input provided - if argv_input_shape and not argv_input: - placeholder_shapes = [PartialShape(shape) for shape in shapes] - if len(placeholder_shapes) == 1: - placeholder_shapes = PartialShape(placeholder_shapes[0]) - # check if number of shapes does not match number of passed inputs - elif argv_input and (len(shapes) == len(inputs) or len(shapes) == 0): - # clean inputs from values for freezing - inputs_without_value = list(map(lambda x: x.split('->')[0], inputs)) - placeholder_shapes = dict(zip_longest(inputs_without_value, - map(lambda x: PartialShape(x) if x else None, shapes))) - for inp in inputs: - if '->' not in inp: - inputs_list.append(inp) - continue - shape = placeholder_shapes[inp.split('->')[0]] - inputs_list.append(inp.split('->')[0]) - - if shape is None: - continue - for dim in shape: - if isinstance(dim, Dimension) 
and not dim.is_static: - raise Error("Cannot freeze input with dynamic shape: {}".format(shape)) - - elif argv_input: - raise Error('Please provide each input layers with an input layer shape. ' + refer_to_faq_msg(58)) - - return inputs_list, placeholder_shapes, placeholder_data_types - - -def parse_tuple_pairs(argv_values: str): - """ - Gets mean/scale values from the given string parameter - Parameters - ---------- - argv_values - string with a specified input name and list of mean values: either an empty string, or a tuple - in a form [] or (). - E.g. 'data(1,2,3)' means 1 for the RED channel, 2 for the GREEN channel, 3 for the BLUE channel for the data - input layer, or tuple of values in a form [] or () if input is specified separately, e.g. (1,2,3),[4,5,6]. - - Returns - ------- - dictionary with input name and tuple of values or list of values if mean/scale value is specified with input, - e.g.: - "data(10,20,30),info(11,22,33)" -> { 'data': [10,20,30], 'info': [11,22,33] } - "(10,20,30),(11,22,33)" -> [mo_array(10,20,30), mo_array(11,22,33)] - """ - res = {} - if not argv_values: - return res - - matches = [m for m in re.finditer(r'[(\[]([0-9., -]+)[)\]]', argv_values, re.IGNORECASE)] - - error_msg = 'Mean/scale values should consist of name and values specified in round or square brackets ' \ - 'separated by comma, e.g. data(1,2,3),info[2,3,4],egg[255] or data(1,2,3). Or just plain set of ' \ - 'values without names: (1,2,3),(2,3,4) or [1,2,3],[2,3,4].' + refer_to_faq_msg(101) - if not matches: - raise Error(error_msg, argv_values) - - name_start_idx = 0 - name_was_present = False - for idx, match in enumerate(matches): - input_name = argv_values[name_start_idx:match.start(0)] - name_start_idx = match.end(0) + 1 - tuple_value = np.fromstring(match.groups()[0], dtype=float, sep=',') - - if idx != 0 and (name_was_present ^ bool(input_name)): - # if node name firstly was specified and then subsequently not or vice versa - # e.g. (255),input[127] or input(255),[127] - raise Error(error_msg, argv_values) - - name_was_present = True if input_name != "" else False - if name_was_present: - res[input_name] = tuple_value - else: - res[idx] = tuple_value - - if not name_was_present: - # return a list instead of a dictionary - res = sorted(res.values(), key=lambda v: v[0]) - return res - - -def get_tuple_values(argv_values: str or tuple, num_exp_values: int = 3, t=float or int): - """ - Gets mean values from the given string parameter - Args: - argv_values: string with list of mean values: either an empty string, or a tuple in a form [] or (). - E.g. '(1,2,3)' means 1 for the RED channel, 2 for the GREEN channel, 4 for the BLUE channel. - t: either float or int - num_exp_values: number of values in tuple - - Returns: - tuple of values - """ - - digit_reg = r'(-?[0-9. ]+)' if t == float else r'(-?[0-9 ]+)' - - assert num_exp_values > 1, 'Can not parse tuple of size 1' - content = r'{0}\s*,{1}\s*{0}'.format(digit_reg, (digit_reg + ',') * (num_exp_values - 2)) - tuple_reg = r'((\({0}\))|(\[{0}\]))'.format(content) - - if isinstance(argv_values, tuple) and not len(argv_values): - return argv_values - - if not len(argv_values) or not re.match(tuple_reg, argv_values): - raise Error('Values "{}" cannot be parsed. ' + - refer_to_faq_msg(59), argv_values) - - mean_values_matches = re.findall(r'[(\[]([0-9., -]+)[)\]]', argv_values) - - for mean in mean_values_matches: - if len(mean.split(',')) != num_exp_values: - raise Error('{} channels are expected for given values. 
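# The two output shapes of parse_tuple_pairs() above, taken from its docstring
# (named and nameless mean/scale values). Same legacy cli_parser import assumption.
from openvino.tools.mo.utils.cli_parser import parse_tuple_pairs

parse_tuple_pairs('data(10,20,30),info(11,22,33)')
# -> {'data': array([10., 20., 30.]), 'info': array([11., 22., 33.])}
parse_tuple_pairs('(10,20,30),(11,22,33)')
# -> [array([10., 20., 30.]), array([11., 22., 33.])]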
' + - refer_to_faq_msg(60), num_exp_values) - - return mean_values_matches - - -def split_node_in_port(node_id: str): - """Split node_id in form port:node to separate node and port, where port is converted to int""" - if isinstance(node_id, str): - separator = ':' - parts = node_id.split(separator) - if len(parts) > 1: - if parts[0].isdigit(): - node_name = separator.join(parts[1:]) - try: - port = int(parts[0]) - return node_name, port - except ValueError as err: - log.warning('Didn\'t recognize port:node format for "{}" because port is not an integer.'.format( - node_id)) - else: - node_name = separator.join(parts[:-1]) - try: - port = int(parts[-1]) - return node_name, port - except ValueError as err: - log.warning('Didn\'t recognize node:port format for "{}" because port is not an integer.'.format( - node_id)) - - return node_id, None - - -def get_mean_scale_dictionary(mean_values, scale_values, argv_input: list): - """ - This function takes mean_values and scale_values, checks and processes them into convenient structure - - Parameters - ---------- - mean_values dictionary, contains input name and mean values passed py user (e.g. {data: np.array[102.4, 122.1, 113.9]}), - or list containing values (e.g. np.array[102.4, 122.1, 113.9]) - scale_values dictionary, contains input name and scale values passed py user (e.g. {data: np.array[102.4, 122.1, 113.9]}) - or list containing values (e.g. np.array[102.4, 122.1, 113.9]) - - Returns - ------- - The function returns a dictionary e.g. - mean = { 'data': np.array, 'info': np.array }, scale = { 'data': np.array, 'info': np.array }, input = "data, info" -> - { 'data': { 'mean': np.array, 'scale': np.array }, 'info': { 'mean': np.array, 'scale': np.array } } - - """ - res = {} - # collect input names - if argv_input: - inputs = [get_node_name_with_port_from_input_value(input_value) for input_value in split_inputs(argv_input)] - else: - inputs = [] - if type(mean_values) is dict: - inputs = list(mean_values.keys()) - if type(scale_values) is dict: - for name in scale_values.keys(): - if name not in inputs: - inputs.append(name) - - # create unified object containing both mean and scale for input - if type(mean_values) is dict and type(scale_values) is dict: - if not mean_values and not scale_values: - return res - - for inp_scale in scale_values.keys(): - if inp_scale not in inputs: - raise Error("Specified scale_values name '{}' do not match to any of inputs: {}. " - "Please set 'scale_values' that correspond to values from input.".format(inp_scale, inputs)) - - for inp_mean in mean_values.keys(): - if inp_mean not in inputs: - raise Error("Specified mean_values name '{}' do not match to any of inputs: {}. " - "Please set 'mean_values' that correspond to values from input.".format(inp_mean, inputs)) - - for inp in inputs: - inp, port = split_node_in_port(inp) - if inp in mean_values or inp in scale_values: - res.update( - { - inp: { - 'mean': - mean_values[inp] if inp in mean_values else None, - 'scale': - scale_values[inp] if inp in scale_values else None - } - } - ) - return res - - # user specified input and mean/scale separately - we should return dictionary - if inputs: - if mean_values and scale_values: - if len(inputs) != len(mean_values): - raise Error('Numbers of inputs and mean values do not match. ' + - refer_to_faq_msg(61)) - if len(inputs) != len(scale_values): - raise Error('Numbers of inputs and scale values do not match. 
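# split_node_in_port() above accepts both "node:port" and "port:node" spellings;
# a plain name comes back with port=None. Same legacy cli_parser import assumption.
from openvino.tools.mo.utils.cli_parser import split_node_in_port

split_node_in_port('conv1:0')   # -> ('conv1', 0)
split_node_in_port('0:conv1')   # -> ('conv1', 0)
split_node_in_port('conv1')     # -> ('conv1', None)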
' + - refer_to_faq_msg(62)) - - data = list(zip(mean_values, scale_values)) - - for i in range(len(data)): - res.update( - { - inputs[i]: { - 'mean': - data[i][0], - 'scale': - data[i][1], - - } - } - ) - return res - # only mean value specified - if mean_values: - data = list(mean_values) - for i in range(len(data)): - res.update( - { - inputs[i]: { - 'mean': - data[i], - 'scale': - None - - } - } - ) - return res - - # only scale value specified - if scale_values: - data = list(scale_values) - for i in range(len(data)): - res.update( - { - inputs[i]: { - 'mean': - None, - 'scale': - data[i] - - } - } - ) - return res - # mean and/or scale are specified without inputs - return list(zip_longest(mean_values, scale_values)) - - -def get_model_name(path_input_model: str) -> str: - """ - Deduces model name by a given path to the input model - Args: - path_input_model: path to the input model - - Returns: - name of the output IR - """ - parsed_name, extension = os.path.splitext(os.path.basename(path_input_model)) - return 'model' if parsed_name.startswith('.') or len(parsed_name) == 0 else parsed_name - - -def get_model_name_from_args(argv: argparse.Namespace): - model_name = "" - if hasattr(argv, 'model_name'): - if argv.model_name: - model_name = argv.model_name - elif argv.input_model: - model_name = get_model_name(argv.input_model) - elif argv.saved_model_dir: - model_name = "saved_model" - elif argv.input_meta_graph: - model_name = get_model_name(argv.input_meta_graph) - elif argv.input_symbol: - model_name = get_model_name(argv.input_symbol) - argv.model_name = model_name - return model_name - - -def get_absolute_path(path_to_file: str) -> str: - """ - Deduces absolute path of the file by a given path to the file - Args: - path_to_file: path to the file - - Returns: - absolute path of the file - """ - file_path = os.path.expanduser(path_to_file) - if not os.path.isabs(file_path): - file_path = os.path.join(os.getcwd(), file_path) - return file_path - - -def isfloat(value): - try: - float(value) - return True - except ValueError: - return False - - -def isbool(value): - try: - strtobool(value) - return True - except ValueError: - return False - - -def isdict(value): - try: - evaluated = ast.literal_eval(value) - return isinstance(evaluated, dict) - except ValueError: - return False - - -def convert_string_to_real_type(value: str): - if isdict(value): - return ast.literal_eval(value) - - values = value.split(',') - for i in range(len(values)): - value = values[i] - if value.isdigit(): - values[i] = int(value) - elif isfloat(value): - values[i] = float(value) - elif isbool(value): - values[i] = strtobool(value) - - return values[0] if len(values) == 1 else values - - -def parse_transform(transform: str) -> list: - transforms = [] - - if len(transform) == 0: - return transforms - - all_transforms = re.findall(r"([a-zA-Z0-9]+)(\[([^\]]+)\])*(,|$)", transform) - - # Check that all characters were matched otherwise transform key value is invalid - key_len = len(transform) - for transform in all_transforms: - # In regexp we have 4 groups where 1st group - transformation_name, - # 2nd group - [args], - # 3rd group - args, <-- nested group - # 4th group - EOL - # And to check that regexp matched all string we decrease total length by the length of matched groups (1,2,4) - # In case if no arguments were given to transformation then 2nd and 3rd groups will be empty. 
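# IR name deduction done by get_model_name() above: the file stem is used, with a
# 'model' fallback for empty or dot-only stems. Same legacy cli_parser import assumption.
from openvino.tools.mo.utils.cli_parser import get_model_name

get_model_name('/models/resnet50.onnx')   # -> 'resnet50'
get_model_name('/models/.hidden')         # -> 'model'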
- if len(transform) != 4: - raise Error("Unexpected transform key structure: {}".format(transform)) - key_len -= len(transform[0]) + len(transform[1]) + len(transform[3]) - - if key_len != 0: - raise Error("Unexpected transform key structure: {}".format(transform)) - - for transform in all_transforms: - name = transform[0] - args = transform[2] - - args_dict = {} - - if len(args) != 0: - for arg in args.split(';'): - m = re.match(r"^([_a-zA-Z]+)=(.+)$", arg) - if not m: - raise Error("Unrecognized attributes for transform key: {}".format(transform)) - - args_dict[m.group(1)] = convert_string_to_real_type(m.group(2)) - - transforms.append((name, args_dict)) - - return transforms - - -def check_available_transforms(transforms: list): - """ - This function check that transformations specified by user are available. - :param transforms: list of user specified transformations - :return: raises an Error if transformation is not available - """ - from openvino.tools.mo.back.offline_transformations import get_available_transformations # pylint: disable=no-name-in-module,import-error - available_transforms = get_available_transformations() - - missing_transformations = [] - for name, _ in transforms: - if name not in available_transforms.keys(): - missing_transformations.append(name) - - if len(missing_transformations) != 0: - raise Error('Following transformations ({}) are not available. ' - 'List with available transformations ({})'.format(','.join(missing_transformations), - ','.join(available_transforms.keys()))) - return True - - -def check_positive(value): - try: - int_value = int(value) - if int_value <= 0: - raise ValueError - except ValueError: - raise argparse.ArgumentTypeError("expected a positive integer value") - - return int_value - - -def check_bool(value): - if isinstance(value, bool): - return value - elif isinstance(value, str): - if value.lower() not in ['true', 'false']: - raise argparse.ArgumentTypeError("expected a True/False value") - return value.lower() == 'true' - else: - raise argparse.ArgumentTypeError("expected a bool or str type") - - -def depersonalize(value: str, key: str): - dir_keys = [ - 'output_dir', 'extensions', 'saved_model_dir', 'tensorboard_logdir', 'caffe_parser_path' - ] - if isinstance(value, list): - updated_value = [] - for elem in value: - updated_value.append(depersonalize(elem, key)) - return updated_value - - if not isinstance(value, str): - return value - res = [] - for path in value.split(','): - if os.path.isdir(path) and key in dir_keys: - res.append('DIR') - elif os.path.isfile(path): - res.append(os.path.join('DIR', os.path.split(path)[1])) - else: - res.append(path) - return ','.join(res) - -def get_available_front_ends(fem=None): - # Use this function as workaround to avoid IR frontend usage by MO - if fem is None: - return [] - available_moc_front_ends = fem.get_available_front_ends() - if 'ir' in available_moc_front_ends: - available_moc_front_ends.remove('ir') - - return available_moc_front_ends diff --git a/tools/mo/openvino/tools/mo/utils/convert.py b/tools/mo/openvino/tools/mo/utils/convert.py deleted file mode 100755 index fe4d02e66571de..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/convert.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import os -import sys - -# do not print INFO and WARNING messages from TensorFlow -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -try: - import tensorflow.compat.v1 as tf_v1 -except ImportError: - import 
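# How parse_transform() above turns a "--transform" string into (name, args) pairs.
# 'MyTransform' is a hypothetical name used only for illustration; argument values go
# through convert_string_to_real_type(), so '0.5' becomes a float. Same legacy
# cli_parser import assumption.
from openvino.tools.mo.utils.cli_parser import parse_transform

parse_transform('LowLatency2')              # -> [('LowLatency2', {})]
parse_transform('MyTransform[alpha=0.5]')   # -> [('MyTransform', {'alpha': 0.5})]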
tensorflow as tf_v1 - -#in some environment suppressing through TF_CPP_MIN_LOG_LEVEL does not work -tf_v1.get_logger().setLevel("ERROR") - -sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)) -from openvino.tools.mo.front.tf.loader import load_tf_graph_def - -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' - - -def argparser(): - parser = argparse.ArgumentParser() - parser.add_argument("--to_pbtxt", dest='pb', type=str, help="Path to TensorFlow binary model") - parser.add_argument('--to_pb', dest='pbtxt', type=str, help="Path to TensorFlow text model") - return parser.parse_args() - - -def convert(filename: str, is_text: bool): - if not os.path.isfile(filename): - raise FileNotFoundError("File doesn't exist: {}".format(filename)) - new_ext = ".pbtxt" if is_text else ".pb" - head, tail = os.path.split(os.path.abspath(filename)) - print("Convert: {} \n to: {}".format(filename, os.path.join(head, tail + new_ext))) - graph_def, _, _, _ = load_tf_graph_def(graph_file_name=filename, is_binary=is_text) - tf_v1.import_graph_def(graph_def, name='') - tf_v1.train.write_graph(graph_def, head, tail + new_ext, as_text=is_text) - - -if __name__ == '__main__': - argv = argparser() - if argv.pb is None and argv.pbtxt is None: - print("Please provide model to convert --to_pb or --to_pbtxt") - sys.exit(1) - if argv.pb is not None: - convert(argv.pb, True) - if argv.pbtxt is not None: - convert(argv.pbtxt, False) diff --git a/tools/mo/openvino/tools/mo/utils/custom_replacement_config.py b/tools/mo/openvino/tools/mo/utils/custom_replacement_config.py deleted file mode 100644 index 83656f2f863f06..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/custom_replacement_config.py +++ /dev/null @@ -1,417 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import json -import logging as log -import os -from re import compile, match - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.graph import nodes_matching_name_pattern, sub_graph_between_nodes -from openvino.tools.mo.utils.json_schema import schema_dict -from openvino.tools.mo.utils.utils import get_mo_root_dir, refer_to_faq_msg - - -class CustomReplacementDescriptor(object): - registered_types = dict() - - def __init__(self, replacement_id: str, attrs: dict = None): - """ - Create class instance based on attrs dictionary which is read from the configuration file. - :param attrs: - """ - super(CustomReplacementDescriptor, self).__setattr__('replacement_id', replacement_id) - if attrs is not None: - super(CustomReplacementDescriptor, self).__setattr__('custom_attributes', - attrs.setdefault('custom_attributes', {})) - super(CustomReplacementDescriptor, self).__setattr__('_replacement_desc', attrs.copy()) - - def __getattr__(self, k): - return self._replacement_desc[k] - - def __setattr__(self, k, v): - # you can assign only existing attributes - if k not in self._replacement_desc: - raise AttributeError - self._replacement_desc[k] = v - - def has(self, attr): - """ - Check that attribute 'attr' is defined for the CustomReplacementDescriptor. - :param attr: attribute to check. - :return: True if the attribute exists and False otherwise. 
- """ - return attr in self._replacement_desc - - @classmethod - def register_type(cls, match_kind: str, class_type: object): - if match_kind in cls.registered_types: - log.warning('Class for match kind "{}" is already registered'.format(match_kind)) - else: - cls.registered_types[match_kind] = class_type - - @classmethod - def create_instance(cls, match_kind: str, replacement_id: str, attrs: dict = None): - """ - Fabric method to create proper object based on match_kind. - :param match_kind: match kind. - :param replacement_id: id of the replacement. - :param attrs: optional attributes to be set. - :return: object of the sub-class of the CustomLayerDescriptor class or None if the match kind is not registered. - """ - if attrs is None: - attrs = dict() - if match_kind in cls.registered_types: - return cls.registered_types[match_kind](replacement_id, attrs) - else: - raise Error('No class registered for match kind "{}". Supported match kinds are "{}". '.format( - match_kind, list(cls.registered_types.keys())) + - refer_to_faq_msg(65)) - - def sub_graph_instances(self): - raise Exception("The function 'get_sub_graph_instances' must be implemented in the sub-class.") - - def get_config_file_representation(self): - result = { - 'match_kind': self.match_kind, 'instances': self.instances, - 'inputs': self.inputs, 'outputs': self.outputs, - 'custom_attributes': self.custom_attributes, 'id': self.id - } - if self.has('op'): - result.update({'op': self.op}) - return result - - def get_inputs_description(self): - """ - Returns description of inputs of the layer with id 'layer_id'. The format of inputs is the following: list of - lists where each list contains information about nodes consuming the same tensor from outside of the graph. Each - element of the list is a pair where first element is a regular expression for the name of the node in the - sub-graph and the second is the input port of this node. - :return: description of inputs or None if layer with such id is not registered or information about inputs is - not available. - """ - if 'inputs' not in self._replacement_desc: - log.error("Information about inputs of layer with id '{}' is not available".format(self.replacement_id)) - return None - result = list() - for index, input_desc in enumerate(self._replacement_desc['inputs']): - result.append([(inp['node'], inp['port']) for inp in input_desc]) - return result - - def get_outputs_description(self): - """ - Returns description of outputs of the layer with id 'layer_id'. The format of outputs is the following: list of - pairs where the first element of the pair is a regular expression for the name of the node that produces output - of the sub-graph and the second is the output port of this node. - :return: description of outputs or None if layer with such id is not registered or information about outputs is - not available. - """ - if 'outputs' not in self._replacement_desc: - log.error("Information about outputs of layer with id '{}' is not available") - return None - return [(out['node'], out['port']) for out in self._replacement_desc['outputs']] - - def update_custom_replacement_attributes(self, graph: Graph): - """ - The function run specific functions to update attributes of the custom replacement description. Currently it - updates information about input/output nodes. - :param graph: graph to operate on. - :return: True if the update process completed successfully. 
- """ - raise Exception("The function 'update_custom_layer_attributes' must be implemented in the sub-class.") - - def validate_data(self): - """ - Validates layer description dictionary. - :return: list of errors identified. - """ - errors = list() - if not self.has('id'): - errors.append("Replacement id is not specified for custom replacement '{}'".format(self.replacement_id)) - if not self.has('instances') or self.instances == '': - errors.append("Attribute 'instances' is not specified for replacement '{}'".format(self.replacement_id)) - if not self.has('match_kind'): - errors.append("Replacement match type is not specified for replacement '{}'".format(self.replacement_id)) - return errors - - -class CustomReplacementDescriptorPoints(CustomReplacementDescriptor): - """ - Class that is used to describe custom replacement which is a sub-graph specified by start and end points. - """ - - def __init__(self, replacement_id: str, attrs: dict = None): - super().__init__(replacement_id, attrs) - if not self.has('include_inputs_to_sub_graph'): - super(CustomReplacementDescriptorPoints, self).__setattr__('include_inputs_to_sub_graph', True) - if not self.has('include_outputs_to_sub_graph'): - super(CustomReplacementDescriptorPoints, self).__setattr__('include_outputs_to_sub_graph', True) - - def get_config_file_representation(self): - result = { - 'match_kind': self.match_kind, 'instances': self.instances, - 'custom_attributes': self.custom_attributes, 'id': self.id, - 'include_inputs_to_sub_graph': bool(self.include_inputs_to_sub_graph), - 'include_outputs_to_sub_graph': bool(self.include_outputs_to_sub_graph) - } - if self.has('op'): - result.update({'op': self.op}) - return result - - def get_inputs_description(self): - return [[('^' + node_name + '$', 0)] for node_name in self.instances['start_points']] - - def get_outputs_description(self): - return [('^' + node_name + '$', 0) for node_name in self.instances['end_points']] - - def get_internal_input_nodes(self, graph: Graph): - """ - Gets list of node names getting input from outside of the sub-graph. This function checks whether input nodes - specified in the configuration file should be added to the sub-graph or not. If they should not be added to the - sub-graph then input nodes of the sub-graph are children of these nodes. - :param graph: graph to operate on. - :return: list of input node names. - """ - if not self.include_inputs_to_sub_graph: - log.debug('Do not include inputs to sub-graph for replacement with id {}'.format(self.replacement_id)) - new_start_nodes = set() - for start_node in self.instances['start_points']: - for _, out_node_name in graph.out_edges(start_node): - new_start_nodes.add(out_node_name) - start_nodes = list(new_start_nodes) - log.debug('New inputs are: {}'.format(start_nodes)) - return start_nodes - else: - return self.instances['start_points'] - - def get_internal_output_nodes(self, graph: Graph): - """ - Gets list of node names producing output outside of the sub-graph. This function checks whether output nodes - specified in the configuration file should be added to the sub-graph or not. If they should not be added to the - sub-graph then output nodes of the sub-graph are parents of these nodes. - :param graph: graph to operate on. - :return: list of output node names. 
- """ - if not self.include_outputs_to_sub_graph: - log.debug('Do not include outputs of sub-graph for replacement with id {}'.format(self.replacement_id)) - new_end_nodes = set() - for end_node in self.instances['end_points']: - for in_node_name, _ in graph.in_edges(end_node): - new_end_nodes.add(in_node_name) - end_nodes = list(new_end_nodes) - log.debug('New outputs are: {}'.format(end_nodes)) - return end_nodes - else: - return self.instances['end_points'] - - def update_custom_replacement_attributes(self, graph: Graph): - if not self.has('instances'): - raise Error("No instance(s) is(are) defined for the custom replacement '{}'. ".format(self.replacement_id) + - refer_to_faq_msg(66)) - if not isinstance(self.instances, dict): - raise Error("The instance must be a single dictionary for the custom replacement with id '{}'. ".format( - self.replacement_id) + - refer_to_faq_msg(67)) - - start_points = self.get_internal_input_nodes(graph) - end_points = self.get_internal_output_nodes(graph) - - matched_nodes = sub_graph_between_nodes(graph, start_points, end_points, include_control_flow=False) - output_tensors = set() - input_nodes_mapping = dict() # key is the input tensor name, value is the pair: (input_port, output_node_name) - for src_node_name, dst_node_name, edge_attrs in graph.edges(data=True): - dst_node = graph.node[dst_node_name] - - # edge outside sub-graph into sub-graph - if (src_node_name not in matched_nodes) and (dst_node_name in matched_nodes): - tensor_name = src_node_name + ":" + str(edge_attrs['out']) - if tensor_name not in input_nodes_mapping: - input_nodes_mapping[tensor_name] = list() - input_nodes_mapping[tensor_name].append(('^' + dst_node_name + '$', edge_attrs['in'])) - - # edge from inside sub-graph to outside sub-graph - if (src_node_name in matched_nodes) and (dst_node_name not in matched_nodes): - output_tensors.add(('^' + dst_node['pb'].input[edge_attrs['in']] + '$', edge_attrs['out'])) - - for node_name in graph.nodes(): - node = Node(graph, node_name) - if node_name in matched_nodes and len(node.out_nodes()) == 0 and node['pb'].op != 'Const': - log.debug("Node {} doesn't have output edges. Consider it output".format(node_name)) - output_tensors.add(('^' + node_name + '$', 0)) - - if not self.has('inputs'): - self._replacement_desc['inputs'] = [[{'node': desc[0], 'port': desc[1]} for desc in inp] - for inp in sorted(input_nodes_mapping.values())] - log.debug('Updated inputs of sub-graph for instance "{}"'.format(self.instances)) - - if not self.has('outputs'): - self._replacement_desc['outputs'] = [{'node': node, 'port': port} for node, port in sorted(output_tensors)] - log.debug('Updated outputs of sub-graph for instance "{}"'.format(self.instances)) - - def sub_graph_instances(self): - return [self.instances] - - -CustomReplacementDescriptor.register_type('points', CustomReplacementDescriptorPoints) - - -class CustomReplacementDescriptorScope(CustomReplacementDescriptor): - """ - Class that is used to describe custom layer which is a sub-graph specified by scope name. - """ - - def __init__(self, replacement_id: str, attrs: dict = None): - super().__init__(replacement_id, attrs) - - def update_custom_replacement_attributes(self, graph: Graph): - if not self.has('instances') or len(self.instances) == 0: - raise Error("No instances are defined for replacement with id '{}'. 
".format(self.replacement_id) + - refer_to_faq_msg(68)) - - pattern = self.instances[0] # use the first instance pattern to find input/output nodes patterns - # TODO verify that all instances will produce the same sub-graph - matched_nodes = nodes_matching_name_pattern(graph, pattern) - - output_tensors = set() - input_nodes_mapping = dict() # key is the input tensor name, value is the pair: (input_port, output_node_name) - for src_node_name, dst_node_name, edge_attrs in graph.edges(data=True): - dst_node = graph.node[dst_node_name] - - # edge outside sub-graph into sub-graph - if (src_node_name not in matched_nodes) and (dst_node_name in matched_nodes): - tensor_name = src_node_name + ":" + str(edge_attrs['out']) - if tensor_name not in input_nodes_mapping: - input_nodes_mapping[tensor_name] = list() - input_nodes_mapping[tensor_name].append((generate_pattern_for_node(graph, pattern, dst_node_name), - edge_attrs['in'])) - - # edge from inside sub-graph to outside sub-graph - if (src_node_name in matched_nodes) and (dst_node_name not in matched_nodes): - output_tensors.add( - (generate_pattern_for_node(graph, pattern, dst_node['pb'].input[edge_attrs['in']]), - edge_attrs['out'])) - - for node_name in graph.nodes(): - node = Node(graph, node_name) - if node_name in matched_nodes and len(node.out_nodes()) == 0 and node['pb'].op != 'Const': - log.debug("Node {} doesn't have output edges. Consider it output".format(node_name)) - output_tensors.add((generate_pattern_for_node(graph, pattern, node_name), 0)) - - if not self.has('inputs') or len(self._replacement_desc['inputs']) == 0: - self._replacement_desc['inputs'] = [[{'node': desc[0], 'port': desc[1]} for desc in inp] - for inp in sorted(input_nodes_mapping.values())] - log.debug('Updated inputs of sub-graph for instance "{}"'.format(self.instances)) - - if not self.has('outputs') or len(self._replacement_desc['outputs']) == 0: - self._replacement_desc['outputs'] = [{'node': node, 'port': port} for node, port in sorted(output_tensors)] - log.debug('Updated outputs of sub-graph for instance "{}"'.format(self.instances)) - - def sub_graph_instances(self): - return self.instances - - -CustomReplacementDescriptor.register_type('scope', CustomReplacementDescriptorScope) - - -class CustomReplacementDescriptorGeneral(CustomReplacementDescriptor): - def __init__(self, replacement_id: str, attrs: dict = None): - super().__init__(replacement_id, attrs) - - def validate_data(self): - """ - Validates layer description dictionary. - :return: list of errors identified. - """ - errors = list() - if not self.has('id'): - errors.append("Replacement id is not specified for custom replacement '{}'".format(self.replacement_id)) - if not self.has('match_kind'): - errors.append("Replacement match type is not specified for replacement '{}'".format(self.replacement_id)) - return errors - - -CustomReplacementDescriptor.register_type('general', CustomReplacementDescriptorGeneral) - - -def parse_custom_replacement_config_file(file_name: str): - """ - Reads custom replacement configuration file file_name. - :param file_name: name of the file to read from. - :return: The dictionary where key is the layer id and value is an instance of the CustomLayerDescriptor object. - """ - if not os.path.exists(file_name): - raise Error("Custom replacements configuration file '{}' does not exist. 
".format(file_name) + - refer_to_faq_msg(69)) - - data = load_and_validate_json_config(file_name) - result = list() - validation_errors = list() - for attrs in data: - if 'id' not in attrs: - raise Error('One of the custom replacements in the configuration file "{}" does not contain attribute ' - '"id". '.format(file_name) + - refer_to_faq_msg(71)) - if 'match_kind' not in attrs: - raise Error('One of the custom replacements in the configuration file "{}" does not contain attribute ' - '"match_kind". Possible values are "points", "scope" and "general". '.format(file_name) + - refer_to_faq_msg(71)) - desc = CustomReplacementDescriptor.create_instance(attrs['match_kind'], attrs['id'], attrs) - validation_errors.extend(desc.validate_data()) - result.append(desc) - if len(validation_errors) > 0: - raise Error("File '{}' validation failed:\n{}. ".format(file_name, "\n".join(validation_errors)) + - refer_to_faq_msg(72)) - return result - - -def generate_pattern_for_node(graph: Graph, sub_graph_pattern: str, node_name: str): - if sub_graph_pattern == '': - return node_name - node_name_components = node_name.split("/") - cur_name = '' - matched_index = None # index of the node name component to start new pattern from - compiled_pattern = compile(sub_graph_pattern) - for index in range(0, len(node_name_components)): - cur_name += node_name_components[index] + "/" - if match(compiled_pattern, cur_name): - matched_index = index - break - if matched_index is None: - raise RuntimeError('Node name "{}" does not match pattern "{}"'.format(node_name, sub_graph_pattern)) - - if sub_graph_pattern == '' or sub_graph_pattern[-1] != '/': - sub_graph_pattern += '/' - - sub_graph_nodes = nodes_matching_name_pattern(graph, sub_graph_pattern) - name_suffix = '/'.join(node_name_components[matched_index + 1:]) + '$' - if len([node for node in sub_graph_nodes if match(sub_graph_pattern + name_suffix, node)]) == 1: - return name_suffix - - raise RuntimeError('The pattern that uniquely identifies node "{}" using sub-graph pattern "{}" has not been found'. - format(node_name, sub_graph_pattern)) - - -def load_and_validate_json_config(config_file_name: str): - """ - Reads and validate custom replacement configuration file config_file_name. - :param config_file_name: name of the file to read from. - :return: A dictionary serialized from json config file. - """ - try: - with open(config_file_name, 'r') as f: - json_config = json.load(f) - try: - import fastjsonschema as json_validate - - validator = json_validate.compile(schema_dict) - validator(json_config) - except ModuleNotFoundError as e: - log.error("Module 'fastjsonschema' for json validation not installed. Please update requirements.", - extra={'is_warning': True}) - - except Exception as e: - raise Error("Failed to parse custom replacements configuration file '{}': {}. ".format(config_file_name, e) + - refer_to_faq_msg(70)) from e - - return json_config diff --git a/tools/mo/openvino/tools/mo/utils/dsu.py b/tools/mo/openvino/tools/mo/utils/dsu.py deleted file mode 100644 index 9e92f735203420..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/dsu.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -class DSUElem: - """ - An object that represents one DSU element. - """ - name = '' - parent = '' - rank = 1 - - def __init__(self, name): - self.name = name - self.parent = name - self.rank = 1 - - -class DSU: - """ - Naive implementation of the "disjoint set union" data structure. 
- """ - map = dict() - - def __init__(self, elems: list): - self.map = {elem.name: elem for elem in elems} - pass - - def find_elem(self, name: str): - return self.map[name] - - def find_parent(self, elem: DSUElem): - if elem.parent == elem.name: - return elem - parent_elem = self.find_parent(self.find_elem(elem.parent)) - elem.parent = parent_elem.name - return parent_elem - - def union(self, elem1: DSUElem, elem2: DSUElem): - elem1 = self.find_parent(elem1) - elem2 = self.find_parent(elem2) - if elem1.name == elem2.name: # already in the same set - return - - if elem1.rank < elem2.rank: - elem1.parent = elem2.name - elif elem1.rank > elem2.rank: - elem2.parent = elem1.name - else: - elem1.parent = elem2.name - elem2.rank = elem2.rank + 1 diff --git a/tools/mo/openvino/tools/mo/utils/environment_setup_utils.py b/tools/mo/openvino/tools/mo/utils/environment_setup_utils.py deleted file mode 100644 index 90e42988d715e7..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/environment_setup_utils.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import sys - -# do not print INFO and WARNING messages from TensorFlow -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' - - -def get_imported_module_version(imported_module): - """ - Get imported module version - :return: version(str) or raise AttributeError exception - """ - version_attrs = ("__version__", "VERSION", "version") - installed_version = None - for attr in version_attrs: - installed_version = getattr(imported_module, attr, None) - if isinstance(installed_version, str): - return installed_version - else: - installed_version = None - - if installed_version is None: - raise AttributeError("{} module doesn't have version attribute".format(imported_module)) - else: - return installed_version - - -def get_environment_setup(framework): - """ - Get environment setup such as Python version, TensorFlow version - :param framework: framework name - :return: a dictionary of environment variables - """ - env_setup = dict() - python_version = "{}.{}.{}".format(sys.version_info.major, - sys.version_info.minor, - sys.version_info.micro) - env_setup['python_version'] = python_version - try: - if framework == 'tf': - exec("import tensorflow") - env_setup['tensorflow'] = get_imported_module_version(sys.modules["tensorflow"]) - exec("del tensorflow") - except (AttributeError, ImportError): - pass - env_setup['sys_platform'] = sys.platform - return env_setup diff --git a/tools/mo/openvino/tools/mo/utils/error.py b/tools/mo/openvino/tools/mo/utils/error.py deleted file mode 100644 index af936093924eed..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/error.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import re - - -class BasicError(Exception): - """ Base class for all exceptions in Model Conversion API - - It operates like Exception but when it is converted to str, - it formats string as args[0].format(*args[1:]), where - args are arguments provided when an exception instance is - created. - """ - - def __str__(self): - if len(self.args) <= 1: - return Exception.__str__(self) - return self.args[0].format(*self.args[1:]) # pylint: disable=unsubscriptable-object - - -class FrameworkError(BasicError): - """ User-friendly error: raised when the error on the framework side. """ - pass - - -class Error(BasicError): - """ User-friendly error: raised when the error on the user side. 
""" - pass - - -class InternalError(BasicError): - """ Not user-friendly error: user cannot fix it and it points to the bug inside MO. """ - pass - - -def classify_error_type(e): - patterns = [ - # Example: No module named 'openvino._offline_transformations.offline_transformations_api' - r"No module named \'\S+\'", - # Example: cannot import name 'IECore' from 'openvino.inference_engine' (unknown location) - r"cannot import name \'\S+\'", - ] - error_message = str(e) - for pattern in patterns: - m = re.search(pattern, error_message) - if m: - return m.group(0) - return "undefined" - - -def legacy_path_error(functionality_description): - raise Exception("{}Please try to install openvino-dev and use convert_model() " - "from openvino.tools.mo.".format(functionality_description)) diff --git a/tools/mo/openvino/tools/mo/utils/find_ie_version.py b/tools/mo/openvino/tools/mo/utils/find_ie_version.py deleted file mode 100644 index 47f069af0329fc..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/find_ie_version.py +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import sys -import platform -import subprocess # nosec - -lib_env_key = "PATH" if platform.system() == "Windows" else "LD_LIBRARY_PATH" -if lib_env_key not in os.environ: - os.environ[lib_env_key] = "" - -python_path_key = "PYTHONPATH" -if python_path_key not in os.environ: - os.environ[python_path_key] = "" - -lib_path_orig = os.environ[lib_env_key] -python_path_orig = os.environ[python_path_key] - - -def setup_env(module="", libs=[]): - """ - Update os.environ variables with given values. - :param module: path to python module - :param libs: list with paths to libraries - """ - os.environ[python_path_key] = os.pathsep.join([module, os.environ[python_path_key]]) - os.environ[lib_env_key] = os.pathsep.join([*libs, os.environ[lib_env_key]]) - - -def reset_env(): - """ - Reset os.environ variables to default values - """ - os.environ[python_path_key] = python_path_orig - os.environ[lib_env_key] = lib_path_orig - - -def try_to_import_ie(module="", libs=[], silent=False): - """ - Check if OpenVINO Python API modules exists and in case of success - environment will be set with given values. - :param module: path to python module - :param libs: list with paths to libraries - :param silent: hide all output - """ - path_to_script = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'check_ie_bindings.py') - # We need to execute python modules checker in subprocess to avoid issue with environment - # in case if previous import was unsuccessful it can fail further imports even if sys.path - # will be restored to initial default values. - # To pass environment to sub-process PATH/LD_LIBRARY_PATH and PYTHONPATH are used from - # os.environ that is set after setup_env() - setup_env(module=module, libs=libs) - cmd_args = [sys.executable, path_to_script, "--path_to_module", "PYTHONPATH" if module == "" else module] - if silent: - cmd_args.append("--silent") - - status = subprocess.run(cmd_args, env=os.environ) - if status.returncode == 0: - return True - else: - reset_env() - return False - - -def find_ie_version(silent=False): - """ - Tries to import OpenVINO Python API bindings. In case of successful import - PATH/LD_LIBRARY_PATH and PYTHONPATH environment variables will be set - This variables must be passed to subprocess in order to execute OV python bindings. 
- Example: - if find_ie_version(): - subprocess.run([sys.executable, path_to_script], env=os.environ) - - """ - if try_to_import_ie(silent=silent): - return True - - script_path = os.path.realpath(os.path.dirname(__file__)) - - # Windows - bindings_paths_windows = [ - # Local builds - { - "module": os.path.join(script_path, '../../../../../../bin/intel64/Release/python/'), - "libs": [ - os.path.join(script_path, '../../../../../../bin/intel64/Release'), - os.path.join(script_path, '../../../../../../temp/tbb/bin'), - ] - }, - { - "module": os.path.join(script_path, '../../../../../../bin/intel64/Debug/python/'), - "libs": [ - os.path.join(script_path, '../../../../../../bin/intel64/Debug'), - os.path.join(script_path, '../../../../../../temp/tbb/bin'), - ] - }, - ] - - # Linux / Darwin - bindings_paths_linux = [ - # Local builds - { - "module": os.path.join(script_path, '../../../../../../bin/intel64/Release/python/'), - "libs": [ - os.path.join(script_path, '../../../../../../bin/intel64/Release'), - ] - }, - { - "module": os.path.join(script_path, '../../../../../../bin/intel64/RelWithDebInfo/python'), - "libs": [ - os.path.join(script_path, '../../../../../../bin/intel64/RelWithDebInfo'), - ] - }, - { - "module": os.path.join(script_path, '../../../../../../bin/intel64/Debug/python'), - "libs": [ - os.path.join(script_path, '../../../../../../bin/intel64/Debug'), - ] - } - ] - - bindings_paths = bindings_paths_windows if platform.system() == "Windows" else bindings_paths_linux - for item in bindings_paths: - module = item['module'] - if not os.path.exists(module): - continue - if try_to_import_ie(module=os.path.normpath(module), libs=item['libs'] if 'libs' in item else [], silent=silent): - return True - - return False - - -if __name__ == "__main__": - if not find_ie_version(): - exit(1) diff --git a/tools/mo/openvino/tools/mo/utils/find_inputs.py b/tools/mo/openvino/tools/mo/utils/find_inputs.py deleted file mode 100644 index a52733412beb48..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/find_inputs.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import networkx as nx - -from openvino.tools.mo.graph.graph import Node, Graph - - -def find_nodes_by_attribute_value(graph: Graph, attr: str, attr_name: str): - return [id for id, v in nx.get_node_attributes(graph, attr).items() if v == attr_name] - - -def find_inputs(graph: Graph): - return find_nodes_by_attribute_value(graph, 'type', 'Parameter') - - -def find_outputs(graph: Graph): - outputs = [] - for node_id in find_nodes_by_attribute_value(graph, 'op', 'Result'): - parents = Node(graph, node_id).in_nodes() - assert len(parents) == 1, 'Result node should have exactly one input' - parent = parents[0].id - outputs.append(parent) - return list(set(outputs)) diff --git a/tools/mo/openvino/tools/mo/utils/get_ov_update_message.py b/tools/mo/openvino/tools/mo/utils/get_ov_update_message.py deleted file mode 100644 index effd438af75597..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/get_ov_update_message.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import datetime - -msg_fmt = 'Check for a new version of Intel(R) Distribution of OpenVINO(TM) toolkit here {0} ' \ - 'or on https://github.com/openvinotoolkit/openvino' - - -def get_ov_update_message(): - expected_update_date = datetime.date(year=2024, month=12, day=1) - current_date = datetime.date.today() - - link = 
'https://software.intel.com/content/www/us/en/develop/tools/openvino-toolkit/download.html?cid=other&source=prod&campid=ww_2023_bu_IOTG_OpenVINO-2023-1&content=upg_all&medium=organic' - - return msg_fmt.format(link) if current_date >= expected_update_date else None - - -def get_compression_message(): - link = "https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html" - message = '[ INFO ] Generated IR will be compressed to FP16. ' \ - 'If you get lower accuracy, please consider disabling compression explicitly ' \ - 'by adding argument --compress_to_fp16=False.\n' \ - 'Find more information about compression to FP16 at {}'.format(link) - return message - - -def get_try_legacy_fe_message(): - message = '[ INFO ] You can also try to use legacy TensorFlow Frontend by using argument --use_legacy_frontend.\n' - return message - - -def get_ovc_message(): - link = "https://docs.openvino.ai/2023.2/openvino_docs_OV_Converter_UG_prepare_model_convert_model_MO_OVC_transition.html" - message = '[ INFO ] MO command line tool is considered as the legacy conversion API as of OpenVINO 2023.2 release.\n' \ - 'In 2025.0 MO command line tool and openvino.tools.mo.convert_model() will be removed. ' \ - 'Please use OpenVINO Model Converter (OVC) or openvino.convert_model(). ' \ - 'OVC represents a lightweight alternative of MO and provides simplified model conversion API. \n' \ - 'Find more information about transition from MO to OVC at {}'.format(link) - - return message diff --git a/tools/mo/openvino/tools/mo/utils/graph.py b/tools/mo/openvino/tools/mo/utils/graph.py deleted file mode 100644 index 90938d85d3cfdc..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/graph.py +++ /dev/null @@ -1,312 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from collections import deque -from re import match, compile - -import networkx as nx - -from openvino.tools.mo.graph.graph import Node, Graph, set_edge_attribute_between_nodes, get_edge_attribute_between_nodes -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def backward_bfs_for_operation(start_node: Node, op_names: list, skip_op_list: list = None): - """ - Find node with 'op' attribute equal to one of from 'op_name', searching in the backward direction. - In case of branching algorithm goes into each branch, but if it can't find layer in one of them it returns - empty list. - - :param start_node: Start node for BFS algorithm - :param op_names: The list with names of operations to search - :param skip_op_list: list of operations to be stopped at if they are met - """ - if skip_op_list is None: - skip_op_list = [] - ret = [] - q = deque([start_node]) - while len(q) != 0: - node = q.popleft() - in_nodes_size = len(node.in_nodes()) - for id in range(in_nodes_size): # in_nodes() can return either list or dict - pnode = node.in_node(id) - if pnode.kind == 'op': - if pnode.has_valid('op') and pnode.op in op_names: - if pnode.id not in ret: - ret.append(pnode.id) - else: - if pnode.op not in skip_op_list: - q.append(pnode) - elif pnode.kind == 'data' and pnode.value is None: - q.append(pnode) - return [Node(start_node.graph, x) for x in ret] - - -def bfs_search(graph: Graph, start_nodes: list = list()): - """ - Performs breadth-first search over a graph and returns a list of nodes in the BFS order. - :param graph: networkx graph to traverse. - :param start_nodes: list of start nodes of the graph. 
If the list is empty then start from all nodes that do not - have input nodes. - :return: the list of nodes in the BFS order. - """ - result = list() - if len(start_nodes) == 0: - start_nodes = [node_name for node_name in graph.nodes() if len(graph.in_edges(node_name)) == 0] - - visited = set(start_nodes) - d = deque(start_nodes) - - while len(d) != 0: - cur_node_name = d.popleft() - result.append(cur_node_name) - for src_node, dst_node in graph.out_edges(cur_node_name): - if dst_node not in visited: - d.append(dst_node) - visited.add(dst_node) - return result - - -def nodes_matching_name_pattern(graph: Graph, pattern: str): - """ - Returns list of node names of the graph that match regular expression. - :param graph: graph to operate on. - :param pattern: regular expression describing node name pattern. - :return: list of matched node names. - """ - compiled_pattern = compile(pattern) - return [node_name for node_name in list(graph.nodes()) if match(compiled_pattern, node_name)] - - -def is_connected_component(graph: Graph, node_names: list): - """ - Checks that specified list of nodes forms a connected sub-graph. It ignores edges direction. - The algorithm is the following. Run BFS from one of the nodes from the node_names list ignoring edges order and - visiting only nodes from the node_names list. Prepare list of visited nodes. If this list is equal to the - node_names list (we actually check that the node_names set is sub-set of 'visited' set that is equivalent) then the - sub-graph is connected. - :param graph: graph to operate on. - :param node_names: list of node names to be checked. - :return: Result of the check. - """ - if len(node_names) == 0: - return True - - d = deque([node_names[0]]) - visited = set([node_names[0]]) - while len(d) != 0: - cur_node_name = d.popleft() - visited.add(cur_node_name) - # find adjacent nodes from the list of node_names. Ignoring edges direction - adj_nodes = [src_node for src_node, _ in graph.in_edges(cur_node_name) if src_node in node_names] + \ - [dst_node for _, dst_node in graph.out_edges(cur_node_name) if dst_node in node_names] - for adj_node in adj_nodes: - if adj_node not in visited: - d.append(adj_node) - visited.add(adj_node) - return set(node_names).issubset(visited) - - -def sub_graph_between_nodes(graph: Graph, start_nodes: list, end_nodes: list, detect_extra_start_node: callable=None, - include_control_flow=True, allow_non_reachable_end_nodes=False): - """ - Finds nodes of the sub-graph between 'start_nodes' and 'end_nodes'. Input nodes for the sub-graph nodes are also - added to the sub-graph. Constant inputs of the 'start_nodes' are also added to the sub-graph. - :param graph: graph to operate on. - :param start_nodes: list of nodes names that specifies start nodes. - :param end_nodes: list of nodes names that specifies end nodes. - :param detect_extra_start_node: callable function to add additional nodes to the list of start nodes instead of - traversing the graph further. The list of additional start nodes is returned of the function is not None. - :param include_control_flow: flag to specify whether to follow the control flow edges or not - :param allow_non_reachable_end_nodes: do not fail if the end nodes are not reachable from the start nodes - :return: list of nodes of the identified sub-graph or None if the sub-graph cannot be extracted. 
- """ - sub_graph_nodes = list() - visited = set(start_nodes) - d = deque(start_nodes) - extra_start_nodes = [] - - nx.set_node_attributes(G=graph, name='prev', values=None) - while len(d) != 0: - cur_node_id = d.popleft() - sub_graph_nodes.append(cur_node_id) - if cur_node_id not in end_nodes: # do not add output nodes of the end_nodes - for _, dst_node_name, attrs in graph.out_edges(cur_node_id, data=True): - if dst_node_name not in visited and (include_control_flow or not attrs.get('control_flow_edge', False)): - d.append(dst_node_name) - visited.add(dst_node_name) - graph.node[dst_node_name]['prev'] = cur_node_id - - for src_node_name, _, attrs in graph.in_edges(cur_node_id, data=True): - # add input nodes for the non-start_nodes - if cur_node_id not in start_nodes and src_node_name not in visited and\ - (include_control_flow or not attrs.get('control_flow_edge', False)): - if detect_extra_start_node is not None and detect_extra_start_node(Node(graph, cur_node_id)): - extra_start_nodes.append(cur_node_id) - else: - d.append(src_node_name) - graph.node[src_node_name]['prev'] = cur_node_id - visited.add(src_node_name) - - # use forward dfs to check that all end nodes are reachable from at least one of input nodes - forward_visited = set() - for start_node in start_nodes: - graph.dfs(start_node, forward_visited) - for end_node in end_nodes: - if not allow_non_reachable_end_nodes and end_node not in forward_visited: - raise Error('End node "{}" is not reachable from start nodes: {}. '.format(end_node, start_nodes) + - refer_to_faq_msg(74)) - - for node_id in sub_graph_nodes: - # sub-graph should not contain Placeholder nodes - if graph.node[node_id].get('op', '') == 'Parameter': - path = list() - cur_node = node_id - while cur_node and 'prev' in graph.node[cur_node]: - path.append(str(cur_node)) - cur_node = graph.node[cur_node]['prev'] - log.debug("The path from input node is the following: {}".format('\n'.join(path))) - raise Error('The matched sub-graph contains network input node "{}". '.format(node_id) + - refer_to_faq_msg(75)) - if detect_extra_start_node is None: - return sub_graph_nodes - else: - return sub_graph_nodes, extra_start_nodes - - -def invert_sub_graph_between_nodes(graph: Graph, start_nodes: list, end_nodes: list, detect_extra_start_node: callable=None): - """ - Finds nodes of the sub-graph between 'start_nodes' and 'end_nodes'. But doing it from start_nodes stepping - backward by in edges. - - Input nodes for the sub-graph nodes are also added to the sub-graph. Constant inputs of the 'start_nodes' - are also added to the sub-graph. - :param graph: graph to operate on. - :param start_nodes: list of nodes names that specifies start nodes. - :param end_nodes: list of nodes names that specifies end nodes. - :return: list of nodes of the identified sub-graph or None if the sub-graph cannot be extracted. 
- """ - sub_graph_nodes = list() - visited = set(start_nodes) - d = deque(start_nodes) - extra_start_nodes = [] - - nx.set_node_attributes(G=graph, name='prev', values=None) - while len(d) != 0: - cur_node_name = d.popleft() - sub_graph_nodes.append(cur_node_name) - if cur_node_name not in start_nodes and \ - detect_extra_start_node is not None and detect_extra_start_node(Node(graph, cur_node_name)): - extra_start_nodes.append(cur_node_name) - else: - if cur_node_name not in end_nodes: # do not add output nodes of the end_nodes - for src_node_name, _ in graph.in_edges(cur_node_name): - if src_node_name not in visited: - d.append(src_node_name) - visited.add(src_node_name) - graph.node[cur_node_name]['prev'] = src_node_name - - for node_name in sub_graph_nodes: - # sub-graph should not contain Input nodes - if graph.node[node_name].get('op', '') == 'Parameter': - path = list() - cur_node = node_name - while cur_node and 'prev' in graph.node[cur_node]: - path.append(str(cur_node)) - cur_node = graph.node[cur_node]['prev'] - log.debug("The path from input node is the following: {}".format('\n'.join(path))) - raise Error('The matched sub-graph contains network input node "{}". '.format(node_name) + - refer_to_faq_msg(75)) - if detect_extra_start_node is None: - return sub_graph_nodes - else: - return sub_graph_nodes, extra_start_nodes - - -def node_neighbourhood(node_name: str, depth: int, next_node_fn): - """ - Find neighbourhood of the node.. - :param node_name: name of the node to find neighbourhood for. - :param depth: maximum depth of search nodes. - :param next_node_fn: callable that accepts node name and should return list of adjacent nodes. - :return: list of names of nodes in the neighbourhood. - """ - dist = dict() - dist[node_name] = 0 - deq = deque([node_name]) - while len(deq) != 0: - cur_node_name = deq.popleft() - cur_dist = dist[cur_node_name] - if cur_dist < depth: - for next_node_name in next_node_fn(cur_node_name): - next_dist = dist.setdefault(next_node_name, depth + 1) - if next_dist > cur_dist + 1: - dist[next_node_name] = cur_dist + 1 - deq.append(next_node_name) - return list(dist.keys()) - - -def node_incoming_neighbourhood(graph: Graph, node_name: str, depth: int): - """ - Find input neighbourhood of the node. - :param graph: graph to operate on. - :param node_name: name of the node to find neighbourhood for. - :param depth: maximum depth of input nodes. - :return: list of names of nodes in the neighbourhood. - """ - return node_neighbourhood(node_name, depth, lambda node_name: [u for u, v in graph.in_edges([node_name])]) - - -def node_outcoming_neighbourhood(graph: Graph, node_name: str, depth: int): - """ - Find output neighbourhood of the node. - :param graph: graph to operate on. - :param node_name: name of the node to find neighbourhood for. - :param depth: maximum depth of output nodes. - :return: list of names of nodes in the neighbourhood. - """ - return node_neighbourhood(node_name, depth, lambda node_name: [v for u, v in graph.out_edges([node_name])]) - - -def scope_output_nodes(graph: Graph, scope: str, scope_delimiter: str='/'): - """ - The function returns nodes producing output of the sub-graph defined by scope (name prefix). The node is considered - output of the scope if it is in this scope and it's output is outside of the scope. - :param graph: graph to operate on. - :param scope: string with scope (prefix of the node name). - :param scope_delimiter: delimiter between scope parts. - :return: list of Node objects which are outputs of the scope. 
- """ - if scope[-1] != scope_delimiter: - scope += scope_delimiter - - result = set() - for node_id in graph.nodes(): - if node_id.startswith(scope): - for _, out_node_name in graph.out_edges(node_id): - if not out_node_name.startswith(scope): - result.add(node_id) - break - return [Node(graph, node_id) for node_id in result] - - -def clear_tensor_names_info(nodes: list): - """ - Clears tensor names information from 'fw_tensor_debug_info' attribute for all edges outgoing from - given nodes. - This method is used in cases when transformation adds postprocessing and the result does not - correspond to the original tensor. - This method should only be used during the front phase. - :param nodes: list of Node objects. - """ - for node in nodes: - for out_idx in node.out_nodes(): - out_node = node.out_node(out_idx) - fw_info_list = get_edge_attribute_between_nodes(node, out_node, 'fw_tensor_debug_info') - new_fw_info = [] - for fw_info in fw_info_list: - if fw_info is not None and len(fw_info) >= 2: - new_fw_info.append((fw_info[0], fw_info[1], None)) - set_edge_attribute_between_nodes(node, out_node, 'fw_tensor_debug_info', new_fw_info) - diff --git a/tools/mo/openvino/tools/mo/utils/guess_framework.py b/tools/mo/openvino/tools/mo/utils/guess_framework.py deleted file mode 100644 index d9058bed878e76..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/guess_framework.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import re -from argparse import Namespace - -from openvino.tools.mo.utils.error import Error - - -def deduce_legacy_frontend_by_namespace(argv: Namespace): - if not hasattr(argv, 'framework') or not argv.framework: - if getattr(argv, 'saved_model_dir', None) or getattr(argv, 'input_meta_graph', None): - argv.framework = 'tf' - elif getattr(argv, 'input_proto', None): - argv.framework = 'caffe' - elif argv.input_model is None: - raise Error('Path to input model is required: use --input_model.') - else: - argv.framework = guess_framework_by_ext(argv.input_model) - - return map(lambda x: argv.framework == x, ['tf', 'caffe', 'kaldi', 'onnx']) - - -def guess_framework_by_ext(input_model_path: str) -> int: - if re.match(r'^.*\.caffemodel$', input_model_path): - return 'caffe' - elif re.match(r'^.*\.pb$', input_model_path): - return 'tf' - elif re.match(r'^.*\.pbtxt$', input_model_path): - return 'tf' - elif re.match(r'^.*\.nnet$', input_model_path): - return 'kaldi' - elif re.match(r'^.*\.mdl', input_model_path): - return 'kaldi' - elif re.match(r'^.*\.onnx$', input_model_path): - return 'onnx' diff --git a/tools/mo/openvino/tools/mo/utils/help.py b/tools/mo/openvino/tools/mo/utils/help.py deleted file mode 100644 index 2b80f1d8c52def..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/help.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -def get_convert_model_help_specifics(): - from openvino.tools.mo.utils.cli_parser import CanonicalizeTransformationPathCheckExistenceAction, \ - CanonicalizePathCheckExistenceAction, CanonicalizeExtensionsPathCheckExistenceAction, \ - CanonicalizePathCheckExistenceIfNeededAction, readable_file_or_dir, readable_dirs_or_files_or_empty, \ - check_positive - from openvino.tools.mo.utils.version import VersionChecker - return { - 'input_model': - {'description': - 'Tensorflow*: a file with a pre-trained model ' - '(binary or text .pb file after freezing). 
' - 'Caffe*: a model proto file with model weights.', 'action': CanonicalizePathCheckExistenceAction, - 'type': readable_file_or_dir, - 'aliases': {'-w', '-m'}}, - 'input_shape': - {'description': - 'Input shape(s) that should be fed to an input node(s) ' - 'of the model. Shape is defined as a comma-separated ' - 'list of integer numbers enclosed in parentheses or ' - 'square brackets, for example [1,3,227,227] or ' - '(1,227,227,3), where the order of dimensions depends ' - 'on the framework input layout of the model. For ' - 'example, [N,C,H,W] is used for ONNX* models and ' - '[N,H,W,C] for TensorFlow* models. The shape can ' - 'contain undefined dimensions (? or -1) and should fit ' - 'the dimensions defined in the input operation of the ' - 'graph. Boundaries of undefined dimension can be ' - 'specified with ellipsis, for example ' - '[1,1..10,128,128]. One boundary can be undefined, for ' - 'example [1,..100] or [1,3,1..,1..]. If there are ' - 'multiple inputs in the model, --input_shape should ' - 'contain definition of shape for each input separated ' - 'by a comma, for example: [1,3,227,227],[2,4] for a ' - 'model with two inputs with 4D and 2D shapes. ' - 'Alternatively, specify shapes with the --input option.'}, - 'input': - {'description': - 'Quoted list of comma-separated input nodes names with ' - 'shapes, data types, and values for freezing. The order ' - 'of inputs in converted model is the same as order of ' - 'specified operation names. The shape and value are ' - 'specified as comma-separated lists. The data type of ' - 'input node is specified in braces and can have one of ' - 'the values: f64 (float64), f32 (float32), f16 ' - '(float16), i64 (int64), i32 (int32), u8 (uint8), ' - 'boolean (bool). Data type is optional. If it\'s not ' - 'specified explicitly then there are two options: if ' - 'input node is a parameter, data type is taken from the ' - 'original node dtype, if input node is not a parameter, ' - 'data type is set to f32. Example, to set `input_1` ' - 'with shape [1,100], and Parameter node `sequence_len` ' - 'with scalar input with value `150`, and boolean input ' - '`is_training` with `False` value use the following ' - 'format: \n ' - '\"input_1[1,100],sequence_len->150,is_training->False\". ' - 'Another example, use the following format to set input ' - 'port 0 of the node `node_name1` with the shape [3,4] ' - 'as an input node and freeze output port 1 of the node ' - '\"node_name2\" with the value [20,15] of the int32 type ' - 'and shape [2]: \n ' - '\"0:node_name1[3,4],node_name2:1[2]{i32}->[20,15]\".'}, - 'mean_values': - {'description': - 'Mean values to be used for the input image per ' - 'channel. Values to be provided in the (R,G,B) or ' - '[R,G,B] format. Can be defined for desired input of ' - 'the model, for example: "--mean_values ' - 'data[255,255,255],info[255,255,255]". The exact ' - 'meaning and order of channels depend on how the ' - 'original model was trained.'}, - 'scale_values': - {'description': - 'Scale values to be used for the input image per ' - 'channel. Values are provided in the (R,G,B) or [R,G,B] ' - 'format. Can be defined for desired input of the model, ' - 'for example: "--scale_values ' - 'data[255,255,255],info[255,255,255]". The exact ' - 'meaning and order of channels depend on how the ' - 'original model was trained. 
If both --mean_values and ' - '--scale_values are specified, the mean is subtracted ' - 'first and then scale is applied regardless of the ' - 'order of options in command line.'}, - 'source_layout': - {'description': - 'Layout of the input or output of the model in the ' - 'framework. Layout can be specified in the short form, ' - 'e.g. nhwc, or in complex form, e.g. \"[n,h,w,c]\". ' - 'Example for many names: \"in_name1([n,h,w,c]),in_name2(' - 'nc),out_name1(n),out_name2(nc)\". Layout can be ' - 'partially defined, \"?\" can be used to specify ' - 'undefined layout for one dimension, \"...\" can be used ' - 'to specify undefined layout for multiple dimensions, ' - 'for example \"?c??\", \"nc...\", \"n...c\", etc.'}, - 'transform': - {'description': - 'Apply additional transformations. Usage: \"--transform ' - 'transformation_name1[args],transformation_name2...\" ' - 'where [args] is key=value pairs separated by ' - 'semicolon. Examples: \"--transform LowLatency2\" or \"--' - 'transform Pruning" or "--transform ' - 'LowLatency2[use_const_initializer=False]" or "--' - 'transform "MakeStateful[param_res_names= {\'input_name_1\':' - '\'output_name_1\',\'input_name_2\':\'output_name_2\'}]\" \n' - 'Available transformations: "LowLatency2", "MakeStateful", "Pruning"'}, - 'extensions': - {'description': - 'Paths or a comma-separated list of paths to libraries ' - '(.so or .dll) with extensions. For the legacy MO path ' - '(if `--use_legacy_frontend` is used), a directory or a ' - 'comma-separated list of directories with extensions ' - 'are supported. To disable all extensions including ' - 'those that are placed at the default location, pass an empty string.', - 'action': CanonicalizeExtensionsPathCheckExistenceAction, - 'type': readable_dirs_or_files_or_empty}, - 'transformations_config': - {'description': - 'Use the configuration file with transformations ' - 'description. 
Transformations file can be specified as ' - 'relative path from the current directory, as absolute ' - 'path or as arelative path from the mo root directory.', - 'action': CanonicalizeTransformationPathCheckExistenceAction}, - 'counts': - {'action': CanonicalizePathCheckExistenceIfNeededAction}, - 'version': - {'action': 'version', - 'version': 'Version of Model Optimizer is: {}'.format(VersionChecker().get_ie_version())}, - 'scale': - {'type': float, - 'aliases': {'-s'}}, - 'batch': - {'type': check_positive, - 'aliases': {'-b'}}, - 'input_proto': - {'aliases': {'-d'}}, - 'log_level': - {'choices': ['CRITICAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']} - } - - -# TODO: remove this when internal converting of params to string is removed -def get_to_string_methods_for_params(): - from openvino.tools.mo.utils.cli_parser import path_to_str_or_object, str_list_to_str, \ - mean_scale_value_to_str, source_target_layout_to_str, layout_param_to_str, transform_param_to_str, \ - extensions_to_str_or_extensions_class, batch_to_int, transformations_config_to_str - return { - 'input_model': path_to_str_or_object, - 'output': str_list_to_str, - 'mean_values': mean_scale_value_to_str, - 'scale_values': mean_scale_value_to_str, - 'source_layout': source_target_layout_to_str, - 'target_layout': source_target_layout_to_str, - 'layout': layout_param_to_str, - 'transform': transform_param_to_str, - 'extensions': extensions_to_str_or_extensions_class, - 'batch': batch_to_int, - 'transformations_config': transformations_config_to_str, - 'saved_model_tags': str_list_to_str - } diff --git a/tools/mo/openvino/tools/mo/utils/ie_version.py b/tools/mo/openvino/tools/mo/utils/ie_version.py deleted file mode 100644 index dce2cd0e5765a6..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ie_version.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -def get_ie_version(): - try: - from openvino.runtime import get_version # pylint: disable=import-error,no-name-in-module - return get_version() - except: - return None - - -if __name__ == "__main__": - print(get_ie_version()) diff --git a/tools/mo/openvino/tools/mo/utils/import_extensions.py b/tools/mo/openvino/tools/mo/utils/import_extensions.py deleted file mode 100644 index f408b50ff2dd5b..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/import_extensions.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import importlib -import logging as log -import os -import pkgutil -import sys - -from openvino.tools.mo.back.replacement import BackReplacementPattern -from openvino.tools.mo.load.loader import Loader -from openvino.tools.mo.middle.replacement import MiddleReplacementPattern -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.class_registration import _check_unique_ids, update_registration, \ - get_enabled_and_disabled_transforms, clear_registered_classes_dict -from openvino.tools.mo.utils.model_analysis import AnalyzeAction - - -def get_internal_dirs(framework: str, get_front_classes: callable): - front_classes = get_front_classes() - return { - ('ops', ): [Op], - ('analysis',): [AnalyzeAction], - ('load', framework): [Loader], - ('front', ): front_classes, - ('front', framework): front_classes, - ('front', framework, 'extractors'): front_classes, - ('middle', ): [MiddleReplacementPattern], - ('back', ): [BackReplacementPattern]} - -def import_by_path(path: str, middle_names: list = (), 
prefix: str = ''): - for module_loader, name, ispkg in pkgutil.iter_modules([path]): - importlib.import_module('{}{}.{}'.format(prefix, '.'.join(middle_names), name)) - - -def default_path(): - EXT_DIR_NAME = '.' - return os.path.abspath(os.getcwd().join(EXT_DIR_NAME)) - - -def load_dir(framework: str, path: str, get_front_classes: callable): - """ - Assuming the following sub-directory structure for path: - - front/ - / - .py - / - .py - ops/ - .py - middle/ - .py - back/ - .py - - This function loads modules in the following order: - 1. ops/.py - 2. front/.py - 3. front//.py - 4. middle/.py - 5. back/.py - - Handlers loaded later override earlier registered handlers for an op. - 1, 2, 3 can concur for the same op, but 4 registers a transformation pass - and it shouldn't conflict with any stuff loaded by 1, 2 or 3. - It doesn't load files from front/ - """ - log.info("Importing extensions from: {}".format(path)) - root_dir, ext = os.path.split(path) - sys.path.insert(0, root_dir) - - enabled_transforms, disabled_transforms = get_enabled_and_disabled_transforms() - - internal_dirs = get_internal_dirs(framework, get_front_classes) - prefix = 'openvino.tools.' if ext == 'mo' else '' - - exclude_modules = {'tf', 'onnx', 'kaldi', 'caffe'} - exclude_modules.remove(framework) - - for p in internal_dirs.keys(): - import_by_path(os.path.join(path, *p), [ext, *p], prefix) - update_registration(internal_dirs[p], enabled_transforms, disabled_transforms, exclude_modules) - sys.path.remove(root_dir) - - -def load_dirs(framework: str, dirs: list, get_front_classes: callable): - if dirs is None: - return - internal_dirs = get_internal_dirs(framework, get_front_classes) - - for p, dir_names in internal_dirs.items(): - for d in dir_names: - d.registered_cls = [] - d.registered_ops = {} - clear_registered_classes_dict() - - mo_inner_extensions = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'mo')) - dirs.insert(0, mo_inner_extensions) - dirs = [os.path.abspath(e) for e in dirs] - if default_path() not in dirs: - dirs.insert(0, default_path()) - for path in dirs: - load_dir(framework, path, get_front_classes) - - _check_unique_ids() diff --git a/tools/mo/openvino/tools/mo/utils/ir_engine/__init__.py b/tools/mo/openvino/tools/mo/utils/ir_engine/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_engine/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/utils/ir_engine/compare_graphs.py b/tools/mo/openvino/tools/mo/utils/ir_engine/compare_graphs.py deleted file mode 100644 index 523e8e4eb9fa54..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_engine/compare_graphs.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from collections import deque -from numbers import Number - -import numpy as np - -from openvino.tools.mo.graph.graph import Graph, Node - - -def compare_node(node_ref, node, ref_attr_value, attr_value, attr, errors_list: list): - from openvino.tools.mo.utils.ir_engine.ir_engine import IREngine - - def err_format_string(): - return 'Current node "{}" with type "{}" and reference node "{}" with type "{}" have different attr "{}" : ' \ - '{} and {}'.format(node.id, node.soft_get('type', None), node_ref.id, node_ref.soft_get('type', None), - attr, attr_value, ref_attr_value) - - if type(ref_attr_value) in 
[np.ndarray, list]: - if not np.array_equal(attr_value, ref_attr_value): - errors_list.append(err_format_string()) - elif isinstance(ref_attr_value, tuple): - if len(ref_attr_value) != len(attr_value): - errors_list.append(err_format_string()) - else: - for ref_item, item in zip(ref_attr_value, attr_value): - compare_node(node_ref, node, ref_item, item, attr, errors_list) - elif isinstance(ref_attr_value, dict): - ref_keys = sorted(list(ref_attr_value.keys())) - keys = sorted(list(attr_value.keys())) - if ref_keys != keys: - errors_list.append(err_format_string()) - else: - for key in keys: - compare_node(node_ref, node, ref_attr_value[key], attr_value[key], key, errors_list) - elif isinstance(attr_value, Number): - eps = 5e-2 if node.has('precision') and node['precision'] == 'FP16' else 1e-4 - if abs(attr_value - ref_attr_value) > eps: - errors_list.append(err_format_string()) - elif isinstance(attr_value, IREngine): - resp, err_log = attr_value.compare(ref_attr_value) - if not resp: - errors_list.extend(err_log) - elif isinstance(attr_value, np.ma.masked_array): - if not np.ma.allequal(attr_value, ref_attr_value): - errors_list.append(err_format_string()) - elif isinstance(attr_value, np.ndarray): - if not np.array_equal(attr_value, ref_attr_value): - errors_list.append(err_format_string()) - elif attr_value != ref_attr_value: - errors_list.append(err_format_string()) - - -def compare_graphs(graph: Graph, graph_ref: Graph, last_node: str, last_node_ref=None, check_op_attrs=False): - stderr = [] - if last_node_ref is None: - last_node_ref = last_node - - if 'statistics' in graph.graph and 'statistics' in graph_ref.graph: - assert graph.graph['statistics'] == graph_ref.graph['statistics'], "int8 statistics comparison failed" - - q = deque([last_node]) - q_ref = deque([last_node_ref]) - - checked_nodes = [] - checked_nodes_ref = [] - - while len(q_ref) != 0: - if len(q) == 0: - stderr.append('Graphs have different number of nodes') - return (False, stderr) - node = Node(graph, q.popleft()) - node_ref = Node(graph_ref, q_ref.popleft()) - - checked_nodes.append(node.id) - checked_nodes_ref.append(node_ref.id) - - # Check that nodes has same amount of output nodes - if len(node_ref.out_nodes()) != len(node.out_nodes()): - stderr.append('Current node "{}" and reference node "{}" have different amount of output nodes: {} vs {}'.\ - format(node.id, node_ref.id, len(node.out_nodes()), len(node_ref.out_nodes()))) - continue - - # Check that nodes has same amount of input nodes - if len(node_ref.in_nodes()) != len(node.in_nodes()): - stderr.append('Current node "{}" and reference node "{}" have different amount of input nodes: {} vs {}'.\ - format(node.id, node_ref.id, len(node.in_nodes()), len(node_ref.in_nodes()))) - continue - - # Check that nodes has same 'kind' - if node_ref.kind != node.kind: - stderr.append('Current node "{}" and reference node "{}" have different kind parameter'.\ - format(node.id, node_ref.id)) - return (False, stderr) - - # Check can_be_fused attr - if node_ref.has_valid('can_be_fused'): - if node_ref.soft_get('can_be_fused') != node.soft_get('can_be_fused'): - stderr.append('Current node "{}" and reference node "{}" have different "can_be_fused" parameter ' \ - '{} and {}'.format(node.id, node_ref.id, node.soft_get('can_be_fused'), - node_ref.soft_get('can_be_fused'))) - - if node_ref.kind == 'op': - # Check that nodes has same operation - if check_op_attrs: - cur_node_type = node.type if node.has_valid("type") else None - ref_node_type = node_ref.type if 
node_ref.has_valid("type") else None - for attr in graph_ref.node[node_ref.id]: - if graph_ref.node[node_ref.id][attr] is None or attr in \ - ['name', 'id', '_in_ports', '_out_ports', 'infer', 'IE', 'biases', 'weights', 'custom', - 'offset', 'ir_data_attrs', 'rt_info']: - continue - if attr not in graph.node[node.id]: - stderr.append('Current node "{}" with type {} has missing attribute {}' - ''.format(node.id, cur_node_type, attr)) - continue - - def align_strided_slice_masks(curr_node: Node, rank: int): - from openvino.tools.mo.ops.strided_slice import StridedSlice - for mask_name in StridedSlice.get_mask_names(): - if isinstance(curr_node[mask_name], int): - curr_node[mask_name] = [curr_node[mask_name]] - elif isinstance(curr_node[mask_name], str): # if mask is an empty string '' - assert len(curr_node[mask_name]) == 0 - curr_node[mask_name] = [] - - num_insertions = rank - len(curr_node[mask_name]) - curr_node[mask_name] = np.append(curr_node[mask_name], [0] * num_insertions).astype(int) - - # Need to align StridedSlice masks since such masks as [] and [0]; [] and [0,0]; [] and [0,0,0] - # or [1] and [1,0]; [1] and [1,0,0] and so on for the input with rank 4 do exactly the same slicing and - # should be treated as equal. Therefore, before attr comparison we align all masks to the input rank - if cur_node_type == 'StridedSlice' and node.in_node(1).has('shape') \ - and node.in_node(1).shape is not None: - slice_rank = node.in_node(1).shape.item() - align_strided_slice_masks(node, slice_rank) - align_strided_slice_masks(node_ref, slice_rank) - - if attr == 'value': - if not values_are_equal(node.value, node_ref.value): - stderr.append('Current node "{}" with type {} and reference node "{}" with type "{}" have ' - 'different values \n{} \nand \n{}'.format( - node.id, cur_node_type, node_ref.id, ref_node_type, node.value, node_ref.value)) - continue - compare_node(node_ref, node, graph_ref.node[node_ref.id][attr], graph.node[node.id][attr], attr, - stderr) - else: - if node_ref.has_valid('shape') and not node.has_valid('shape'): - stderr.append('{} has None shape'.format(node.id)) - if node_ref.has_valid('value') and not node.has_valid('value'): - stderr.append('{} has None value'.format(node.id)) - - # Check that nodes has same shape and value - if node_ref.has_valid('shape') and node_ref.shape is not None and not np.array_equal(node_ref.shape, - node.shape): - stderr.append('Current node "{}" and reference node "{}" have different shapes {} and {}'.\ - format(node.id, node_ref.id, node.shape, node_ref.shape)) - continue - - if node_ref.has_valid('value') and node_ref.value is not None and \ - not values_are_equal(node.value, node_ref.value): - stderr.append('Current node "{}" and reference node "{}" have different values \n{} \nand \n{}'.\ - format(node.id, node_ref.id, node.value, node_ref.value)) - ports = sorted(node.in_nodes().keys()) if node.kind == 'op' else None - in_nodes = [node.in_node(k) for k in ports] if node.kind == 'op' else node.in_nodes() - for in_node in in_nodes: - if in_node.id not in checked_nodes and in_node.id not in q: - q.append(in_node.id) - - ports_ref = sorted(node_ref.in_nodes().keys()) if node_ref.kind == 'op' else None - if ports != ports_ref: - stderr.append('Current node "{}" and reference node "{}" have different ports'.format(node.id, node_ref.id)) - return (False, stderr) - - in_nodes = [node_ref.in_node(k) for k in ports] if node_ref.kind == 'op' else node_ref.in_nodes() - for in_node in in_nodes: - if in_node.id not in checked_nodes_ref and in_node.id 
not in q_ref: - q_ref.append(in_node.id) - - if node.kind == 'op': - out_nodes = sorted_by_name([Node(graph, v) for v, _ in node.get_outputs()]) - else: - out_nodes = sorted_by_name(node.out_nodes()) - for out_node in out_nodes: - if out_node.id not in checked_nodes and out_node.id not in q: - q.append(out_node.id) - - if node_ref.kind == 'op': - out_nodes = sorted_by_name([Node(graph_ref, v) for v, _ in node_ref.get_outputs()]) - else: - out_nodes = sorted_by_name(node_ref.out_nodes()) - for out_node in out_nodes: - if out_node.id not in checked_nodes_ref and out_node.id not in q_ref: - q_ref.append(out_node.id) - - return (False, stderr) if stderr else (True, []) - - -def sorted_by_name(nodes_list): - return sorted(nodes_list, key=lambda x: x.soft_get('name', x.id)) - - -def values_are_equal(value, value_ref): - dtype = np.asarray(value).dtype - if dtype == 'uint8': - eps = 0 - elif dtype == 'float16': - eps = 5e-2 - else: - eps = 1e-4 - return np.allclose(value_ref, value, rtol=eps, atol=eps) - diff --git a/tools/mo/openvino/tools/mo/utils/ir_engine/ir_engine.py b/tools/mo/openvino/tools/mo/utils/ir_engine/ir_engine.py deleted file mode 100644 index aec4049c6b32e8..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_engine/ir_engine.py +++ /dev/null @@ -1,548 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import hashlib -import logging as log -import os -import sys -from argparse import Namespace -from collections import namedtuple, defaultdict, OrderedDict -from pathlib import Path - -import defusedxml.ElementTree as ET -import numpy as np -from defusedxml import defuse_stdlib - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from openvino.tools.mo.utils.runtime_info import RTInfo, OldAPIMapOrder, OldAPIMapElementType - -log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.DEBUG, stream=sys.stdout) - -# defuse_stdlib provide patched version of xml.etree.ElementTree which allows to use objects from xml.etree.ElementTree -# in a safe manner without including unsafe xml.etree.ElementTree -ElementTree = defuse_stdlib()[ET].ElementTree - - -def read_rt_info_attr(elem): - if len(elem) == 0: - if 'value' not in elem.attrib: - return None - value = elem.attrib['value'] - return value - val_dict = {} - for child in elem: - child_val = read_rt_info_attr(child) - val_dict[child.attrib.get('name', child.tag)] = child_val - return val_dict - - -class IREngine(object): - def __init__(self, path_to_xml: str, path_to_bin=None, precision="FP32", xml_tree=None): - if not xml_tree and not os.path.exists(path_to_xml): - raise AttributeError("File {} do not exists!".format(path_to_xml)) - - if path_to_bin and not os.path.exists(path_to_bin): - raise AttributeError("File {} do not exists!".format(path_to_bin)) - - self.path_to_xml = str(path_to_xml) - self.path_to_bin = str(path_to_bin) if path_to_bin else None - self.xml_tree = xml_tree - self.input_node = None - self.ir_version = None - self.meta_data = dict() - - if precision.upper() not in ['FP32', 'FP16']: - raise AttributeError("Precision {} is not supported!".format(precision)) - self.__load_ir() - - def __load_xml(self): - xml_tree = self.xml_tree or ET.parse(self.path_to_xml) - xml_root = xml_tree.getroot() 
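A minimal sketch of the ElementTree traversal pattern that the removed `__load_xml` method relies on, using defusedxml as the deleted module does. The IR fragment, variable names, and printed output below are illustrative only and are not taken from the deleted file:

```python
# Illustrative sketch (not from the deleted ir_engine.py): parse a tiny,
# made-up IR-like document with defusedxml and collect layer and edge
# attributes, mirroring the xml_layers / xml_edges bookkeeping in the
# surrounding code.
import defusedxml.ElementTree as ET

IR_FRAGMENT = """
<net name="demo" version="11">
    <layers>
        <layer id="0" name="data" type="Parameter"/>
        <layer id="1" name="out" type="Result"/>
    </layers>
    <edges>
        <edge from-layer="0" from-port="0" to-layer="1" to-port="0"/>
    </edges>
</net>
"""

root = ET.fromstring(IR_FRAGMENT)
# Layer attributes keyed by layer id.
layers = {layer.attrib["id"]: dict(layer.attrib) for layer in root.find("layers")}
# Edge descriptors as plain dicts.
edges = [dict(edge.attrib) for edge in root.find("edges")]
print(layers)
print(edges)
```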
- xml_layers = {} - xml_edges = [] - statistics = {} - - Edge = namedtuple('edge', ['from_layer', 'from_port', 'to_layer', 'to_port']) - - # Create graph with operations only - self.graph = Graph() - self.graph.graph['hashes'] = {} - - self.graph.graph['ir_version'] = int(xml_root.attrib['version']) if xml_root.attrib.get('version') is not None else None - - # NOTE: THis is MO internal attribute, it cannot be used for - # defining graph input layout. We set it to NCHW as in MO back stage - # during conversion for correct shape inference of layout specific - # operations (ExtractImagePatches, SpaceToDepth, etc.) - self.graph.graph['layout'] = 'NCHW' - - self.graph.name = xml_root.attrib['name'] if xml_root.attrib.get('name') is not None else None - - self.graph.inputs_order = [] - self.graph.outputs_order = [] - - # Parse XML - for child in xml_root: - if child.tag == 'layers': - for layer in child: - layer_id, layer_attrs = self.__load_layer(layer) - xml_layers.update({layer_id: layer_attrs}) - if layer_attrs['type'] == 'Parameter': - self.graph.inputs_order.append(layer_attrs['name']) - if layer_attrs['type'] == 'Result': - self.graph.outputs_order.append(layer_attrs['name']) - elif child.tag == 'edges': - for edge in child: - xml_edges.append(Edge(edge.attrib['from-layer'], int(edge.attrib['from-port']), - edge.attrib['to-layer'], int(edge.attrib['to-port']))) - elif child.tag == 'statistics': - layers = child.findall('layer') - for layer in layers: - statistics[layer.find('name').text] = {'min': layer.find('min').text, 'max': layer.find('max').text} - elif child.tag == 'rt_info': - for elem in child: - self.meta_data[elem.attrib.get('name', elem.tag)] = read_rt_info_attr(elem) - - # TODO: Remove this part when POT updates to using of rt_info - elif child.tag == 'quantization_parameters': - # Section with Post Optimization Toolkit parameters - self.meta_data['quantization_parameters'] = dict() - for elem in child: - if elem.tag == 'config': - self.meta_data['quantization_parameters']['config'] = elem.text - elif elem.tag in ['version', 'cli_params']: - self.meta_data['quantization_parameters'][elem.tag] = elem.attrib['value'] - - self.graph.graph['cmd_params'] = Namespace(**self.meta_data) # TODO check what we need all this attrs - - if len(statistics): - self.graph.graph['statistics'] = statistics - - for layer in xml_layers.keys(): - self.graph.add_node(layer, **xml_layers[layer]) - - xml_edges.sort(key=lambda x: x.to_layer) - - for edge in xml_edges: - self.graph.add_edges_from( - [(edge.from_layer, edge.to_layer, {'from_port': edge.from_port, 'to_port': edge.to_port})]) - - # Insert data nodes between op nodes and insert data nodes with weights - nodes = list(self.graph.nodes()) - for node in nodes: - out_edges = Node(self.graph, node).get_outputs() - data_nodes = {} - for port in self.graph.node[node]['ports']: - data = self.graph.unique_id(prefix='data_') - self.graph.add_node(data, **{'kind': 'data', 'shape': self.graph.node[node]['ports'][port][0], - 'value': None}) - self.graph.add_edges_from([(node, data, {'out': port})]) - data_nodes.update({port: data}) - - for out_node, edge_attrs in out_edges: - self.graph.remove_edge(node, out_node) - if edge_attrs['from_port'] in data_nodes: - data = data_nodes[edge_attrs['from_port']] - else: - raise RuntimeError("SMTH wrong with IR! 
There is an edge from not existing port") - self.graph.add_edges_from([(data, out_node, {'in': edge_attrs['to_port']})]) - - def __load_bin(self): - bin_buff = np.fromfile(file=self.path_to_bin, dtype=np.uint8) - graph = self.graph - nodes = [node for node in graph.nodes()] - hashes = defaultdict(dict) - for node in nodes: - for w in ['weights', 'biases', 'custom']: - if w in graph.node[node]: - data = graph.unique_id(prefix='data_') - offset, size, in_port, precision = graph.node[node][w] - if Node(graph, node).soft_get('type') == 'BinaryConvolution': - precision = np.uint8 - value = np.frombuffer(buffer=bin_buff, dtype=precision, count=size, offset=offset) - hashes[graph.node[node]['name']][w] = hashlib.sha512(value.tobytes()).hexdigest() - graph.add_node(data, **{'kind': 'data', 'value': value, 'shape': value.shape}) - graph.add_edges_from([(data, node, {'in': in_port})]) - self.graph.graph['hashes'].update(hashes) - - def __load_bin_hashes(self): - graph = self.graph - bin_hash_map = {name: blob_map.item(0) for name, blob_map in dict(np.load(self.path_to_bin, - allow_pickle=True)).items()} - - for node in graph.nodes(): - for w in ['weights', 'biases', 'custom']: - if w in graph.node[node]: - assert Node(graph, node).has_valid('name') - node_name = Node(graph, node).name - assert node_name in bin_hash_map and w in bin_hash_map[node_name] - graph.node[node]['hashes'] = bin_hash_map[node_name][w] - - def __load_ir(self): - self.__load_xml() - if not self.path_to_bin: - return - - if self.path_to_bin.endswith('.bin.hashes.npz'): - self.__load_bin_hashes() - else: - self.__load_bin() - - def __load_layer(self, layer): - """ - Layer example - - - - - - 1 - 3 - 32 - 32 - - - - - 1 - 32 - 32 - 32 - - - - - - - - - """ - - layer_id = layer.attrib['id'] - - layer_attrs = layer.attrib - layer_attrs.update({'ports': {}, 'restored_input_ports': {}, 'kind': 'op'}) - - inputs_counter = 0 - - for attr in layer: - if attr.tag == 'data': - new_attrs = self.__normalize_attrs(attr.attrib) - new_attrs['ir_data_attrs'] = attr.attrib - if layer.attrib['type'] == 'Const': - assert 'offset' in new_attrs and 'size' in new_attrs, \ - 'Incorrect attributes for Const layer, {} instead of {}!'.format(new_attrs.keys(), ['offset', 'size']) - precision = "" - for item in layer: - if item.tag == "output": - precision = item[0].attrib["precision"] - break - new_attrs.update(self.__prepare_bin_attrs(layer, 0, 'custom', new_attrs['offset'], new_attrs['size'], precision)) - layer_attrs.update(new_attrs) - elif attr.tag == 'input': - inputs_counter = len(attr) - - input = attr - for port in input: - port_id = int(port.attrib['id']) - input_shape = [] - port_rt_info = {} - for dim in port: - if dim.tag == "dim": - input_shape.append(int(dim.text)) - if dim.tag == 'rt_info': - for attr in dim: - port_rt_info.update(self.__read_rt_info_common(attr)) - - input_shape = shape_array([d if d != -1 else dynamic_dimension_value for d in input_shape]) - - in_tensor_names = None - if 'names' in port.attrib: - in_tensor_names = port.attrib['names'] - - # special attribute to pass information about operation input ports - layer_attrs['restored_input_ports'].update({port_id: (input_shape, in_tensor_names, port_rt_info)}) - elif attr.tag == 'output': - output = attr - for port in output: - port_id = int(port.attrib['id']) - output_shape = [] - port_rt_info = {} - for dim in port: - if dim.tag == "dim": - output_shape.append(int(dim.text)) - if dim.tag == 'rt_info': - for attr in dim: - port_rt_info.update(self.__read_rt_info_common(attr)) 
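The port-parsing code at this point collapses each `<port>` element into a shape list, replacing `-1` with the MO dynamic-dimension marker before wrapping it in `shape_array`. A stand-alone sketch of that convention, with plain `None` standing in for the MO-specific `dynamic_dimension_value`:

```python
# Illustrative sketch (not from the deleted ir_engine.py): reduce a <port>
# element to its dimension list, mapping -1 to a dynamic-dimension marker.
# None is used here in place of MO's dynamic_dimension_value.
import defusedxml.ElementTree as ET

def parse_port_shape(port_elem):
    dims = [int(dim.text) for dim in port_elem if dim.tag == "dim"]
    return [d if d != -1 else None for d in dims]

port = ET.fromstring(
    "<port id='0'><dim>1</dim><dim>-1</dim><dim>224</dim><dim>224</dim></port>"
)
print(parse_port_shape(port))  # [1, None, 224, 224]
```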
- - output_shape = shape_array([d if d != -1 else dynamic_dimension_value for d in output_shape]) - - out_tensor_names = None - if 'names' in port.attrib: - out_tensor_names = port.attrib['names'] - # special attribute to pass information about operation input ports - # NOTE: renaming or structure changing of this attribute may have big impact on tests - layer_attrs['ports'].update({port_id: (output_shape, out_tensor_names, port_rt_info)}) - elif attr.tag == 'blobs': - in_port = inputs_counter - for blob_attr in attr: - layer_attrs.update(self.__prepare_bin_attrs(layer, in_port, blob_attr.tag, - blob_attr.attrib['offset'], blob_attr.attrib['size'], - blob_attr.attrib.get('precision', None))) - in_port += 1 - elif attr.tag == 'body': - xml_body_child = list(layer.iterfind('body')) - assert len(xml_body_child) == 1 - - body_ir, input_port_map, output_port_map, input_layers = \ - self.__read_subgraph(layer, layer_attrs, xml_body_child, 'port_map') - - body_ir.input_node = input_layers[0] - layer_attrs.update({'body': body_ir}) - layer_attrs.update({'input_port_map': input_port_map}) - layer_attrs.update({'output_port_map': output_port_map}) - - xml_back_edges_map = list(layer.iterfind('back_edges')) - if not len(xml_back_edges_map) == 1: - log.warning("TensorIterator body won\'t be compared due to missing back_edges section!") - continue - xml_back_edges_map = xml_back_edges_map[0] - - back_edges = [] - - for edge in xml_back_edges_map: - back_edges.append(self.__normalize_attrs(edge.attrib)) - - layer_attrs.update({'back_edges': back_edges}) - - elif attr.tag == 'then_body' or attr.tag == 'else_body': - assert layer.attrib['type'] == 'If', "Incorrect IR! The operation {0}" \ - " has sub-graphs for If operation" - layer_attrs = self.__read_if(layer, layer_attrs) - continue - - elif attr.tag == 'rt_info': - layer_attrs = self.__read_rt_info(layer, layer_attrs) - continue - - return layer_id, layer_attrs - - @staticmethod - def __prepare_bin_attrs(xml_layer, in_port, tag, offset, size, precision): - layer_attrs = dict() - if precision is None: - precision = xml_layer.attrib['precision'] - precision_map = { - 'FP64': (8, np.float64), - 'FP32': (4, np.float32), - 'FP16': (2, np.float16), - 'I64': (8, np.int64), - 'I32': (4, np.int32), - 'I8': (1, np.int8), - 'U8': (1, np.uint8), - 'U1': (1, np.uint8), - 'U4': (1, np.uint8), - 'I4': (1, np.uint8), - 'BOOL': (1, bool), - 'BIN': (1, np.uint8), - 'U64': (8, np.uint64) - } - type_size, dtype = precision_map[precision] - layer_attrs[tag] = (int(offset), int(size) // type_size, in_port, dtype) - return layer_attrs - - @staticmethod - def __normalize_attrs(attrs: dict): - """ - Normalize attributes for type 'data'. - Replace " from values (not used right now) and make list of value with int, float or other types values. 
- Example: {'order': '1,0,2'} -> {'order': [1, 0, 2]} - {'order': '1'} -> {'order': 1} - """ - normalized_attrs = {} - for attr, value in attrs.items(): - value = value.replace('\"', '').replace(' ', '') - value = value.split(',') - n_value = [] - for val in value: - if IREngine.__isint(val): - n_value.append(int(val)) - elif IREngine.__isfloat(val): - n_value.append(float(val)) - elif val in ['True', 'False', 'true', 'false']: - n_value.append(val in ['True', 'true']) - else: - n_value.append(val) - - if len(n_value) == 1: - normalized_attrs.update({attr: n_value[0]}) - else: - normalized_attrs.update({attr: n_value}) - - return normalized_attrs - - @staticmethod - def __isfloat(value): - try: - float(value) - return True - except ValueError: - return False - - @staticmethod - def __isint(value): - is_signed = value.startswith('+') or value.startswith('-') - other_chars_are_digits = value[1:].isdigit() - all_chars_are_digits = value.isdigit() - return all_chars_are_digits or (is_signed and other_chars_are_digits) - - @staticmethod - def __find_input(graph): - inputs = [] - for node in sorted(graph.nodes()): - node = Node(graph, node) - if node.has_valid('type') and node.type in ('Input', 'Parameter'): - inputs.append(node) - - if len(inputs) < 1: - raise RuntimeError("Graph {} has less than one input node".format(graph.name)) - - return inputs - - def compare(self, ref_net): - if not isinstance(ref_net, IREngine): - ir_input = self.__find_input(self.graph)[0] - ref_input = self.__find_input(ref_net)[0] - ref_graph = ref_net - else: - ir_input = self.input_node or self.__find_input(self.graph)[0] - ref_input = ref_net.input_node or ref_net.__find_input(ref_net.graph)[0] - ref_graph = ref_net.graph - # TODO check that ir_input[0].id and ref_input[0].id are the same - result, stderr = compare_graphs(graph=self.graph, graph_ref=ref_graph, last_node=ir_input.id, - last_node_ref=ref_input.id, check_op_attrs=True) - return result, stderr - - def generate_bin_hashes_file(self, path_for_file=None): - # This function creates file with extension '.bin.hashes.npz' where hashes of bin exists. - # For creating this file in custom filder use attribute path_for_file. - # Where directory for file should be existed - graph = self.graph - if path_for_file is None: - path_for_file = str(Path(self.path_to_xml).with_suffix('.bin.hashes.npz')) - assert 'hashes' in graph.graph, "Loaded IR graph doesn't contain `hashes`: {}".format(self.path_to_xml) - np.savez_compressed(path_for_file, **graph.graph['hashes']) - return path_for_file - - def get_inputs(self): - # Function return input nodes in dictionary: {input_node_name: input_node_shape, ...} - input_nodes = self.__find_input(self.graph) - return {input_node.name: input_node.out_node().shape for input_node in input_nodes} - - def __eq__(self, other): - # To call this function create two IREngine objects (IR1, IR2) and compare them IR1 == IR2 - if not isinstance(other, IREngine): - raise AttributeError("IREngine can be compared only with IREngine object type") - return self.compare(other)[0] - - def __read_subgraph(self, layer, layer_attrs, body_child, port_map_name): - body_ir = IREngine(path_to_xml=None, - path_to_bin=self.path_to_bin, - xml_tree=ElementTree(body_child[0])) - - self.graph.graph['hashes'].update(body_ir.graph.graph['hashes']) - - xml_port_map = list(layer.iterfind(port_map_name)) - assert not len(xml_port_map) != 1, "If then_body won\'t be compared due to missing {1} section in node {0}! 
" \ - .format(layer_attrs['name'], port_map_name) - xml_port_map = xml_port_map[0] - - input_layers = [] - input_port_map = [] - output_port_map = [] - - for port in xml_port_map: - if port.tag == 'input': - if 'internal_layer_id' not in port.attrib: - log.warning("internal_layer_id attrib not found in input section") - else: - input_layers.append(Node(body_ir.graph, port.attrib['internal_layer_id'])) - input_port_map.append(self.__normalize_attrs(port.attrib)) - elif port.tag == 'output': - if 'internal_layer_id' not in port.attrib: - log.warning("internal_layer_id attrib not found in output section") - else: - output_port_map.append(self.__normalize_attrs(port.attrib)) - - return body_ir, input_port_map, output_port_map, input_layers - - def __read_if(self, layer, layer_attrs): - - xml_then_body_child = list(layer.iterfind('then_body')) - xml_else_body_child = list(layer.iterfind('else_body')) - assert len(xml_then_body_child) == 1 and len(xml_else_body_child) == 1, "If operation has only one subgraph" - - then_body_ir, then_input_port_map, then_output_port_map, _ = \ - self.__read_subgraph(layer, layer_attrs, xml_then_body_child, 'then_port_map') - layer_attrs.update({'then_graph': then_body_ir}) - layer_attrs.update({'then_input_port_map': then_input_port_map}) - layer_attrs.update({'then_output_port_map': then_output_port_map}) - - else_body_ir, else_input_port_map, else_output_port_map, _ = \ - self.__read_subgraph(layer, layer_attrs, xml_else_body_child, 'else_port_map') - layer_attrs.update({'else_graph': else_body_ir}) - layer_attrs.update({'else_input_port_map': else_input_port_map}) - layer_attrs.update({'else_output_port_map': else_output_port_map}) - - return layer_attrs - - def __read_rt_info(self, layer, layer_attrs): - rt_info = RTInfo() - xml_rt_info = list(layer.iterfind('rt_info'))[0] - - for attr in xml_rt_info: - attr_name = attr.attrib['name'] - if attr_name == 'old_api_map_order': - rt_info.info.update(self.__read_old_api_map_order(attr, layer.attrib['type'])) - elif attr_name == 'old_api_map_element_type': - rt_info.info.update(self.__read_old_api_map_element_type(attr, layer.attrib['type'])) - else: - rt_info.info.update((self.__read_rt_info_common(attr))) - - layer_attrs.update({'rt_info': rt_info}) - return layer_attrs - - @staticmethod - def __read_old_api_map_order(attr, layer_type): - version = int(attr.attrib['version']) - order = list(map(int, attr.attrib['value'].split(','))) - old_api_map = OldAPIMapOrder(version=version) - if layer_type == 'Parameter': - old_api_map.old_api_transpose_parameter(order) - elif layer_type == 'Result': - old_api_map.old_api_transpose_result(order) - else: - raise AttributeError("Cannot read old_api_map for layer of type: {}".format(layer_type)) - - return {('old_api_map_order', version): old_api_map} - - @staticmethod - def __read_old_api_map_element_type(attr, layer_type): - version = int(attr.attrib['version']) - element_type = destination_type_to_np_data_type(attr.attrib['value']) - old_api_map = OldAPIMapElementType(version=version) - old_api_map.set_legacy_type(element_type) - return {('old_api_map_element_type', version): old_api_map} - - @staticmethod - def __read_rt_info_common(attr): - attr_name = attr.attrib['name'] - version = int(attr.attrib['version']) - rt_info = OrderedDict() - for key in attr.attrib: - if key not in ('name', 'version'): - rt_info[key] = attr.attrib[key] - return {(attr_name, version): rt_info} diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/__init__.py 
b/tools/mo/openvino/tools/mo/utils/ir_reader/__init__.py deleted file mode 100644 index 8ba81a92b19c53..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extender.py deleted file mode 100644 index 579d6efc16cb2f..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extender.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils import class_registration -from openvino.tools.mo.utils.graph import Node - - -class Extender(object): - registered_ops = {} - registered_cls = [] - # Add the derived class to excluded_classes if one should not be registered in registered_ops - excluded_classes = [] - - @staticmethod - def extend(op: Node): - pass - - @staticmethod - def get_extender_class_by_name(name: str): - return __class__.registered_ops[name] - - @classmethod - def class_type(cls): - return class_registration.ClassType.IR_READER_EXTENDER - - @staticmethod - def attr_to_list(node: Node, attribute: str): - if not node.has_valid(attribute): - log.warning('Attribute {} missed in node {} with type {}!'.format(attribute, node.soft_get('name'), - node.soft_get('type'))) - elif not isinstance(node[attribute], list): - node[attribute] = [node[attribute]] - - @staticmethod - def use_shapes_from_ir(node: Node): - # This function used instead of operation shape inference function to set all output shapes the same as - # restored from IR. Firstly, check equality of old (restored from IR) and - # new (calculated while shape inference) input shapes - node['new_input_shapes'] = list() - for n in node.in_ports(): - # We use such condition to handle optional inputs - if not node.in_port(n).disconnected(): - node.new_input_shapes.append(node.in_port(n).data.get_shape()) - assert len(node.new_input_shapes) == len(node.old_input_shapes), \ - 'Something wrong happened while {} node with type {} copy shape inference! {} != {}'.format( - node.name, node.type, len(node.new_input_shapes), len(node.old_input_shapes)) - for new_input_shape, old_input_shape in zip(node.new_input_shapes, node.old_input_shapes): - assert np.array_equal(new_input_shape, old_input_shape), \ - 'Something wrong happened while {} node with type {} copy shape inference! 
{} != {}'.format( - node.name, node.type, new_input_shape, old_input_shape) - - # We need to use number of connected input ports to avoid errors with numbering - # in node.ports dictionary, where used numbers of input nodes - connected_input_ports = [] - for n in node.in_ports(): - if not node.in_port(n).disconnected(): - connected_input_ports.append(node.in_port(n)) - i = len(connected_input_ports) - - # Set all output shapes the same as restored from IR - for num in node.out_ports(): - if i in node.ports: - node.out_port(num).data.set_shape(int64_array(node.ports[i][0])) - else: - assert node.out_port(num).data.get_shape() is not None, "Newly added port does not have set shape" - i += 1 diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/ExtractImagePatches_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/ExtractImagePatches_extender.py deleted file mode 100644 index 2ac87cfa44bcc8..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/ExtractImagePatches_extender.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class ExtractImagePatches(Extender): - op = 'ExtractImagePatches' - - @staticmethod - def extend(op: Node): - op['sizes'] = int64_array([1, 1] + op.sizes) - op['strides'] = int64_array([1, 1] + op.strides) - op['rates'] = int64_array([1, 1] + op.rates) - - op['spatial_dims'] = int64_array([2, 3]) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/GRUCell_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/GRUCell_extender.py deleted file mode 100644 index 3669f2d43e431a..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/GRUCell_extender.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import mark_input_bins -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class GRUCell_extender(Extender): - op = 'GRUCell' - - @staticmethod - def extend(op: Node): - if not op.has_valid('activations'): - op['activations'] = None - - mark_input_bins(op, start_port=2) - - op['need_copy_input_blobs'] = True diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/GRUSequence_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/GRUSequence_extender.py deleted file mode 100644 index 3679d3e945ad0d..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/GRUSequence_extender.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class GRUSequence_extender(Extender): - op = 'GRUSequence' - - @staticmethod - def extend(op: Node): - op['infer'] = Extender.use_shapes_from_ir diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/LSTMCell_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/LSTMCell_extender.py deleted file mode 100644 index d1766ecbd205a6..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/LSTMCell_extender.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 
Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class LSTMCell_extender(Extender): - op = 'LSTMCell' - - @staticmethod - def extend(op: Node): - if not op.has_valid('activations'): - op['activations'] = None - op['infer'] = Extender.use_shapes_from_ir diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/LSTMSequence_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/LSTMSequence_extender.py deleted file mode 100644 index 168b82f6ba6694..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/LSTMSequence_extender.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class LSTMSequence_extender(Extender): - op = 'LSTMSequence' - - @staticmethod - def extend(op: Node): - op['infer'] = Extender.use_shapes_from_ir diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/RNNCell_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/RNNCell_extender.py deleted file mode 100644 index e11f6cede5d04c..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/RNNCell_extender.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class RNNCell_extender(Extender): - op = 'RNNCell' - - @staticmethod - def extend(op: Node): - if not op.has_valid('activations'): - op['activations'] = None diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/__init__.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/binary_convolution_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/binary_convolution_extender.py deleted file mode 100644 index edf8b9a7744f17..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/binary_convolution_extender.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender -from openvino.tools.mo.utils.ir_reader.extenders.conv_extender import Conv_extender - - -class BinaryConv_extender(Extender): - op = 'BinaryConvolution' - - @staticmethod - def extend(op: Node): - Conv_extender.extend(op) - op['type_to_create'] = 'Convolution' diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/bucketize_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/bucketize_extender.py deleted file mode 100644 index e76ed95967df30..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/bucketize_extender.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class BucketizeExtender(Extender): - op = 'Bucketize' - - @staticmethod - def extend(op: Node): - if 
op.get_opset() != "extension": - op['output_type'] = destination_type_to_np_data_type(op.output_type) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/conv_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/conv_extender.py deleted file mode 100644 index 0c70874403b383..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/conv_extender.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class Conv_extender(Extender): - op = 'Convolution' - - @staticmethod - def extend(op: Node): - for attr in ['strides', 'dilations', 'pads_begin', 'pads_end', 'output_padding']: - Extender.attr_to_list(op, attr) - - op['stride'] = int64_array([1, 1] + op.strides) - op['dilation'] = int64_array([1, 1] + op.dilations) - - op['batch_dims'] = int64_array([0]) - op['channel_dims'] = int64_array([1]) - - if op.has_valid('output_padding'): - op.output_padding = int64_array([0, 0] + op.output_padding) - - # Be VERY careful with these attributes! - op['input_feature_channel'] = 1 - op['output_feature_channel'] = 0 - - dim = len(op.pads_begin) - - assert dim in (1, 2, 3), '{}D Convolution not supported!'.format(dim) - - pad = [[0, 0], [0, 0]] - pad.extend([[op.pads_begin[i], op.pads_end[i]] for i in range(dim)]) - - op['pad'] = int64_array(pad) - - op['spatial_dims'] = [i + 2 for i in range(dim)] diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/convert_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/convert_extender.py deleted file mode 100644 index 20ee8d0feb4f66..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/convert_extender.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class Convert_extender(Extender): - op = 'Convert' - - @staticmethod - def extend(op: Node): - op['dst_type'] = destination_type_to_np_data_type(op.destination_type) - # CompressQuantizeWeights generates IR with constant sub-graph, that should not be ConstFolded: - # Const(u8) -> Convert(to fp) -> (some eltwise operations) -> FakeQuantize - if op.in_node().in_node().soft_get('type') == 'Const': - op['stop_value_propagation'] = True diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/ctc_greedy_decoder_seq_len_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/ctc_greedy_decoder_seq_len_extender.py deleted file mode 100644 index 346bdfd85f0efa..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/ctc_greedy_decoder_seq_len_extender.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class CTCGreedyDecoderSeqLenExtender(Extender): - op = 'CTCGreedyDecoderSeqLen' - - @staticmethod - def extend(op: Node): - if op.has_valid('classes_index_type'): - op['classes_index_type'] 
= destination_type_to_np_data_type(op.classes_index_type) - if op.has_valid('sequence_length_type'): - op['sequence_length_type'] = destination_type_to_np_data_type(op.sequence_length_type) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/deconvolution_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/deconvolution_extender.py deleted file mode 100644 index 6cce2a74c98e37..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/deconvolution_extender.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class ConvolutionBackpropData_extender(Extender): - op = 'ConvolutionBackpropData' - - @staticmethod - def extend(op: Node): - common_backpropdata_extender(op) - - -class GroupConvolutionBackpropData_extender(Extender): - op = 'GroupConvolutionBackpropData' - - @staticmethod - def extend(op: Node): - common_backpropdata_extender(op) - - -def common_backpropdata_extender(op: Node): - for attr in ['strides', 'output_padding', 'pads_begin', 'pads_end', 'dilations']: - Extender.attr_to_list(op, attr) - - if op.has_valid('output_padding'): - op.output_padding = int64_array([0, 0] + op.output_padding) - - dim = len(op.strides) - - if op.has_valid('pads_begin') and op.has_valid('pads_end'): - pad = [[0, 0], [0, 0]] - pad.extend([[op.pads_begin[i], op.pads_end[i]] for i in range(dim)]) - - op['pad'] = int64_array(pad) - - op['spatial_dims'] = [i + 2 for i in range(dim)] - - if not op.has_valid('dilations'): - op['dilations'] = [1 for _ in range(dim)] - if not op.has_valid('strides'): - op['strides'] = [1 for _ in range(dim)] - - op['dilation'] = int64_array([1, 1] + op.dilations) - op['stride'] = int64_array([1, 1] + op.strides) - - op['infer'] = backpropdata_infer - - -def backpropdata_infer(op: Node): - Extender.use_shapes_from_ir(op) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/deformable_convolution_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/deformable_convolution_extender.py deleted file mode 100644 index e45df555c58ed3..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/deformable_convolution_extender.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender -from openvino.tools.mo.utils.ir_reader.extenders.conv_extender import Conv_extender - - -class DeformableConv_extender(Extender): - op = 'DeformableConvolution' - - @staticmethod - def extend(op: Node): - Conv_extender.extend(op) - op['bias_addable'] = False, - op['bias_term'] = False, - op['weights_index'] = 2 diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/einsum_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/einsum_extender.py deleted file mode 100644 index d04932bd5a15ed..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/einsum_extender.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class Einsum_extender(Extender): - op 
= 'Einsum' - - @staticmethod - def extend(op: Node): - einsum_name = op.soft_get('name', op.id) - if isinstance(op['equation'], list): - op['equation'] = ','.join(op['equation']) - elif not isinstance(op['equation'], str): - assert False, "Equation of Einsum node {} has incorrect format.".format(einsum_name) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/experimental_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/experimental_extender.py deleted file mode 100644 index 29125a79b311d3..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/experimental_extender.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class ExperimentalDetectronROIFeatureExtractor_extender(Extender): - op = 'ExperimentalDetectronROIFeatureExtractor' - - @staticmethod - def extend(op: Node): - Extender.attr_to_list(op, 'pyramid_scales') diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/eye_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/eye_extender.py deleted file mode 100644 index 7f25a4c8302507..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/eye_extender.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class EyeExtender(Extender): - op = 'Eye' - - @staticmethod - def extend(op: Node): - if op.has_valid('output_type'): - op['output_type'] = destination_type_to_np_data_type(op.output_type) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/fakequantize_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/fakequantize_extender.py deleted file mode 100644 index e32df4d3b0acec..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/fakequantize_extender.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class FakeQuantize_extender(Extender): - op = 'FakeQuantize' - - @staticmethod - def extend(op: Node): - op['stop_value_propagation'] = True diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/if_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/if_extender.py deleted file mode 100644 index 15fc73b2a42cf0..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/if_extender.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender -from openvino.tools.mo.utils.ir_reader.layer_to_class import copy_graph_with_ops - - -class IfExtender(Extender): - op = 'If' - - @staticmethod - def set_input_output_id(subgraph, input_port_map, output_port_map, num_of_in_ports, num_of_out_ports): - for node in subgraph.get_op_nodes(): - if not node.has_valid('id'): - continue - node_id = int(node.soft_get('id')) - for if_input_mapping_elem in input_port_map: - if node_id == 
if_input_mapping_elem['internal_layer_id']: - node['input_id'] = if_input_mapping_elem['external_port_id'] - for if_out_mapping_elem in output_port_map: - if node_id == if_out_mapping_elem['internal_layer_id']: - # If external_point ID is counted with inputs - if if_out_mapping_elem['external_port_id'] > num_of_out_ports: - node['output_id'] = if_out_mapping_elem['external_port_id'] - num_of_in_ports - # If external_point ID is counted from 0 - else: - node['output_id'] = if_out_mapping_elem['external_port_id'] - - @staticmethod - def extend(op: Node): - assert op.has('then_graph'), 'There is no "then_body" attribute in the If op {}.'.format(op.name) - assert op.has('else_graph'), 'There is no "else_body" attribute in the If op {}.'.format(op.name) - # Now op.body is an IREngine, we need to replace it with IREngine.graph - op.then_graph.graph.graph['cmd_params'] = op.graph.graph['cmd_params'] - op.then_graph.graph.graph['ir_version'] = op.graph.graph['ir_version'] - op.then_graph.graph.name = op.name + '/then_body' - - op.else_graph.graph.graph['cmd_params'] = op.graph.graph['cmd_params'] - op.else_graph.graph.graph['ir_version'] = op.graph.graph['ir_version'] - op.else_graph.graph.name = op.name + '/else_body' - op.then_graph = copy_graph_with_ops(op.then_graph.graph) - op.else_graph = copy_graph_with_ops(op.else_graph.graph) - - num_of_in_ports = len(op.in_ports()) - num_of_out_ports = len(op.out_ports()) - IfExtender.set_input_output_id(op.then_graph, op.then_input_port_map, op.then_output_port_map, num_of_in_ports, num_of_out_ports) - IfExtender.set_input_output_id(op.else_graph, op.else_input_port_map, op.else_output_port_map, num_of_in_ports, num_of_out_ports) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/interpolate_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/interpolate_extender.py deleted file mode 100644 index ffd17d4528a33b..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/interpolate_extender.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class Interpolate_extender(Extender): - op = 'Interpolate' - - @staticmethod - def extend(op: Node): - Extender.attr_to_list(op, 'axes') diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/loop_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/loop_extender.py deleted file mode 100644 index 284df400f241c0..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/loop_extender.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender -from openvino.tools.mo.utils.ir_reader.layer_to_class import copy_graph_with_ops - - -class LoopExtender(Extender): - op = 'Loop' - - @staticmethod - def extend(op: Node): - def normalize_port_map(port_map: dict): - for port in port_map: - for elem in ['axis', 'stride', 'part_size', 'start', 'end', 'purpose']: - if port.get(elem) is None: - port[elem] = None - - assert op.has('body'), 'There is no "body" attribute in the Loop op {}.'.format(op.name) - - # Now op.body is an IREngine, we need to replace it with IREngine.graph - op.body.graph.graph['cmd_params'] = op.graph.graph['cmd_params'] - op.body.graph.graph['ir_version'] = 
op.graph.graph['ir_version'] - op.body.graph.name = op.name + '/body' - - for node in op.body.graph.get_op_nodes(): - node['internal_layer_id'] = int(node.id) - - op.body = copy_graph_with_ops(op.body.graph) - - normalize_port_map(op.input_port_map) - normalize_port_map(op.output_port_map) - - # the 'external_port_id' uses end-to-end numbering of ports, but at this moment it is separate for input and - # output ports so we need to decrease the output por_id with a number of input ports - for record in op.output_port_map: - if record['external_port_id'] != -1: - record['external_port_id'] -= len(op.in_ports()) - - for edge in op.back_edges: - edge['from_layer'] = edge['from-layer'] - edge['to_layer'] = edge['to-layer'] - - edge['to_port'] = 0 - edge['from_port'] = 0 - - del(edge['from-layer']) - del(edge['to-layer']) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/non_max_suppression_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/non_max_suppression_extender.py deleted file mode 100644 index 97effc754e8ced..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/non_max_suppression_extender.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class NonMaxSuppressionExtender(Extender): - op = 'NonMaxSuppression' - - @staticmethod - def extend(op: Node): - if op.has_valid('output_type'): - op['output_type'] = destination_type_to_np_data_type(op.output_type) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/non_zero_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/non_zero_extender.py deleted file mode 100644 index 5b939dab2f60c8..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/non_zero_extender.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class NonZeroExtender(Extender): - op = 'NonZero' - - @staticmethod - def extend(op: Node): - op['output_type'] = destination_type_to_np_data_type(op.output_type) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/pad_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/pad_extender.py deleted file mode 100644 index 564818c18c2a49..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/pad_extender.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class Pad_extender(Extender): - op = 'Pad' - - @staticmethod - def extend(op: Node): - op['mode'] = op['pad_mode'] - del op['pad_mode'] diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/parameter_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/parameter_extender.py deleted file mode 100644 index 12927bd142c5eb..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/parameter_extender.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel 
Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender -from openvino.runtime import PartialShape, Dimension - - -class Parameter_extender(Extender): - op = 'Parameter' - - @staticmethod - def extend(op: Node): - assert op.has_valid('element_type'), 'Parameter node {} has missed element_type attr!'.format(op.name) - op['data_type'] = destination_type_to_np_data_type(op.element_type) - if op.shape == '': - op.shape = int64_array([]) - else: - Extender.attr_to_list(op, 'shape') - - # Remove brackets from shape splited by comma separator - if isinstance(op.shape[0], str) and op.shape[0][0] == '[': - op.shape[0] = op.shape[0][1:] - if isinstance(op.shape[-1], str) and op.shape[-1][-1] == ']': - op.shape[-1] = op.shape[-1][:-1] - - shape = op.shape.copy() - has_shapes_with_boundaries = False - for i, dim in enumerate(op.shape): - if dim == -1 or (isinstance(dim, str) and ".." in dim): - shape[i] = -1 - # Check only if dim is not int - if not isinstance(dim, int) and '..' in dim: - has_shapes_with_boundaries = True - shape = shape_array([int(d) if d not in [-1, '?'] else dynamic_dimension_value for d in shape]) - - if has_shapes_with_boundaries: - shape_list = [] - for dim in op.shape: - shape_list.append(Dimension(dim)) - - # This value is used only for serialization of partial shapes with boundaries - # for Parameter node. - # 'user_shape' is not used in shape inference, as propagation of partial shapes with boundaries - # is not implemented in MO. - op['user_shape'] = PartialShape(shape_list) - - # If 'user_shape' is not set, 'shape' attribute is used for serialization. - # 'shape' is also used for shape inference. 
- op.shape = shape diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/pooling_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/pooling_extender.py deleted file mode 100644 index 23220c465ebe41..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/pooling_extender.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class AvgPool_extender(Extender): - op = 'AvgPool' - - @staticmethod - def extend(op: Node): - common_pool_extender(op) - - if 'exclude-pad' in op: - op['exclude_pad'] = op['exclude-pad'] - del op['exclude-pad'] - - -class MaxPool_extender(Extender): - op = 'MaxPool' - - @staticmethod - def extend(op: Node): - common_pool_extender(op) - - -def common_pool_extender(op: Node): - for attr in ['strides', 'pads_begin', 'pads_end', 'kernel', 'dilations']: - Extender.attr_to_list(op, attr) - op['stride'] = int64_array([1, 1] + op.strides) - op['window'] = int64_array([1, 1] + op.kernel) - op['kernel_spatial'] = op.kernel - op['output_spatial_shape'] = None - - if op.has_valid('dilations'): - op['dilation'] = int64_array([1, 1] + op.dilations) - if op.has_valid('index_element_type'): - op['index_element_type'] = destination_type_to_np_data_type(op.index_element_type) - - op['batch_dims'] = int64_array([0]), - op['channel_dims'] = int64_array([1]), - - op['pool_method'] = 'max' if op.type == 'MaxPool' else 'avg' - - dim = len(op.pads_begin) - - assert dim in (1, 2, 3), '{}D {} not supported! 
Node name: {}'.format(dim, op.soft_get('type'), op.soft_get('name', op.id)) - - pad = [[0, 0], [0, 0]] - pad.extend([[op.pads_begin[i], op.pads_end[i]] for i in range(dim)]) - - op['pad'] = int64_array(pad) - - op['spatial_dims'] = [i + 2 for i in range(dim)] - - if op.has_valid('rounding_type') and op.rounding_type == 'ceil': - op['pooling_convention'] = 'full' diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/priorbox_clustered_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/priorbox_clustered_extender.py deleted file mode 100644 index 72c147128af196..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/priorbox_clustered_extender.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender -from openvino.tools.mo.utils.ir_reader.extenders.priorbox_extender import PriorBox_extender - - -class PriorBoxClustered_extender(Extender): - op = 'PriorBoxClustered' - - @staticmethod - def extend(op: Node): - op['V10_infer'] = True - - PriorBox_extender.attr_restore(op, 'width', value=1.0) - PriorBox_extender.attr_restore(op, 'height', value=1.0) - PriorBox_extender.attr_restore(op, 'variance') diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/priorbox_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/priorbox_extender.py deleted file mode 100644 index ad301efeb3d865..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/priorbox_extender.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.multi_box_prior import multi_box_prior_infer_mxnet -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class PriorBox_extender(Extender): - op = 'PriorBox' - - @staticmethod - def extend(op: Node): - op['V10_infer'] = True - - attrs = ['min_size', 'max_size', 'aspect_ratio', 'variance', 'fixed_ratio', 'fixed_size', 'density'] - for attr in attrs: - PriorBox_extender.attr_restore(op, attr) - - @staticmethod - def attr_restore(node: Node, attribute: str, value=None): - # Function to restore some specific attr for PriorBox & PriorBoxClustered layers - if not node.has_valid(attribute): - node[attribute] = [] if value is None else [value] - if isinstance(node[attribute], str): - node[attribute] = [] - else: - Extender.attr_to_list(node, attribute) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/random_uniform_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/random_uniform_extender.py deleted file mode 100644 index 943049587d2187..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/random_uniform_extender.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class RandomUniformExtender(Extender): - op = 'RandomUniform' - - @staticmethod - def extend(op: Node): - if op.has_valid('output_type'): - op['output_type'] = destination_type_to_np_data_type(op.output_type) diff --git 
a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/range_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/range_extender.py deleted file mode 100644 index 7b337c17209266..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/range_extender.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class RangeExtender(Extender): - op = 'Range' - - @staticmethod - def extend(op: Node): - if op.has_valid('output_type'): - op['output_type'] = destination_type_to_np_data_type(op.output_type) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/reorg_yolo_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/reorg_yolo_extender.py deleted file mode 100644 index f7b1da795ad9ac..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/reorg_yolo_extender.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class ReorgYolo_extender(Extender): - op = 'ReorgYolo' - - @staticmethod - def extend(op: Node): - op['batch_dims'] = int64_array([0]) - op['channel_dims'] = int64_array([1]) - op['spatial_dims'] = [2, 3] diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/shape_of_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/shape_of_extender.py deleted file mode 100644 index 7c3446fc505397..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/shape_of_extender.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class ShapeOfExtender(Extender): - op = 'ShapeOf' - - @staticmethod - def extend(op: Node): - if op.has_valid('output_type'): - op['output_type'] = destination_type_to_np_data_type(op.output_type) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/strided_slice_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/strided_slice_extender.py deleted file mode 100644 index 609f3d97cf96da..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/strided_slice_extender.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.ops.strided_slice import StridedSlice -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class StridedSlice_extender(Extender): - op = 'StridedSlice' - - @staticmethod - def extend(op: Node): - for attr in StridedSlice.get_mask_names(): - # We can not use op.has_and_set(attr) here as a condition, because it will return False if begin/end is - # 1D tensor and begin_mask/end_mask is equal to 0 - if op.has(attr) and op[attr] != '': - Extender.attr_to_list(op, attr) - else: - 
op[attr] = int64_array([]) - - op.begin_mask = int64_array([1 - i for i in op.begin_mask]) - op.end_mask = int64_array([1 - i for i in op.end_mask]) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/tensoriterator_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/tensoriterator_extender.py deleted file mode 100644 index 798603c8a998e1..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/tensoriterator_extender.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender -from openvino.tools.mo.utils.ir_reader.layer_to_class import copy_graph_with_ops - - -class TensorIterator_extender(Extender): - op = 'TensorIterator' - - @staticmethod - def extend(op: Node): - - def normalize_port_map(port_map: dict): - for port in port_map: - for elem in ['axis', 'stride', 'part_size', 'start', 'end']: - if port.get(elem) is None: - port[elem] = None - - assert op.has('body'), 'Something wrong with TensorIterator layer {}, please check!'.format(op.name) - - # Now op.body is an IREngine, we need to replace it with IREngine.graph - op.body.graph.graph['cmd_params'] = op.graph.graph['cmd_params'] - op.body.graph.graph['ir_version'] = op.graph.graph['ir_version'] - op.body.graph.name = op.name + '/body' - - for node in op.body.graph.get_op_nodes(): - node['internal_layer_id'] = int(node.id) - - op.body = copy_graph_with_ops(op.body.graph) - - normalize_port_map(op.input_port_map) - normalize_port_map(op.output_port_map) - - for edge in op.back_edges: - edge['from_layer'] = edge['from-layer'] - edge['to_layer'] = edge['to-layer'] - - del(edge['from-layer']) - del(edge['to-layer']) - - op['infer'] = Extender.use_shapes_from_ir diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/topk_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/topk_extender.py deleted file mode 100644 index 4b119288e7a4a3..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/topk_extender.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.passes.convert_data_type import destination_type_to_np_data_type -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class TopKExtender(Extender): - op = 'TopK' - - @staticmethod - def extend(op: Node): - if op.out_port(0).disconnected(): - op['remove_values_output'] = True - if op.has_valid('index_element_type'): - op['index_element_type'] = destination_type_to_np_data_type(op.index_element_type) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/variadic_split_extender.py b/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/variadic_split_extender.py deleted file mode 100644 index 541c8b0d8f2a1a..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/extenders/variadic_split_extender.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.utils.graph import Node -from openvino.tools.mo.utils.ir_reader.extender import Extender - - -class VariadicSplit_extender(Extender): - op = 'VariadicSplit' - - @staticmethod - def extend(op: Node): - op['out_ports_count'] = len(op.ports) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/scatter.py 
b/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/scatter.py deleted file mode 100644 index 844b41189e4cdb..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/scatter.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.scatter import ScatterUpdate, Scatter -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value - -class ScatterUpdateInternal(ScatterUpdate): - @staticmethod - def infer(node: Node): - updates_value = node.in_port(2).data.get_value() - if updates_value is not None and isinstance(updates_value, np.ma.masked_array) and updates_value.ndim == 1: - # we need to normalize masked_array so that the value infer works as expected - value = [item if item is not np.ma.masked else dynamic_dimension_value for item in updates_value] - updates_value = np.ma.masked_equal(value, dynamic_dimension_value).astype(dtype=updates_value.dtype) - node.in_port(2).data.set_value(updates_value) - ScatterUpdate.infer(node) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/squeeze.py b/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/squeeze.py deleted file mode 100644 index 70effa1f6f12c2..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/squeeze.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, is_fully_defined - - -class SqueezeInternal(Squeeze): - @staticmethod - def infer(node: Node): - if node.is_in_port_connected(1): - axis_value = node.in_port(1).data.get_value() - Squeeze.infer(node) - # preserve initial axis value - node.in_port(1).data.set_value(axis_value) - else: - # Squeeze without axes provided - node_name = node.soft_get('name', node.id) - input_shape = node.in_port(0).data.get_shape() - assert is_fully_defined( - input_shape), 'Squeeze dimensions are not defined for op "{}"'.format(node_name) - output_shape = [s for s in shape_array(input_shape).tolist() if s != 1] - node.out_port(0).data.set_shape(shape_array(output_shape)) - diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/unique.py b/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/unique.py deleted file mode 100644 index ebcc1021a486cf..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/unique.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.ops.op import Op - - -class UniqueInternal(Op): - op = 'Unique' - - def __init__(self, graph: Graph, attrs: dict): - mandatory_props = { - 'type': self.op, - 'op': self.op, - 'version': 'opset10', - 'infer': self.infer, - 'in_ports_count': 2, - 'out_ports_count': 4 - } - super().__init__(graph, mandatory_props, attrs) - - def supported_attrs(self): - return [ - 'sorted', - 'index_element_type', - 'count_element_type', - ] - - @staticmethod - def infer(node: Node): - input_shape = node.in_port(0).data.get_shape() - if node.is_out_port_connected(0): - if node.is_in_port_connected(1): - 
axis = node.in_port(1).data.get_value() - assert axis, "Unique must have constant axis." - out_shape = input_shape.copy() - out_shape[axis.item()] = dynamic_dimension - node.out_port(0).data.set_shape(out_shape) - else: - # no axis, means flattening - node.out_port(0).data.set_shape( - shape_array([dynamic_dimension])) - if node.is_out_port_connected(1): - node.out_port(1).data.set_shape(shape_array([dynamic_dimension])) - if node.is_out_port_connected(2): - node.out_port(2).data.set_shape(shape_array([dynamic_dimension])) - if node.is_out_port_connected(3): - node.out_port(3).data.set_shape(shape_array([dynamic_dimension])) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/unsqueeze.py b/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/unsqueeze.py deleted file mode 100644 index 41e1fd2f3776d0..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/internal_ops/unsqueeze.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.unsqueeze import Unsqueeze - - -class UnsqueezeInternal(Unsqueeze): - @staticmethod - def infer(node: Node): - axis_value = node.in_port(1).data.get_value() - Unsqueeze.infer(node) - # preserve initial axis value - node.in_port(1).data.set_value(axis_value) diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/layer_to_class.py b/tools/mo/openvino/tools/mo/utils/ir_reader/layer_to_class.py deleted file mode 100644 index cfbd1c9b4c91e2..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/layer_to_class.py +++ /dev/null @@ -1,438 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import os - -import numpy as np - -from openvino.tools.mo.back.MaxPool import MaxPool -from openvino.tools.mo.back.TopKNormalizer import TopKNormalizer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, strict_compare_tensors -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.GRU import GRU -from openvino.tools.mo.ops.ReduceOps import ReduceOp -from openvino.tools.mo.ops.activation_ops import Activation -from openvino.tools.mo.ops.clamp import AttributedClamp -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.ops.deconvolution import Deconvolution -from openvino.tools.mo.ops.dft import FFTBase -from openvino.tools.mo.ops.elementwise import Elementwise, UnaryElementwise, LogicalElementwise, BiasAdd, Div, Mul, Pow, \ - Sub -from openvino.tools.mo.ops.embedding_bag import EmbeddingBagBase -from openvino.tools.mo.ops.loop import Loop -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.pooling import Pooling -from openvino.tools.mo.ops.psroipooling import DeformablePSROIPoolingOp -from openvino.tools.mo.ops.scatter import Scatter -from openvino.tools.mo.ops.scatternd import ScatterNDBase -from openvino.tools.mo.ops.slice import OvSlice -from openvino.tools.mo.ops.split import Split, VariadicSplit -from openvino.tools.mo.utils.class_registration import update_registration -from openvino.tools.mo.utils.import_extensions import import_by_path -from openvino.tools.mo.utils.ir_reader.extender import Extender -from openvino.tools.mo.utils.ir_reader.internal_ops.squeeze import SqueezeInternal -from openvino.tools.mo.utils.ir_reader.internal_ops.unsqueeze import UnsqueezeInternal -from 
openvino.tools.mo.utils.ir_reader.internal_ops.unique import UniqueInternal -from openvino.tools.mo.utils.ir_reader.internal_ops.scatter import ScatterUpdateInternal - -# Operations not registered in collect_ops() function -custom_ops = { - 'AvgPool': Pooling, - 'BiasAdd': BiasAdd, - 'Convert': Cast, - 'ConvolutionBackpropData': Deconvolution, - 'DeformablePSROIPooling': DeformablePSROIPoolingOp, - 'Divide': Div, - 'GroupConvolution': Convolution, - 'GroupConvolutionBackpropData': Deconvolution, - 'GRUSequence': GRU, - 'Loop': Loop, - 'MaxPool': Pooling, - 'Multiply': Mul, - 'Power': Pow, - 'ScatterUpdate': ScatterUpdateInternal, - 'Slice': OvSlice, - 'Split': Split, - 'Squeeze': SqueezeInternal, - 'Subtract': Sub, - 'VariadicSplit': VariadicSplit, - 'Clamp': AttributedClamp, - 'Unique': UniqueInternal, - 'Unsqueeze': UnsqueezeInternal, -} - - -def collect_ops(path: str): - """ - A function to registrate all MO ops - :param path: Path to Model Optimizer folder - :return: - """ - import_by_path(os.path.join(path, 'mo', 'ops'), ['mo', 'ops'], 'openvino.tools.') - update_registration(classes=[Op, Activation, Elementwise, UnaryElementwise, LogicalElementwise, - EmbeddingBagBase, ReduceOp, Scatter, ScatterNDBase, FFTBase], - enabled_transforms=[], disabled_transforms=[], exclude_modules=set()) - - -def collect_extenders(path: str): - """ - A function to registrate all MO IR Reader extenders - :param path: Path to Model Optimizer folder - :return: - """ - import_by_path(os.path.join(path, 'mo', 'utils', 'ir_reader', 'extenders'), - ['mo', 'utils', 'ir_reader', 'extenders'], 'openvino.tools.') - update_registration(classes=[Extender], enabled_transforms=[], disabled_transforms=[], exclude_modules=set()) - - -def collect_node_outputs(node: Node) -> dict: - """ - Function to collects output connections of node. - :param node: node to collect connections - :return: dictionary of the form {out_port: [(input_port, destination_node_id)]} - """ - result = dict() - for out_port_idx, out_port in node.out_ports().items(): - dest_info = [] - for d in out_port.get_destinations(): - dest_info.append((d.idx, d.node.id)) - result[out_port_idx] = dest_info - return result - - -def restore_correct_ports(graph: Graph): - """ - Function renumbers from OV to MO port numbering and add ports to all nodes in graph. - :param graph: - :return: - """ - for node_id, attrs in graph.nodes(data=True): - if '_in_ports' not in attrs: - attrs['_in_ports'] = set() - if '_out_ports' not in attrs: - attrs['_out_ports'] = set() - - for u, v, k, d in graph.edges(data=True, keys=True): - from_node_attrs = graph.node[u] - to_node_attrs = graph.node[v] - is_control_flow = 'control_flow_edge' in d and d['control_flow_edge'] is True - - if 'in' in d: - in_port_id = d['in'] if not is_control_flow else 'control_flow_' + str(d['in']) - to_node_attrs['_in_ports'].update({in_port_id: {'control_flow': is_control_flow}}) - if 'out' in d: - node = Node(graph, u) - num_of_in_nodes = len(node.in_nodes()) - decremented_number = d['out'] - num_of_in_nodes - # Initially Const operation in IR has output port with number 1. But later the behaviour was changed - # so the output port become 0. This change was made to be consistent with the IR serializer in the OV which - # generates Const with output port 0. For the backward compatibility reason we need to decrement the Const - # output port number but for current version this number shouldn't be changed during reading the IR. 
- if node.type == 'Const' and d['out'] == 0: - decremented_number = d['out'] - out_port_id = decremented_number if not is_control_flow else 'control_flow_' + str(decremented_number) - from_node_attrs['_out_ports'].update({out_port_id: {'control_flow': is_control_flow}}) - d['out'] = decremented_number - - -def propagate_const_values(op: Node): - """ - Function propagates const value from input data node and reshape it to correct shape. - :param op: - :return: - """ - assert op.soft_get('type') == 'Const', 'Wrong operation type, {} instead of Const!' \ - ''.format(op.soft_get('type')) - assert 0 in op.in_nodes(), 'Can\'t propagate restored value to Const operation with name: {}, check input ports' \ - ''.format(op.soft_get('name')) - assert 0 in op.out_nodes(), 'Can\'t propagate restored value to Const operation with name: {}, check output ports' \ - ''.format(op.soft_get('name')) - - in_data_node = op.in_node() - out_data_node = op.out_node() - - value = in_data_node.value - assert len(op.out_node(0).out_nodes()) > 0, 'Const node {} have no consumers.'.format(op.soft_get('name')) - if op.out_node(0).out_node(0).type == 'BinaryConvolution': - # Unpack binary weights for binary convolution (revert PackBinaryWeights transformation) - weights_rounded = np.unpackbits(value) - weights_rounded.dtype = np.int8 - for elem in range(len(weights_rounded)): - if weights_rounded[elem] == 0: - weights_rounded[elem] -= 1 # pylint: disable=unsupported-assignment-operation - assert len(weights_rounded) % 8 == 0 - weights_rounded = weights_rounded.reshape([len(weights_rounded) // 8, 8]) # pylint: disable=no-member - weights_rounded = np.flip(weights_rounded, axis=1) - value = weights_rounded.flatten() - - op['shape'] = out_data_node.shape - # Reshape data node value for correct shape - if op['element_type'] in ['u4', 'i4']: - # Packed data types are custom from numpy perspective. - # Shape from the IR is incompatible with numpy value we store. 
- op['value'] = value - op['force_type'] = op['element_type'].upper() - op['force_shape'] = op.shape.copy() - else: - op['value'] = np.reshape(value, op.shape) - - -def groupconv_to_conv(op: Node): - """ - Function makes GroupConv op back to Conv op with weights reshaping - :param op: - :return: - """ - assert op.soft_get('type') == 'GroupConvolution', \ - 'Wrong operation type, {} instead of GroupConvolution!'.format(op.soft_get('type')) - - weights_shape = op.in_port(1).data.get_shape() - group = weights_shape[0] - new_shape = [weights_shape[1] * group, *weights_shape[2:]] - - weights_node = op.in_port(1).get_source().node - if weights_node.type == 'Const': - weights_node.value = np.reshape(weights_node.value, new_shape) - elif weights_node.type == 'Reshape': - # We remove reshape node added in ConvolutionWithGroupsResolver pass - assert strict_compare_tensors(weights_node.in_port(0).get_source().data.get_shape(), new_shape), \ - 'Weight shape and calculated shape mismatch in GroupConv node {}.'.format(op.name) - op.in_port(1).disconnect() - # We use add_destination method here to support case with multiple destinations of source port - weights_node.in_port(0).get_source().get_connection().add_destination(op.in_port(1)) - weights_node.in_port(0).disconnect() - op.graph.remove_node(weights_node.id) - elif weights_node.type == 'Convert' and weights_node.destination_type == 'f32'\ - and weights_node.in_port(0).get_source().node.type == 'Const': - # Support new FP16 IRs - const_node = weights_node.in_port(0).get_source().node - assert const_node.has_valid('value'), \ - 'Weights of GroupConv node {} have incorrect format'.format(op.name) - const_node.value = np.reshape(const_node.value, new_shape) - - else: - assert strict_compare_tensors(op.in_port(1).get_source().data.get_shape(), op.in_port(1).get_source().data.get_shape()), \ - 'Weight shape and calculated shape mismatch in GroupConv node {}.'.format(op.name) - # We need to set this attrs for correct shape infer as convolution - op['group'] = group - # The only way GroupConvolution with 'group' = 1 appears in IR is by converting from TF DepthwiseConv2dNative. - # In this case we need to specify 'op' parameter for the - # extensions.back.ConvolutionNormalizer.ConvolutionWithGroupsResolver to work properly. - # Otherwise there will be 'Convolution' instead 'GroupConvolution' in restored IR, since 'GroupConvolution' is - # extended as node with 'type' = 'Convolution' by IR reader - if group == 1: - op['op'] = 'DepthwiseConv2dNative' - op.type = 'Convolution' - - -def backprop_to_deconv(op: Node): - """ - Function changes BackpropData operations type to correct creation - :param op: - :return: - """ - assert op.soft_get('type') in ('ConvolutionBackpropData', 'GroupConvolutionBackpropData'), \ - 'Wrong operation type, {} instead of ConvolutionBackpropData/GroupConvolutionBackpropData!' \ - ''.format(op.soft_get('type')) - - if op.has_valid('output_padding'): - # In this case we need to create Deconvolution as Convolution - op['type_to_create'] = 'Convolution' - - -def ti_add_edge_attrs(op: Node): - """ - Function adds necessary edge attrs in TensorIterator node - :param op: - :return: - """ - assert op.soft_get('type') == 'TensorIterator', 'Wrong operation type, {} instead of TensorIterator!' 
\ - ''.format(op.soft_get('type')) - - i = 0 - for num in range(len(op.in_ports())): - op.in_port(num).external_port_id = i - i += 1 - for num in range(len(op.out_ports())): - op.out_port(num).external_port_id = i - i += 1 - - -def copy_input_blobs(op: Node, copy_op: Node): - """ - Function copy input blob data nodes from restored graph to copied one - :param op: Node from restored graph - :param copy_op: Node from copied graph - :return: - """ - for u, d in op.get_sorted_inputs(): - if 'bin' in d: - Op.create_and_connect_input_data_node(copy_op.graph, copy_op, - {'value': op.in_node(d['in']).value, - 'shape': op.in_node(d['in']).shape}, d) - - -# Map with preprocessing functions -preprocessing_op_nodes = { - 'Const': propagate_const_values, - 'GroupConvolution': groupconv_to_conv, - 'ConvolutionBackpropData': backprop_to_deconv, - 'GroupConvolutionBackpropData': backprop_to_deconv, -} - -# Map with postprocessing functions for nodes -postprocessing_op_nodes = { - 'TensorIterator': ti_add_edge_attrs, - 'TopK': TopKNormalizer.normalize_outputs, - 'MaxPool': MaxPool.normalize_outputs, -} - - -def restore_tensor_names(op: Node): - for out_port in op.ports: - # op.ports is our internal attribute, dictionary, where keys are numbers of output ports - # and values are tuples with shape and tensor name: - # {out_port_idx_1: (out_port_idx_1_shape, out_port_idx_1_tensor_name, out_port_idx_1_rt_info), - # out_port_idx_2: (out_port_idx_2_shape, out_port_idx_2_tensor_name, out_port_idx_2_rt_info)} - out_tensor_names = op.ports[out_port][1] - - # handle Constant operations with old style output port numbering - if op.soft_get('type') == 'Const': - assert len(op.ports) == 1, 'Something wrong with Constant node: {}, wrong number ' \ - 'of output ports: {}!'.format(op.soft_get('name'), len(op.ports)) - out_port = 0 - - out_port = out_port - len(op.in_nodes()) - - if out_tensor_names is not None: - # handle tensor names with commas and add them to dictionary as separate items - if out_tensor_names.find(',') >= 0: - str_to_replace = '' - out_tensor_names = (out_tensor_names.replace('\\,', str_to_replace)).split(',') - op.out_node(out_port)['fw_tensor_debug_info'] = [] - for out_tensor_name in out_tensor_names: - out_tensor_name = out_tensor_name.replace(str_to_replace, ',') - op.out_node(out_port)['fw_tensor_debug_info'].append((out_tensor_name, out_tensor_name)) - else: - op.out_node(out_port)['fw_tensor_debug_info'] = [(out_tensor_names, out_tensor_names)] - - -def copy_graph_with_ops(graph: Graph) -> Graph: - """ - Function to copy graph and apply extenders to appropriate nodes - :param graph: Graph to copy - :return:Copied graph with applied extenders - """ - new_graph = Graph() - new_graph.stage = 'back' - new_graph.graph = graph.graph - new_graph.inputs_order = graph.inputs_order - new_graph.outputs_order = graph.outputs_order - - node_connections = dict() - mapping_of_old_idx_into_new = dict() - - restore_correct_ports(graph) - - # Nodes preprocessing stage in source graph - # Firstly propagate values only for Const nodes, because other preprocessings - # assumes Const nodes are already preprocessed. - for op in graph.get_op_nodes(type='Const'): - preprocessing_op_nodes[op.type](op) - - for op in graph.get_op_nodes(): - if op.soft_get('type') != 'Const' and op.soft_get('type') in preprocessing_op_nodes: - preprocessing_op_nodes[op.type](op) - - # Create a new copy of graph with correct attributes (shape & type infer, backend attrs etc.) 
- for op in graph.get_op_nodes(): - - # Save input shapes restored from IR - op['old_input_shapes'] = list() - for n in op.in_nodes(): - op.old_input_shapes.append(int64_array(op.in_node(n).shape)) - - # Apply extenders to nodes in source graph - if op.type in Extender.registered_ops: - Extender.get_extender_class_by_name(op.type).extend(op) - else: - log.debug('Extender for node {} with type={} not found, please note.'.format(op.name, op.type)) - - # Add node with necessary type and extended attrs in new graph - op_type = op.soft_get('type_to_create', op.type) - - if op_type in custom_ops: - node = custom_ops[op_type](new_graph, op.attrs()).create_node() - else: - if op_type not in Op.registered_ops: - log.warning('Operation {} is not found in MO operations, please check it! ' - 'Simple shape infer function is used'.format(op_type)) - node = Op(new_graph, op.attrs()).create_node() - assert 'type' in node, 'Operation {} have no `type` attribute.'.format(node.soft_get('name')) - node['op'] = node.type - node['infer'] = Extender.use_shapes_from_ir - if 'ir_data_attrs' in op: - node['IE'] = [('layer', - [('id', lambda node: node.node), 'name', 'type', 'version'], - [('data', - list(op.ir_data_attrs.keys()), - []), - '@ports', - '@consts'])] - - else: - node = Op.get_op_class_by_name(op_type)(new_graph, op.attrs()).create_node() - - # Fill out_ports_count attribute - if 'out_ports_count' not in node and node.soft_get('type') != 'Result': - node['out_ports_count'] = len(op.out_edges()) - - # This attribute is no longer needed and we can delete it - if 'ir_data_attrs' in node: - del node['ir_data_attrs'] - - if op.has_and_set('need_copy_input_blobs'): - copy_input_blobs(op, node) - - # Collect node connections - mapping_of_old_idx_into_new[op.id] = node.id - node_connections[op.id] = collect_node_outputs(op) - - # Restore connections in new graph - for input_node_idx, its_outputs in list(node_connections.items()): - for out_port_idx, out_port_dest in its_outputs.items(): - for dest_in_port_idx, dest_node_idx in out_port_dest: - src = Node(new_graph, mapping_of_old_idx_into_new[input_node_idx]) - dst = Node(new_graph, mapping_of_old_idx_into_new[dest_node_idx]) - src.out_port(out_port_idx).connect(dst.in_port(dest_in_port_idx)) - - # Nodes postprocessing stage in new graph - for op in new_graph.get_op_nodes(): - # Call normalize node outputs for restored operations to connect temporary Result operations for disconnected - # output ports. We need to do that for correct shape inference. These Result operations will be removed during - # IR emitting. For TopK operation outputs normalizing we should use specific - # function TopKNormalizer.normalize_outputs. 
- if op.soft_get('type') != 'TopK': - Op.normalize_outputs(op) - - # Set correct_data_type attribute to Const data nodes to correct processing of restored values - if op.soft_get('type') == 'Const': - assert len(op.out_nodes()) == 1 and op.out_node(0).soft_get('kind') == 'data',\ - 'Const node {} not properly corrected to appropriate data node'.format(op.soft_get('name')) - op.out_node(0)['correct_data_type'] = True - - if op.has_and_set('rt_info'): - op.out_node(0)['rt_info'] = op.rt_info - - # operations postprocessing with some special types - if op.soft_get('type') in postprocessing_op_nodes: - postprocessing_op_nodes[op.type](op) - - restore_tensor_names(op) - - # clean up graph to shape inference - new_graph.clean_up() - - return new_graph diff --git a/tools/mo/openvino/tools/mo/utils/ir_reader/restore_graph.py b/tools/mo/openvino/tools/mo/utils/ir_reader/restore_graph.py deleted file mode 100644 index 5752a94d3126b4..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/ir_reader/restore_graph.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -from copy import copy - -from openvino.tools.mo.back.ConvolutionNormalizer import ConvolutionNormalizer, ConvolutionWithGroupsResolver -from openvino.tools.mo.back.ShapeOfConstFolding import ShapeOfConstFolding -from openvino.tools.mo.back.MarkNodesWithShapeValues import MarkNodesWithShapeValues -from openvino.tools.mo.back.PackBinaryWeights import PackBinaryWeights -from openvino.tools.mo.back.SpecialNodesFinalization import RemoveConstOps, CreateConstNodesReplacement -from openvino.tools.mo.back.StridedSliceMasksNormalizer import StridedSliceMasksNormalizer -from openvino.tools.mo.back.blob_normalizer import BlobNormalizer -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.passes.convert_data_type import data_type_str_to_precision -from openvino.tools.mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively -from openvino.tools.mo.pipeline.common import prepare_emit_ir -from openvino.tools.mo.utils.class_registration import apply_replacements_list -from openvino.tools.mo.utils.ir_engine.ir_engine import IREngine -from openvino.tools.mo.utils.ir_reader.layer_to_class import copy_graph_with_ops, collect_extenders, collect_ops -from openvino.tools.mo.utils.utils import get_mo_root_dir - - -def restore_graph_from_ir(path_to_xml: str, path_to_bin: str = None) -> (Graph, dict): - """ - Function to make valid graph and metadata for MO back stage from IR. - :param path_to_xml: - :param path_to_bin: - :return: (restored graph, meta data) - """ - ir = IREngine(path_to_xml, path_to_bin) - assert ir.graph.graph.get('ir_version') >= 10, 'IR version {} is not supported, ' \ - 'please generate actual IR for your model and use it.'.format(ir.graph.graph.get('ir_version')) - - path = get_mo_root_dir() - collect_ops(path) - collect_extenders(path) - - # Create a new copy of graph with correct attributes (shape & type infer, backend attrs etc.) - new_graph = copy_graph_with_ops(ir.graph) - - return new_graph, copy(ir.meta_data) - - -def save_restored_graph(graph: Graph, path: str, meta_data, name=None, rename_results=True): - """ - Function to apply all necessary transforms from back stage to prepare and save restored graph and metadata. 
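Taken together, restore_graph_from_ir() and save_restored_graph() from the removed restore_graph.py formed the public surface of the IR reader: read an IR back into an MO graph, then re-emit it after the back-stage transformations. A hedged usage sketch follows; the file paths are placeholders and the snippet only runs against a pre-removal installation that still ships openvino.tools.mo.

# Assumes the pre-removal openvino.tools.mo package; paths are placeholders.
from openvino.tools.mo.utils.ir_reader.restore_graph import (
    restore_graph_from_ir, save_restored_graph)

graph, meta_data = restore_graph_from_ir('model.xml', 'model.bin')
save_restored_graph(graph, path='restored_ir', meta_data=meta_data, name='model_restored')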
- :param graph: Graph to save - :param path: Path to saved IR - :param meta_data: Namespace with converting parameters restored from IR - :param name: Name for saved IR - :return: - """ - - if name is None: - name = graph.name - - if 'data_type' not in meta_data: - log.debug('Provided `meta_data` does not contain `data_type` parameter. Set `data_type`' - ' parameter value to `FP32`.') - # Set data_type to FP32. All restored constants will be saved in provided data type. - data_type = 'FP32' - - # We need to specify this attribute to pass graph transformations. This information will not be saved into IR. - # All constants and placeholders will be saved with same types as restored from IR - graph.graph['cmd_params'].data_type = data_type - else: - data_type = data_type_str_to_precision(graph.graph['cmd_params'].data_type) - - assert data_type in ['FP16', 'FP32'], '`data_type` value {} is not supported by MO,' \ - ' cannot save graph'.format(data_type) - - # List items order matters, do not change it. - transformation_list = [ - ConvolutionWithGroupsResolver, - ShapeOfConstFolding, - StridedSliceMasksNormalizer, - PackBinaryWeights, - BlobNormalizer, - ConvolutionNormalizer, - MarkNodesWithShapeValues, - ] - - # We need to run some specific passes from MO back stage. - apply_replacements_list(graph, transformation_list) - - # Transformations with enabled=False should be run manually. - for_graph_and_each_sub_graph_recursively(graph, RemoveConstOps().find_and_replace_pattern) - for_graph_and_each_sub_graph_recursively(graph, CreateConstNodesReplacement().find_and_replace_pattern) - - prepare_emit_ir(graph, data_type, path, name, meta_info=meta_data, rename_results=rename_results) diff --git a/tools/mo/openvino/tools/mo/utils/json_schema.py b/tools/mo/openvino/tools/mo/utils/json_schema.py deleted file mode 100644 index bf3cc5e22c32d2..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/json_schema.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -schema_dict = { - "definitions": {}, - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "Root", - "type": "array", - "default": [], - "items": { - "$id": "#root/items", - "title": "Items", - "type": "object", - "required": [ - "id", - "match_kind" - ], - "properties": { - "custom_attributes": { - "$id": "#root/items/custom_attributes", - "title": "Custom_attributes", - "type": "object", - "properties": { - } - }, - "id": { - "$id": "#root/items/id", - "title": "Id", - "type": "string", - "pattern": "^.*$", - "minLength": 1 - }, - "inputs": { - "$id": "#root/items/inputs", - "title": "Inputs", - "type": "array", - "default": [], - "items": { - "$id": "#root/items/inputs/items", - "title": "Items", - "type": "array", - "default": [], - "items": { - "$id": "#root/items/inputs/items/items", - "title": "Items", - "type": "object", - "properties": { - "node": { - "$id": "#root/items/inputs/items/items/node", - "title": "Node", - "type": "string", - "default": "", - "pattern": "^.*$" - }, - "port": { - "$id": "#root/items/inputs/items/items/port", - "title": "Port", - "type": "integer", - "default": 0 - } - }, - "required": ["node", "port"] - } - - } - }, - "instances": { - "$id": "#root/items/instances", - "title": "Instances", - "type": ["array", "object"], - "items": { - "$id": "#root/items/instances/items", - "title": "Items", - "type": "string", - "default": "", - "pattern": "^.*$" - } - }, - "match_kind": { - "$id": "#root/items/match_kind", - "title": 
"Match_kind", - "type": "string", - "enum": ["points", "scope", "general"], - "default": "points", - "pattern": "^.*$" - }, - "outputs": { - "$id": "#root/items/outputs", - "title": "Outputs", - "type": "array", - "default": [], - "items": { - "$id": "#root/items/outputs/items", - "title": "Items", - "type": "object", - "properties": { - "node": { - "$id": "#root/items/outputs/items/node", - "title": "Node", - "type": "string", - "default": "", - "pattern": "^.*$" - }, - "port": { - "$id": "#root/items/outputs/items/port", - "title": "Port", - "type": "integer", - "default": 0 - } - }, - "required": ["node", "port"] - } - - }, - "include_inputs_to_sub_graph": { - "$id": "#root/items/include_inputs_to_sub_graph", - "title": "Include_inputs_to_sub_graph", - "type": "boolean", - "default": False - }, - "include_outputs_to_sub_graph": { - "$id": "#root/items/include_outputs_to_sub_graph", - "title": "Include_outputs_to_sub_graph", - "type": "boolean", - "default": False - } - } - } -} diff --git a/tools/mo/openvino/tools/mo/utils/logger.py b/tools/mo/openvino/tools/mo/utils/logger.py deleted file mode 100644 index c04a71dcf9ce62..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/logger.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import importlib.util -import logging as log -import os -import re -import sys -from argparse import Namespace -from copy import copy - -# WA for abseil bug that affects logging while importing TF starting 1.14 version -# Link to original issue: https://github.com/abseil/abseil-py/issues/99 -if importlib.util.find_spec('absl') is not None: - import absl.logging - - log.root.removeHandler(absl.logging._absl_handler) # pylint: disable=c-extension-no-member - -handler_num = 0 - - -class LvlFormatter(log.Formatter): - format_dict = { - log.DEBUG: "[ %(asctime)s ] [ %(levelname)s ] [ %(module)s:%(lineno)d ] %(msg)s", - log.INFO: "[ %(levelname)s ] %(msg)s", - log.WARNING: "[ WARNING ] %(msg)s", - log.ERROR: "[ %(levelname)s ] %(msg)s", - log.CRITICAL: "[ %(levelname)s ] %(msg)s", - 'framework_error': "[ FRAMEWORK ERROR ] %(msg)s", - 'analysis_info': "[ ANALYSIS INFO ] %(msg)s" - } - - def __init__(self, lvl, fmt=None): - log.Formatter.__init__(self, fmt) - self.lvl = lvl - - def format(self, record: log.LogRecord): - if self.lvl == 'DEBUG': - self._style._fmt = self.format_dict[log.DEBUG] - else: - self._style._fmt = self.format_dict[record.levelno] - if 'is_warning' in record.__dict__.keys(): - self._style._fmt = self.format_dict[log.WARNING] - if 'framework_error' in record.__dict__.keys(): - self._style._fmt = self.format_dict['framework_error'] - if 'analysis_info' in record.__dict__.keys(): - self._style._fmt = self.format_dict['analysis_info'] - return log.Formatter.format(self, record) - - -class TagFilter(log.Filter): - def __init__(self, regex: str): - self.regex = regex - - def filter(self, record: log.LogRecord): - if record.__dict__['funcName'] == 'load_grammar': # for nx not to log into our logs - return False - if self.regex: - if 'tag' in record.__dict__.keys(): - tag = record.__dict__['tag'] - return re.findall(self.regex, tag) - else: - return False - return True # if regex wasn't set print all logs - - -def init_logger(lvl: str, silent: bool): - global handler_num - log_exp = os.environ.get('MO_LOG_PATTERN') - if silent: - lvl = 'ERROR' - fmt = LvlFormatter(lvl=lvl) - handler = log.StreamHandler() - handler.setFormatter(fmt) - logger = log.getLogger() - logger.setLevel(lvl) - 
logger.addFilter(TagFilter(regex=log_exp)) - if handler_num == 0 and len(logger.handlers) == 0: - logger.addHandler(handler) - handler_num += 1 - -def get_logger_state(): - logger = log.getLogger() - return logger.level, copy(logger.filters), copy(logger.handlers) - -def restore_logger_state(state: tuple): - level, filters, handlers = state - logger = log.getLogger() - logger.setLevel(level) - logger.filters = filters - logger.handlers = handlers - - -def progress_bar(function: callable): - """ - Decorator for model conversion pipeline progress display - Works in combination with function: mo.utils.class_registration.apply_transform - """ - - def wrapper(*args, **kwargs): - for arg in ['graph', 'curr_transform_num', 'num_transforms']: - msg = 'Progress bar decorator is enabled for Model Conversion API transformation applying cycle only. ' \ - 'Argument `{}` {}' - - assert arg in kwargs, msg.format(arg, 'is missing') - assert kwargs[arg] is not None, msg.format(arg, 'should not be None') - - if 'progress' in kwargs['graph'].graph['cmd_params'] and kwargs['graph'].graph['cmd_params'].progress: - bar_len = 20 - total_replacers_count = kwargs['num_transforms'] - - def progress(i): - return int((i + 1) / total_replacers_count * bar_len) - - def percent(i): - return (i + 1) / total_replacers_count * 100 - - end = '' if not kwargs['graph'].graph['cmd_params'].stream_output else '\n' - curr_i = kwargs['curr_transform_num'] - print('\rProgress: [{:{}}]{:>7.2f}% done'.format('.' * progress(curr_i), bar_len, percent(curr_i)), end=end) - - sys.stdout.flush() - - function(*args, **kwargs) - - return wrapper - -def progress_printer(argv: Namespace): - """ - A higher-order factory function returning a configurable callback displaying a progress bar - Depending on the configuration stored in 'argv' the progress bar can be one-line, multi-line, or silent. - """ - def _progress_bar(progress, total, completed, endline): - bar_len = 20 - - def dots(): - return '.' 
* int(progress * bar_len) - - print('\rProgress: [{:{}}]{:>7.2f}% done'.format(dots(), bar_len, progress*100), end=endline) - sys.stdout.flush() - - def no_progress_bar(progress, total, completed): - """ A 'dummy' progressbar which doesn't print anything """ - pass - - def oneline_progress_bar(progress, total, completed): - """ A callback that always prints the progress in the same line (mimics real GUI progress bar)""" - _progress_bar(progress, total, completed, '') - - def newline_progress_bar(progress, total, completed): - """ A callback that prints an updated progress bar in separate lines """ - _progress_bar(progress, total, completed, '\n') - - if "progress" in argv and argv.progress: - if "stream_output" in argv and argv.stream_output: - return newline_progress_bar - else: - return oneline_progress_bar - else: - return no_progress_bar diff --git a/tools/mo/openvino/tools/mo/utils/model_analysis.py b/tools/mo/openvino/tools/mo/utils/model_analysis.py deleted file mode 100644 index d76baf02d29ba1..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/model_analysis.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import sys - -from openvino.tools.mo.front.user_data_repack import UserDataRepack -from openvino.tools.mo.load.loader import LoadFinish -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils import class_registration -from openvino.tools.mo.utils.error import Error - - -class AnalysisResults: - _instance = None - - def __new__(cls, *args, **kwargs): - if not cls._instance: - cls._instance = super(AnalysisResults, cls).__new__(cls, *args, **kwargs) - cls.results = {} - cls.messages = [] - return cls._instance - - def __getattr__(self, item): - return self.results[item] - - def __setattr__(self, key, value): - self.results[key] = value - - @classmethod - def get_result(cls, key=None): - if key is not None: - if key in cls.results and cls.results[key] is not None: - return cls.results[key] - else: - return cls.results - - @classmethod - def add_result(cls, result, key=None): - if key is not None: - cls.results[key] = result - else: - cls.results.update(result) - - @classmethod - def get_messages(cls): - return cls.messages - - @classmethod - def add_message(cls, message): - cls.messages.append(message) - - -class AnalyzeAction(object): - registered_cls = [] - registered_ops = {} - excluded_replacers = [] - run_not_recursively = True - - def find_and_replace_pattern(self, graph: Graph): - analysis_results = AnalysisResults() - failed_analysers = [] - - try: - result, msg = self.analyze(graph) # pylint: disable=assignment-from-no-return - except SystemExit: - # the analysis transformation printing analysis results to the screen calls sys.exit(0) which in fact raises - # SystemExit exception, so we handle it here - sys.exit(0) - except: - failed_analysers.append(str(self.__class__)) - analysis_results.add_result(failed_analysers, 'failed_analysers') - result = None - msg = None - - if result is not None: - analysis_results.add_result(result) - if msg is not None: - analysis_results.add_message(msg) - - def analyze(self, graph: Graph): - raise Error('The method must be implemented in the sub-class') - - def run_before(self): - """ - Returns list of replacer classes which this replacer must be run before. 
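The progress_printer() factory above selects between a one-line, a multi-line, and a no-op progress callback depending on the progress and stream_output flags. The self-contained snippet below re-creates the dotted-bar formatting used by _progress_bar so the output is easy to picture; it is an illustration of the format, not the removed implementation itself.

import sys
import time

def progress_callback(progress, endline=''):
    # Same 20-character dotted bar and percentage layout as _progress_bar above.
    bar_len = 20
    dots = '.' * int(progress * bar_len)
    print('\rProgress: [{:{}}]{:>7.2f}% done'.format(dots, bar_len, progress * 100), end=endline)
    sys.stdout.flush()

for step in range(1, 6):
    progress_callback(step / 5)   # one-line mode: the bar overwrites itself in place
    time.sleep(0.1)
print()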
- :return: list of classes - """ - return [AnalysisCollectorAnchor, UserDataRepack] - - def run_after(self): - """ - Returns list of replacer classes which this replacer must be run after. - :return: list of classes - """ - return [LoadFinish] - - @classmethod - def class_type(cls): - return class_registration.ClassType.FRONT_REPLACER - - -class AnalysisCollectorAnchor(AnalyzeAction): - """ - All analyzers should depend on this one which is an anchor analyzer to develop custom post-processor of all - analyzers results. - """ - - def run_before(self): - return [] - - def analyze(self, graph: Graph): - pass - - -def graph_contains_scope(graph: Graph, scope: [str, tuple]): - """ - Checks whether the graph contains node(s) which name includes "scope" string. - :param graph: graph to check - :param scope: string or tuple with strings defining the scope - :return: the result of the check (True/False) - """ - if type(scope) is str: - return any([node.soft_get('name').find(scope) != -1 for node in graph.get_op_nodes()]) - else: - return any([graph_contains_scope(graph, s) for s in scope]) diff --git a/tools/mo/openvino/tools/mo/utils/pipeline_config.py b/tools/mo/openvino/tools/mo/utils/pipeline_config.py deleted file mode 100644 index 7ec54312c0b55e..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/pipeline_config.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import re - -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.simple_proto_parser import SimpleProtoParser - -# The list of rules how to map the value from the pipeline.config file to the dictionary with attributes. -# The rule is either a string or a tuple with two elements. In the first case the rule string is used as a key to -# search in the parsed pipeline.config file attributes dictionary and a key to save found value. In the second case the -# first element of the tuple is the key to save found value; the second element of the tuple is a string defining the -# path to the value of the attribute in the pipeline.config file. The path consists of the regular expression strings -# defining the dictionary key to look for separated with a '/' character. 
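The comment above describes the two rule shapes: a bare string key, or a (destination_key, 'regex/path'[, default_value]) tuple whose path components are matched one level at a time against the keys of the parsed dictionary. The standalone sketch below mirrors the idea of PipelineConfig._get_value_by_path (defined further down in this file) on a made-up fragment of a parsed pipeline.config, using one of the real rules from the list that follows.

import re

def get_value_by_path(params, path):
    # Recursively match each '/'-separated path component as a regex against the
    # keys of the nested dictionary (mirrors PipelineConfig._get_value_by_path).
    if not path or not isinstance(params, dict):
        return None
    for key, value in params.items():
        if re.match(re.compile(path[0]), key):
            if len(path) == 1:
                return value
            nested = get_value_by_path(value, path[1:])
            if nested is not None:
                return nested
    return None

# Hypothetical fragment of a parsed pipeline.config file.
parsed = {'image_resizer': {'fixed_shape_resizer': {'height': 300, 'width': 300}}}

rule = ('resizer_image_height', 'image_resizer/fixed_shape_resizer/height')
print(get_value_by_path(parsed, rule[1].split('/')))  # -> 300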
-mapping_rules = [ - 'num_classes', - # preprocessing block attributes - ('resizer_image_height', 'image_resizer/fixed_shape_resizer/height'), - ('resizer_image_width', 'image_resizer/fixed_shape_resizer/width'), - ('resizer_min_dimension', 'image_resizer/keep_aspect_ratio_resizer/min_dimension'), - ('resizer_max_dimension', 'image_resizer/keep_aspect_ratio_resizer/max_dimension'), - ('pad_to_max_dimension', 'image_resizer/keep_aspect_ratio_resizer/pad_to_max_dimension', False), - # anchor generator attributes - ('anchor_generator_height', 'first_stage_anchor_generator/grid_anchor_generator/height$', 256), - ('anchor_generator_width', 'first_stage_anchor_generator/grid_anchor_generator/width$', 256), - ('anchor_generator_height_stride', 'first_stage_anchor_generator/grid_anchor_generator/height_stride', 16), - ('anchor_generator_width_stride', 'first_stage_anchor_generator/grid_anchor_generator/width_stride', 16), - ('anchor_generator_scales', 'first_stage_anchor_generator/grid_anchor_generator/scales'), - ('anchor_generator_aspect_ratios', 'first_stage_anchor_generator/grid_anchor_generator/aspect_ratios'), - ('multiscale_anchor_generator_min_level', 'anchor_generator/multiscale_anchor_generator/min_level'), - ('multiscale_anchor_generator_max_level', 'anchor_generator/multiscale_anchor_generator/max_level'), - ('multiscale_anchor_generator_anchor_scale', 'anchor_generator/multiscale_anchor_generator/anchor_scale'), - ('multiscale_anchor_generator_aspect_ratios', 'anchor_generator/multiscale_anchor_generator/aspect_ratios'), - ('multiscale_anchor_generator_scales_per_octave', 'anchor_generator/multiscale_anchor_generator/scales_per_octave'), - # SSD anchor generator attributes - ('ssd_anchor_generator_min_scale', 'anchor_generator/ssd_anchor_generator/min_scale', 0.2), - ('ssd_anchor_generator_max_scale', 'anchor_generator/ssd_anchor_generator/max_scale', 0.95), - ('ssd_anchor_generator_num_layers', 'anchor_generator/ssd_anchor_generator/num_layers'), - ('ssd_anchor_generator_aspect_ratios', 'anchor_generator/ssd_anchor_generator/aspect_ratios'), - ('ssd_anchor_generator_scales', 'anchor_generator/ssd_anchor_generator/scales'), - ('ssd_anchor_generator_interpolated_scale_aspect_ratio', - 'anchor_generator/ssd_anchor_generator/interpolated_scale_aspect_ratio', 1.0), - ('ssd_anchor_generator_reduce_lowest', 'anchor_generator/ssd_anchor_generator/reduce_boxes_in_lowest_layer'), - ('ssd_anchor_generator_base_anchor_height', 'anchor_generator/ssd_anchor_generator/base_anchor_height', 1.0), - ('ssd_anchor_generator_base_anchor_width', 'anchor_generator/ssd_anchor_generator/base_anchor_width', 1.0), - # Proposal and ROI Pooling layers attributes - ('first_stage_nms_score_threshold', '.*_nms_score_threshold'), - ('first_stage_nms_iou_threshold', '.*_nms_iou_threshold'), - ('first_stage_max_proposals', '.*_max_proposals'), - ('num_spatial_bins_height', '.*/rfcn_box_predictor/num_spatial_bins_height'), - ('num_spatial_bins_width', '.*/rfcn_box_predictor/num_spatial_bins_width'), - ('crop_height', '.*/rfcn_box_predictor/crop_height'), - ('crop_width', '.*/rfcn_box_predictor/crop_width'), - 'initial_crop_size', - ('use_matmul_crop_and_resize', 'use_matmul_crop_and_resize', False), - ('add_background_class', 'add_background_class', True), - # Detection Output layer attributes - ('postprocessing_score_converter', '.*/score_converter'), - ('postprocessing_score_threshold', '.*/batch_non_max_suppression/score_threshold'), - ('postprocessing_iou_threshold', '.*/batch_non_max_suppression/iou_threshold'), - 
('postprocessing_max_detections_per_class', '.*/batch_non_max_suppression/max_detections_per_class'), - ('postprocessing_max_total_detections', '.*/batch_non_max_suppression/max_total_detections'), - ('share_box_across_classes', 'second_stage_box_predictor/.*/share_box_across_classes$', False), - # Variances for predicted bounding box deltas (tx, ty, tw, th) - ('frcnn_variance_x', 'box_coder/faster_rcnn_box_coder/x_scale', 10.0), - ('frcnn_variance_y', 'box_coder/faster_rcnn_box_coder/y_scale', 10.0), - ('frcnn_variance_width', 'box_coder/faster_rcnn_box_coder/width_scale', 5.0), - ('frcnn_variance_height', 'box_coder/faster_rcnn_box_coder/height_scale', 5.0) -] - - -class PipelineConfig: - """ - The class that parses pipeline.config files used to generate TF models generated using Object Detection API. - The class stores data read from the file in a plain dictionary for easier access using the get_param function. - """ - - def __init__(self, file_name: str): - self._raw_data_dict = dict() - self._model_params = dict() - self._raw_data_dict = SimpleProtoParser().parse_file(file_name) - if not self._raw_data_dict: - raise Error('Failed to parse pipeline.config file {}'.format(file_name)) - - self._initialize_model_params() - - @staticmethod - def _get_value_by_path(params: dict, path: list): - if not path or len(path) == 0: - return None - if not isinstance(params, dict): - return None - compiled_regexp = re.compile(path[0]) - for key in params.keys(): - if re.match(compiled_regexp, key): - if len(path) == 1: - return params[key] - else: - value = __class__._get_value_by_path(params[key], path[1:]) - if value is not None: - return value - return None - - def _update_param_using_rule(self, params: dict, rule: [str, tuple]): - if isinstance(rule, str): - if rule in params: - self._model_params[rule] = params[rule] - log.debug('Found value "{}" for path "{}"'.format(params[rule], rule)) - elif isinstance(rule, tuple): - if len(rule) != 2 and len(rule) != 3: - raise Error('Invalid rule length. Rule must be a tuple with two elements: key and path, or three ' - 'elements: key, path, default_value.') - value = __class__._get_value_by_path(params, rule[1].split('/')) - if value is not None: - log.debug('Found value "{}" for path "{}"'.format(value, rule[1])) - self._model_params[rule[0]] = value - elif len(rule) == 3: - self._model_params[rule[0]] = rule[2] - log.debug('There is no value path "{}". Set default value "{}"'.format(value, rule[2])) - - else: - raise Error('Invalid rule type. Rule can be either string or tuple') - - def _initialize_model_params(self): - """ - Store global params in the dedicated dictionary self._model_params for easier use. - :return: None - """ - - if 'model' not in self._raw_data_dict: - raise Error('The "model" key is not found in the configuration file. 
Looks like the parsed file is not ' - 'Object Detection API model configuration file.') - params = list(self._raw_data_dict['model'].values())[0] - for rule in mapping_rules: - self._update_param_using_rule(params, rule) - - def get_param(self, param: str): - if param not in self._model_params: - return None - return self._model_params[param] diff --git a/tools/mo/openvino/tools/mo/utils/replacement_pattern.py b/tools/mo/openvino/tools/mo/utils/replacement_pattern.py deleted file mode 100644 index e9750e32d08f18..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/replacement_pattern.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.pattern_match import apply_pattern - - -class ReplacementPattern(object): - # List of classes that shouldn't be treated as standalone replacers - # All intermediate infrastructure classes should be here - excluded_replacers = [] - - def find_and_replace_pattern(self, graph: Graph): - apply_pattern(graph, **self.pattern(), action=self.replace_pattern) # pylint: disable=no-member - - def run_before(self): - """ - Returns list of replacer classes which this replacer must be run before. - :return: list of classes - """ - return [] - - def run_after(self): - """ - Returns list of replacer classes which this replacer must be run after. - :return: list of classes - """ - return [] diff --git a/tools/mo/openvino/tools/mo/utils/runtime_info.py b/tools/mo/openvino/tools/mo/utils/runtime_info.py deleted file mode 100644 index fdf2c4c48ef929..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/runtime_info.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import abc -from collections import defaultdict -from typing import Dict - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.middle.passes.convert_data_type import np_data_type_to_destination_type - - -class RTInfo: - """ - Class that stores runtime information. - """ - - def __init__(self): - """ - Dictionary with runtime information. - Key is a tuple that contains name of runtime info attribute and version of the attribute. - Value is an instance of a class derived from RTInfoElement that represents a particular runtime info attribute. - - Example of usage: - rt_info = RTInfo() - rt_info.info[('old_api_map_order', 0)] = OldAPIMapOrder() - - """ - self.info = defaultdict(dict) - - def contains(self, attribute_name: str): - attr_count = [key[0] for key in list(self.info.keys())].count(attribute_name) - assert attr_count <= 1, 'Incorrect rt_info attribute, got more than one {}.'.format(attribute_name) - return attr_count > 0 - - def get_attribute_version(self, attribute_name: str): - for name, version in list(self.info.keys()): - if name == attribute_name: - return version - raise Exception("rt_info does not contain attribute with name {}".format(attribute_name)) - - -class RTInfoElement: - """ - Class that stores element of runtime information. - """ - - @abc.abstractmethod - def serialize(self, node) -> Dict: - """ - Serialize method for RTInfoElement. - """ - - @abc.abstractmethod - def get_version(self): - """ - Get version of RTInfoElement. - """ - - @abc.abstractmethod - def get_name(self): - """ - Get name of RTInfoElement. 
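Building on the "Example of usage" in the RTInfo docstring above, the short sketch below shows how a runtime-info attribute was typically attached and queried. It is a hedged example that assumes the pre-removal openvino.tools.mo package is still importable and uses OldAPIMapOrder, which is defined just below.

# Hedged sketch; requires the pre-removal openvino.tools.mo package.
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.utils.runtime_info import RTInfo, OldAPIMapOrder

rt_info = RTInfo()
order_attr = OldAPIMapOrder()
order_attr.old_api_transpose_parameter(int64_array([0, 2, 3, 1]))
rt_info.info[('old_api_map_order', 0)] = order_attr

print(rt_info.contains('old_api_map_order'))                # True
print(rt_info.get_attribute_version('old_api_map_order'))   # 0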
- """ - - -class OldAPIMapOrder(RTInfoElement): - """ - Class that stores transpose order required for obtaining IR in old API. - """ - - def __init__(self, version=0): - self.info = defaultdict(dict) - self.version = version - self.name = "old_api_map_order" - - def old_api_transpose_parameter(self, inv: int64_array): - self.info['inverse_order'] = inv - - def old_api_transpose_result(self, order: int64_array): - self.info['order'] = order - - def serialize_old_api_map_for_parameter(self, node) -> Dict: - if 'inverse_order' not in self.info: - return {} - return {'value': ','.join(map(str, self.info['inverse_order']))} - - def serialize_old_api_map_for_result(self, node) -> Dict: - if 'order' not in self.info: - return {} - return {'value': ','.join(map(str, self.info['order']))} - - def serialize(self, node) -> Dict: - result = {} - if node.soft_get('type') == 'Parameter': - result = self.serialize_old_api_map_for_parameter(node) - elif node.soft_get('type') == 'Result': - result = self.serialize_old_api_map_for_result(node) - return result - - def get_version(self): - return self.version - - def get_name(self): - return self.name - - -class OldAPIMapElementType(RTInfoElement): - """ - Class that stores legacy type required for obtaining IR in old API. - """ - def __init__(self, version=0): - self.info = defaultdict(dict) - self.version = version - self.name = "old_api_map_element_type" - - def set_legacy_type(self, legacy_type: np.dtype): - self.info['legacy_type'] = legacy_type - - def serialize(self, node) -> Dict: - if 'legacy_type' not in self.info: - return {} - return {'value': np_data_type_to_destination_type(self.info['legacy_type'])} - - def get_version(self): - return self.version - - def get_name(self): - return self.name diff --git a/tools/mo/openvino/tools/mo/utils/shape.py b/tools/mo/openvino/tools/mo/utils/shape.py deleted file mode 100644 index 7240795b023e6f..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/shape.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.ops.elementwise import Add -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.graph_utils import create_op_node_with_second_input -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.graph.port import Port -from openvino.tools.mo.ops.concat import Concat -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.ops.squeeze import Squeeze - - -def get_canonical_axis_index_node(rank: Node, axis: int) -> Node: - """ - Returns positive axis value - - :param rank: the node of 0D output shape to get rank of tensor from - :param axis: integer value from [-rank; rank - 1] - :return: node producing positive integer value of axis - """ - graph = rank.graph - name = rank.soft_get('name', rank.id) - if axis < 0: - axis = Const(graph, {'name': name + '/negative_axis', 'value': int64_array(axis)}).create_node() - add = Add(graph, {'name': name + '/positive_axis'}).create_node() - rank.out_port(0).connect(add.in_port(0)) - axis.out_port(0).connect(add.in_port(1)) - return add - else: - return Const(graph, {'name': name + '/positive_axis', 'value': int64_array(axis)}).create_node() - - -def get_range_node_of_idxs(rank: Node, begin: int, end: int, - include_begin: bool = True, include_end: bool = 
False) -> Node: - """ - Returns node that produces 1D output of values of range from begin to end (ex)/(in)cluding begin or end point - - :param rank: the node of 0D output shape to get rank of tensor from - :param begin: integer value from [-rank; rank - 1] - :param end: integer value from [-rank; +rank] - :param include_begin: boolean flag to include or exclude start point from range output - :param include_end: boolean flag to include or exclude end point from range output - :return: range node producing 1D output - """ - graph = rank.graph - name = rank.soft_get('name', rank.id) - - start_idx = get_canonical_axis_index_node(rank, begin) - end_idx = get_canonical_axis_index_node(rank, end) - - if not include_begin: - const = Const(graph, {'value': int64_array(1), 'name': name + '/exclude_begin/value'}).create_node() - add = Add(graph, {'name': name + '/exclude_begin'}).create_node() - start_idx.out_port(0).connect(add.in_port(0)) - const.out_port(0).connect(add.in_port(1)) - start_idx = add - - if include_end: - const = Const(graph, {'value': int64_array(1), 'name': name + '/including_end/value'}).create_node() - add = Add(graph, {'name': name + '/including_end'}).create_node() - end_idx.out_port(0).connect(add.in_port(0)) - const.out_port(0).connect(add.in_port(1)) - end_idx = add - - delta = Const(graph, {'name': name + '/delta', 'value': int64_array(1)}).create_node() - range_node = Range(graph, {'name': name + '/range_idxs'}).create_node() - - start_idx.out_port(0).connect(range_node.in_port(0)) - end_idx.out_port(0).connect(range_node.in_port(1)) - delta.out_port(0).connect(range_node.in_port(2)) - - return range_node - - -def get_shape_values_by_indices_node(shape_node: Node, indices_node: Node) -> Node: - """ - The function returns a node that produces values of the specified indices node of the input node 'shape_node' - - :param shape_node: the node of 1D output shape to get elements from - :param indices_node: the node of 1D output shape with the list of element indices to get - :return: node producing required elements of the node - """ - graph = shape_node.graph - axis = Const(graph, {'value': int64_array(0), 'name': shape_node.name + '/Axis'}).create_node() - gather_node = Gather(graph, {'name': shape_node.name + '/Gather'}).create_node() - - shape_node.out_port(0).connect(gather_node.in_port(0)) - indices_node.out_port(0).connect(gather_node.in_port(1)) - axis.out_port(0).connect(gather_node.in_port(2)) - return gather_node - - -def node_to_get_shape_value_of_indices(shape_node: Node, indices: list) -> Node: - """ - The function returns a node that produces values of the specified indices of the input node 'shape_node' - - :param shape_node: the node of 1D output shape to get elements from - :param indices: the list of element indices to get - :return: node producing required elements of the node - """ - graph = shape_node.graph - indices_node = Const(graph, {'value': int64_array(indices), 'name': shape_node.name + '/Indices'}).create_node() - - gather_node = get_shape_values_by_indices_node(shape_node, indices_node) - return gather_node - - -def get_shape_values_by_range_idxs(shape: Node, rank: Node, begin: int, end: int, - include_begin: bool = True, include_end: bool = False): - """ - Gathers shape values that are represented by range from begin to end (in)/(ex)cluding begin or end point - - :param shape: the node of 1D output shape to get elements from - :param rank: the node of 0D output shape to get rank of tensor from - :param begin: integer value from [-rank; rank - 
1] - :param end: integer value from [-rank; +rank] - :param include_begin: boolean flag to include or exclude start point from range output - :param include_end: boolean flag to include or exclude end point from range output - :return: gather node producing 1D output - """ - range_node = get_range_node_of_idxs(rank, begin, end, include_begin=include_begin, include_end=include_end) - return get_shape_values_by_indices_node(shape, range_node) - - -def node_to_get_batch_value(shape_node: Node) -> Node: - """ - The function returns a node that produces the batch value which is usually the element of the shape with index 0 - :param shape_node: the node of 1D output shape to get batch from - :return: the node producing batch value - """ - return node_to_get_shape_value_of_indices(shape_node, [0]) - - -def node_to_get_features_dimension_value(shape_node: Node) -> Node: - """ - The function returns a node that produces the feature dimension value - :param shape_node: the node of 1D output shape to get the feature dimension value from - :return: the node producing feature dimension value - """ - layout = shape_node.graph.graph['layout'] - if layout == 'NCHW': - return node_to_get_shape_value_of_indices(shape_node, [1]) - elif layout == 'NHWC': - return node_to_get_shape_value_of_indices(shape_node, [-1]) - else: - assert 'Unsupported layout "{}"'.format(layout) - - -def node_to_get_spatial_dimensions_value(shape_node: Node) -> Node: - """ - The function returns a node that produces the spatial dimension values - :param shape_node: the node of 1D output shape to get the spatial dimension values from - :return: the node producing the spatial dimension values - """ - layout = shape_node.graph.graph['layout'] - shape = shape_node.in_port(0).get_connection().get_source().data.get_shape() - assert shape is not None, 'The shape must be inferred before running this function' - - if layout == 'NCHW': - return node_to_get_shape_value_of_indices(shape_node, list(range(2, len(shape)))) - elif layout == 'NHWC': - return node_to_get_shape_value_of_indices(shape_node, list(range(1, len(shape) - 1))) - else: - assert 'Unsupported layout "{}"'.format(layout) - - -def new_shape_node_from_shape_nodes(input_shape_nodes: list): - """ - The function returns a node producing 1D tensor with concatenated shapes produced by nodes from "input_shape_nodes" - :param input_shape_nodes: list of nodes producing 1D tensors - :return: the node producing concatenated values of nodes from the "input_shape_nodes" - """ - assert len(input_shape_nodes) > 0, 'The list of input shape nodes should be non-empty' - new_shape_node = Concat(input_shape_nodes[0].graph, - {'axis': 0, - 'name': input_shape_nodes[0].soft_get('name', input_shape_nodes[0].id) + '/shapes_concat'} - ).create_node() - - for ind, input_node in enumerate(input_shape_nodes): - new_shape_node.add_input_port(ind) - new_shape_node.in_port(ind).connect(input_node.out_port(0)) - return new_shape_node - - -def get_shape_and_rank_nodes_by_port(port: Port, return_as_a_scalar: bool = True): - """ - The function returns nodes producing shape and rank of the data from the desired port in order to use those - operations on the middle/back phase - :param port: Port object that specifies node output port - :param return_as_a_scalar: boolean flag to return 1D or 0D rank - :return: shape and rank nodes - """ - input_node_name = port.node.soft_get('name', port.node.id) - graph = port.node.graph - - shape = Shape(graph, dict(name=input_node_name + '/ShapeOf')).create_node() - rank_1_d = 
Shape(graph, dict(name=input_node_name + '/1dRankOf')).create_node() - rank_1_d.in_port(0).connect(shape.out_port(0)) - shape.in_port(0).connect(port) - if not return_as_a_scalar: - return shape, rank_1_d - - rank = create_op_node_with_second_input(graph, Squeeze, int64_array([0]), {'name': input_node_name + '/0dRankOf'}, - rank_1_d) - return shape, rank - diff --git a/tools/mo/openvino/tools/mo/utils/simple_proto_parser.py b/tools/mo/openvino/tools/mo/utils/simple_proto_parser.py deleted file mode 100644 index 79dd1a556aafd1..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/simple_proto_parser.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import ast -import logging as log -import os - -from openvino.tools.mo.utils.error import Error - - -class SimpleProtoParser(object): - """ - This is a simple Proto2 parser that has limited functionality and is intended to parse configuration files for the - models created with Object Detection API only. The result of the parser is the dictionary. - """ - - _tokens = list() - _result = dict() - - def __init__(self): - self._tokens = list() - self._result = dict() - - @staticmethod - def _convert_value_to_correct_datatype(value: str): - """ - Converts string representation of the token to a value with proper data type. - :param value: string representation to be converted. - :return: converted to a correct data type value. - """ - if value == 'true': - return True - if value == 'false': - return False - try: - result = ast.literal_eval(value) - return result - except Exception: # if it is not possible to evaluate the value then consider it as a string - return value - - @staticmethod - def _convert_values_to_correct_datatypes(d: dict): - """ - Convert dictionary with values to correct data types. - :param d: dictionary with values. - :return: None - """ - for key, value in d.items(): - if isinstance(value, dict): - __class__._convert_values_to_correct_datatypes(value) - elif isinstance(value, list): - d[key] = [__class__._convert_value_to_correct_datatype(item) for item in value] - else: - d[key] = __class__._convert_value_to_correct_datatype(value) - - def _add_non_empty_token(self, token: str): - """ - Add token to the list of tokens if it is non-empty. - :param token: token to add - :return: None - """ - if token != "": - self._tokens.append(token) - - def _parse_list(self, result: list, token_ind: int): - prev_token = '[' - while token_ind < len(self._tokens): - cur_token = self._tokens[token_ind] - if cur_token == ']': - return token_ind + 1 - if cur_token == ',': - if prev_token == ',' or prev_token == '[': - raise Error('Missing value in the list at position {}'.format(token_ind)) - else: - result.append(cur_token) - token_ind += 1 - prev_token = cur_token - return token_ind - - def _parse_tokens(self, result: dict, token_ind: int, depth: int=0): - """ - Internal function that parses tokens. - :param result: current dictionary where to store parse result. - :param token_ind: index of the token from the tokens list to start parsing from. - :return: token index to continue parsing from. 
- """ - while token_ind < len(self._tokens): - cur_token = self._tokens[token_ind] - if cur_token == ',': # redundant commas that we simply ignore everywhere except list "[x, y, z...]" - token_ind += 1 - continue - if cur_token == '}': - return token_ind + 1 - next_token = self._tokens[token_ind + 1] - if next_token == '{': - result[cur_token] = dict() - token_ind = self._parse_tokens(result[cur_token], token_ind + 2, depth + 1) - elif next_token == ':': - next_next_token = self._tokens[token_ind + 2] - if next_next_token == '[': - result[cur_token] = list() - token_ind = self._parse_list(result[cur_token], token_ind + 3) - else: - if cur_token not in result: - result[cur_token] = self._tokens[token_ind + 2] - else: - if not isinstance(result[cur_token], list): - old_val = result[cur_token] - result[cur_token] = [old_val] - result[cur_token].append(self._tokens[token_ind + 2]) - token_ind += 3 - else: - raise Error('Wrong character "{}" in position {}'.format(next_token, token_ind)) - if depth != 0: - raise Error('Input/output braces mismatch.') - return token_ind - - def _convert_tokens_to_dict(self): - """ - Convert list of tokens into a dictionary with proper structure. - Then converts values in the dictionary to values of correct data types. For example, 'false' -> False, - 'true' -> true, '0.004' -> 0.004, etc. - :return: True if conversion is successful. - """ - try: - self._parse_tokens(self._result, 0) - except Exception as ex: - log.error('Failed to convert tokens to dictionary: {}'.format(str(ex))) - return False - self._convert_values_to_correct_datatypes(self._result) - return True - - def _split_to_tokens(self, file_content: str): - """ - The function gets file content as string and converts it to the list of tokens (all tokens are still strings). - :param file_content: file content as a string - """ - cur_token = '' - string_started = False - for line in file_content.split('\n'): - cur_token = '' - line = line.strip() - if line.startswith('#'): # skip comments - continue - for char in line: - if string_started: - if char == '"': # string ended - self._add_non_empty_token(cur_token) - cur_token = '' # start of a new string - string_started = False - else: - cur_token += char - elif char == '"': - self._add_non_empty_token(cur_token) - cur_token = '' # start of a new string - string_started = True - elif (char == " " and not string_started) or char == '\n': - self._add_non_empty_token(cur_token) - cur_token = '' - elif char in [':', '{', '}', '[', ']', ',']: - self._add_non_empty_token(cur_token) - self._tokens.append(char) - cur_token = '' - else: - cur_token += char - self._add_non_empty_token(cur_token) - self._add_non_empty_token(cur_token) - - def parse_from_string(self, file_content: str): - """ - Parses the proto text file passed as a string. - :param file_content: content of the file. - :return: dictionary with file content or None if the file cannot be parsed. - """ - self._split_to_tokens(file_content) - if not self._convert_tokens_to_dict(): - log.error('Failed to generate dictionary representation of file.') - return None - return self._result - - def parse_file(self, file_name: str): - """ - Parses the specified file and returns its representation as dictionary. - :param file_name: file name to parse. - :return: dictionary with file content or None if the file cannot be parsed. 
- """ - if not os.path.exists(file_name): - log.error('File {} does not exist'.format(file_name)) - return None - try: - with open(file_name) as file: - file_content = file.readlines() - except Exception as ex: - log.error('Failed to read file {}: {}'.format(file_name, str(ex))) - return None - return self.parse_from_string(''.join(file_content)) diff --git a/tools/mo/openvino/tools/mo/utils/str_to.py b/tools/mo/openvino/tools/mo/utils/str_to.py deleted file mode 100644 index 5c5f646497dee2..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/str_to.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -class StrTo(object): - @staticmethod - def tuple(type_of_elements: type, string: str): - if type_of_elements == int: - string = string.replace('L', '') - return tuple(type_of_elements(x) for x in string[1:-1].split(',') if x != '') - - @staticmethod - def list(string: str, type_of_elements: type, sep: str): - result = string.split(sep) - result = [type_of_elements(x) for x in result] - return result - - @staticmethod - def bool(val: str): - if val.lower() == "false": - return False - elif val.lower() == "true": - return True - else: - raise ValueError("Value is not boolean: " + val) diff --git a/tools/mo/openvino/tools/mo/utils/summarize_graph.py b/tools/mo/openvino/tools/mo/utils/summarize_graph.py deleted file mode 100644 index e9e87886c0356e..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/summarize_graph.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import os -import sys - -# do not print INFO and WARNING messages from TensorFlow -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -try: - import tensorflow.compat.v1 as tf_v1 -except ImportError: - import tensorflow as tf_v1 - -#in some environment suppressing through TF_CPP_MIN_LOG_LEVEL does not work -tf_v1.get_logger().setLevel("ERROR") - -unlikely_output_types = ['Const', 'Assign', 'NoOp', 'Parameter', 'Assert'] - - -def children(op_name: str, graph: tf_v1.Graph): - op = graph.get_operation_by_name(op_name) - return set(op for out in op.outputs for op in out.consumers()) - - -def summarize_graph(graph_def): - placeholders = dict() - outputs = list() - graph = tf_v1.Graph() - with graph.as_default(): # pylint: disable=not-context-manager - tf_v1.import_graph_def(graph_def, name='') - for node in graph.as_graph_def().node: # pylint: disable=no-member - if node.op == 'Placeholder': - node_dict = dict() - node_dict['type'] = tf_v1.DType(node.attr['dtype'].type).name - node_dict['shape'] = str(tf_v1.TensorShape(node.attr['shape'].shape)).replace(' ', '').replace('?', '-1') - placeholders[node.name] = node_dict - if len(children(node.name, graph)) == 0: - if node.op not in unlikely_output_types and node.name.split('/')[-1] not in unlikely_output_types: - outputs.append(node.name) - result = dict() - result['inputs'] = placeholders - result['outputs'] = outputs - return result - - -def main(): - sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)) - from openvino.tools.mo.front.tf.loader import load_tf_graph_def - - parser = argparse.ArgumentParser() - parser.add_argument("--input_model", type=str, help="Path to tensorflow model", default="") - parser.add_argument('--input_model_is_text', dest='text', - help='TensorFlow*: treat the input model file as a text protobuf format. 
If not specified, ' - 'the Model Optimizer treats it as a binary file by default.', action='store_true', - default=False) - parser.add_argument('--input_meta', action='store_true', - help='TensorFlow*: treat the input model file as a meta graph def format', default=False) - parser.add_argument("--input_checkpoint", type=str, help='TensorFlow variables file to load.', default="") - parser.add_argument('--saved_model_dir', type=str, default="", help="TensorFlow saved_model_dir") - parser.add_argument('--saved_model_tags', type=str, default="", - help="Group of tag(s) of the MetaGraphDef to load, in string \ - format, separated by ','. For tag-set contains multiple tags, all tags must be passed in.") - - argv = parser.parse_args() - if not argv.input_model and not argv.saved_model_dir: - print("[ ERROR ] Please, provide --input_model and --input_model_is_text if needed or --input_dir for saved " - "model directory") - sys.exit(1) - if argv.input_model and argv.saved_model_dir: - print("[ ERROR ] Both keys were provided --input_model and --input_dir. Please, provide only one of them") - sys.exit(1) - tags = argv.saved_model_tags.split(",") - graph_def, _, _, _ = load_tf_graph_def(graph_file_name=argv.input_model, is_binary=not argv.text, - checkpoint=argv.input_checkpoint, - model_dir=argv.saved_model_dir, saved_model_tags=tags) - summary = summarize_graph(graph_def) - print("{} input(s) detected:".format(len(summary['inputs']))) - for input in summary['inputs']: - print("Name: {}, type: {}, shape: {}".format(input, summary['inputs'][input]['type'], - summary['inputs'][input]['shape'])) - print("{} output(s) detected:".format(len(summary['outputs']))) - print(*summary['outputs'], sep="\n") - - -if __name__ == "__main__": # pragma: no cover - main() diff --git a/tools/mo/openvino/tools/mo/utils/telemetry_params.py b/tools/mo/openvino/tools/mo/utils/telemetry_params.py deleted file mode 100644 index ea099ce2a873e6..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/telemetry_params.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -telemetry_params = { - 'TID': "G-W5E9RNLD4H" -} diff --git a/tools/mo/openvino/tools/mo/utils/telemetry_stub.py b/tools/mo/openvino/tools/mo/utils/telemetry_stub.py deleted file mode 100644 index 142ebf2abac760..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/telemetry_stub.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -class Telemetry(object): - """ - Stab file for the Telemetry class which is used when Telemetry class is not available. 
- """ - - def __init__(self, *arg, **kwargs): - pass - - def send_event(self, *arg, **kwargs): - pass - - def send_error(self, *arg, **kwargs): - pass - - def start_session(self, *arg, **kwargs): - pass - - def end_session(self, *arg, **kwargs): - pass - - def force_shutdown(self, *arg, **kwargs): - pass - - def send_stack_trace(self, *arg, **kwargs): - pass diff --git a/tools/mo/openvino/tools/mo/utils/telemetry_utils.py b/tools/mo/openvino/tools/mo/utils/telemetry_utils.py deleted file mode 100644 index a3e4c248d95979..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/telemetry_utils.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import argparse -import numbers -from collections import Counter - -import numpy as np -from openvino.runtime import get_version as get_rt_version # pylint: disable=no-name-in-module,import-error - -from openvino.tools.mo.front.common.partial_infer.utils import is_fully_defined, unmask_shape, int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively -from openvino.tools.mo.utils.cli_parser import get_params_with_paths_list -from openvino.tools.mo.utils.telemetry_params import telemetry_params -from openvino.tools.mo.utils.utils import check_values_equal - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm - - -def init_mo_telemetry(app_name='Model Optimizer'): - return init_telemetry_class(tid=get_tid(), - app_name=app_name, - app_version=get_rt_version(), - backend='ga4', - enable_opt_in_dialog=False, - disable_in_ci=True - ) - - -def init_telemetry_class(tid, - app_name, - app_version, - backend, - enable_opt_in_dialog, - disable_in_ci): - # Init telemetry class - telemetry = tm.Telemetry(tid=tid, - app_name=app_name, - app_version=app_version, - backend=backend, - enable_opt_in_dialog=enable_opt_in_dialog, - disable_in_ci=disable_in_ci) - - # Telemetry is a singleton class and if it was already initialized in another tool - # some parameters will be incorrect, including app_name. - # In this case we need to force reinitialisation of telemetry. - if hasattr(telemetry, "backend") and telemetry.backend.app_name != app_name: - telemetry.init(tid=tid, - app_name=app_name, - app_version=app_version, - backend=backend, - enable_opt_in_dialog=enable_opt_in_dialog, - disable_in_ci=disable_in_ci) - return telemetry - - -def send_framework_info(framework: str): - """ - This function sends information about used framework. - :param framework: framework name. - """ - t = tm.Telemetry() - t.send_event('mo', 'framework', framework) - - -def get_tid(): - """ - This function returns the ID of the database to send telemetry. 
- """ - return telemetry_params['TID'] - - -def send_conversion_result(conversion_result: str, need_shutdown=False): - t = tm.Telemetry() - t.send_event('mo', 'conversion_result', conversion_result) - t.end_session('mo') - if need_shutdown: - t.force_shutdown(1.0) - - -def arg_to_str(arg): - # This method converts to string only known types, otherwise returns string with name of the type - from openvino.runtime import PartialShape, Shape, Type, Layout # pylint: disable=no-name-in-module,import-error - if isinstance(arg, (PartialShape, Shape, Type, Layout)): - return str(arg) - if isinstance(arg, (str, numbers.Number, bool)): - return str(arg) - return str(type(arg)) - - -def send_params_info(argv: argparse.Namespace, cli_parser: argparse.ArgumentParser): - """ - This function sends information about used command line parameters. - :param argv: command line parameters. - :param cli_parser: command line parameters parser. - """ - t = tm.Telemetry() - params_with_paths = get_params_with_paths_list() - for arg in vars(argv): - arg_value = getattr(argv, arg) - if not check_values_equal(arg_value, cli_parser.get_default(arg)): - if arg in params_with_paths: - # If command line argument value is a directory or a path to file it is not sent - # as it may contain confidential information. "1" value is used instead. - param_str = arg + ":" + str(1) - else: - param_str = arg + ":" + arg_to_str(arg_value) - - t.send_event('mo', 'cli_parameters', param_str) - - -def send_op_names_info(framework: str, graph: Graph): - """ - This function sends information about operations in model. - :param framework: framework name. - :param graph: model graph. - """ - op_counter = Counter() - - def gather_op_statistics(g: Graph, op_c: Counter = op_counter): - if hasattr(g, 'op_names_statistic'): - op_c += g.op_names_statistic - - for_graph_and_each_sub_graph_recursively(graph, gather_op_statistics) - - t = tm.Telemetry() - for op_name in op_counter: - t.send_event('mo', 'op_count', "{}_{}".format(framework, op_name), op_counter[op_name]) - - -def send_shapes_info(framework: str, graph: Graph): - """ - This function sends information about model input shapes. - :param framework: framework name. - :param graph: model graph. 
- """ - shapes = [] - for node in graph.get_op_nodes(): - op_type = node.soft_get('type', None) - if op_type == 'Parameter': - if 'shape' in node: - shapes.append(node['shape']) - t = tm.Telemetry() - - if shapes: - shape_str = "" - is_partially_defined = "0" - for shape in shapes: - shape_str += (np.array2string(int64_array(unmask_shape(shape))) if shape is not None else "Undefined") + "," - if not is_fully_defined(shape): - is_partially_defined = "1" - message_str = "{fw:" + framework + ",shape:\"" + shape_str[:-1] + "\"}" - t.send_event('mo', 'input_shapes', message_str) - t.send_event('mo', 'partially_defined_shape', - "{partially_defined_shape:" + is_partially_defined + ",fw:" + framework + "}") diff --git a/tools/mo/openvino/tools/mo/utils/tensorboard_util.py b/tools/mo/openvino/tools/mo/utils/tensorboard_util.py deleted file mode 100644 index 4759daea2c3935..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/tensorboard_util.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os - -# do not print INFO and WARNING messages from TensorFlow -os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' -try: - import tensorflow.compat.v1 as tf_v1 -except ImportError: - import tensorflow as tf_v1 - -# in some environment suppressing through TF_CPP_MIN_LOG_LEVEL does not work -tf_v1.get_logger().setLevel("ERROR") -from tensorflow.python.eager.context import graph_mode # pylint: disable=no-name-in-module,import-error - -try: - import tensorflow.contrib # pylint: disable=no-name-in-module,import-error -except: - pass # we try to import contrib for loading models that use contrib operations -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import refer_to_faq_msg - - -def dump_for_tensorboard(graph_def: tf_v1.GraphDef, logdir: str): - try: - # TODO: graph_def is a deprecated argument, use graph instead - print('Writing an event file for the tensorboard...') - with graph_mode(): - with tf_v1.summary.FileWriter(logdir=logdir, graph_def=graph_def) as writer: - writer.flush() - print('Done writing an event file.') - except Exception as err: - raise Error('Cannot write an event file for the tensorboard to directory "{}". 
' + - refer_to_faq_msg(36), logdir) from err diff --git a/tools/mo/openvino/tools/mo/utils/type_utils.py b/tools/mo/openvino/tools/mo/utils/type_utils.py deleted file mode 100644 index 7cb082d2c05df2..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/type_utils.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log - -import numpy as np - -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.pipeline.common import convert_const_node_value_type -from openvino.tools.mo.utils.error import Error - -np_map_cast = {bool: lambda x: bool_cast(x), - np.int8: lambda x: np.int8(x), - np.int16: lambda x: np.int16(x), - np.int32: lambda x: np.int32(x), - np.int64: lambda x: np.int64(x), - np.uint8: lambda x: np.uint8(x), - np.uint16: lambda x: np.uint16(x), - np.uint32: lambda x: np.uint32(x), - np.uint64: lambda x: np.uint64(x), - np.float16: lambda x: np.float16(x), - np.float32: lambda x: np.float32(x), - np.double: lambda x: np.double(x), - str: lambda x: str(x)} - - -def bool_cast(x): - if isinstance(x, str): - return False if x.lower() in ['false', '0'] else True if x.lower() in ['true', '1'] else 'unknown_boolean_cast' - else: - return bool(x) - - -def override_data_type_of_constant(node: Node, lhs_idx: int = 0, rhs_idx: int = 1): - in_type_0 = node.in_port(lhs_idx).get_data_type() - in_type_1 = node.in_port(rhs_idx).get_data_type() - if in_type_0 != in_type_1: - # in case of input values data type mismatch we try to change the type of the constant to match the type of - # another input. - in_node_0 = node.in_port(0).get_source().node - in_node_1 = node.in_port(1).get_source().node - - if in_node_0.op == 'Const': - node_to_convert, src_type, dst_type = in_node_0, in_type_0, in_type_1 - elif in_node_1.op == 'Const': - node_to_convert, src_type, dst_type = in_node_1, in_type_1, in_type_0 - else: - raise Error("{} operation '{}' has inputs of different data types: '{}' and '{}' " - "that cannot be aligned".format(node.soft_get('op'), - node.soft_get('name'), - in_type_0, - in_type_1)) - log.error("Changing Const node '{}' data type from {} to {} for {} operation".format( - node_to_convert.soft_get('name', node_to_convert.id), src_type, dst_type, node.soft_get('op')), - extra={'is_warning': True}) - convert_const_node_value_type(node_to_convert, dst_type) diff --git a/tools/mo/openvino/tools/mo/utils/unsupported_ops.py b/tools/mo/openvino/tools/mo/utils/unsupported_ops.py deleted file mode 100644 index ef189269c4e328..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/unsupported_ops.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import collections - -from openvino.tools.mo.graph.graph import Node, Graph - - -class UnsupportedOps(object): - def __init__(self, graph: Graph): - self.graph = graph - # map op to a list of node names - self.unsupported = collections.defaultdict(list) - - def add(self, node: Node): - op = node.op if node.has_valid('op') else '' - name = node.name if node.has_valid('name') else '' - self.unsupported[op].append(name) - - def report(self, reporter, header=None): - if len(self.unsupported) > 0: - if header: - reporter(header) - for k, v in self.unsupported.items(): - reporter(' ' * 4 + str(k) + ' (' + str(len(v)) + ')') - for node_name in v: - reporter(' ' * 8 + node_name) diff --git a/tools/mo/openvino/tools/mo/utils/utils.py b/tools/mo/openvino/tools/mo/utils/utils.py deleted file mode 
100644 index 84301ff5d92d7f..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/utils.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import functools -import os -import re -import warnings -from typing import Callable - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm - - -def refer_to_faq_msg(question_num: int): - try: - t = tm.Telemetry() - t.send_event('mo', 'error_info', "faq:" + str(question_num)) - except Exception: - # Telemetry can be not initialized if it is used in MO IR Reader - pass - - return '\n For more information please refer to Model Conversion API FAQ, question #{0}. ' \ - '(https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_prepare_model_Model_Optimizer_FAQ.html' \ - '?question={0}#question-{0})'.format(question_num) - - -def check_values_equal(val1, val2): - # This method is needed to check equality of values where some values can be None - if val1 is None and val2 is None: - return True - if val1 is None: - return False - if val2 is None: - return False - return val1 == val2 - - -class NamedAttrsClass: - def __init__(self, class_attrs: dict): - for key, val in class_attrs.items(): - self.__setattr__(key, val) - - -def match_shapes(pattern: np.array, shape: np.array): - """ Check if shape matches shape pattern handling undefined dimension and 0 in the pattern. """ - # Elements with value 0 and undefined values in pattern are just ignored. Other elements should match. - if pattern.size != shape.size: - return False - indices = [i for i, n in enumerate(pattern) if n != 0 and n is not dynamic_dimension] - return np.ma.allequal(pattern[indices], shape[indices]) - - -def symm_match_shapes(shape1: np.array, shape2: np.array): - """ Check if shape matches shape pattern handling -1 and 0 in the pattern. """ - # Elements with values -1 and 0 in both shapes are just ignored. - # Other elements should match. Undefined elements can be one side only. - return match_shapes(shape1, shape2) or match_shapes(shape2, shape1) - - -def deprecated_api(class_name=None, new_method_name=None): - def deprecated(func): - @functools.wraps(func) - def deprecation_message(*args, **kwargs): - dep_msg = "Call to deprecated function {}. ".format(func.__name__) - if class_name is not None: - dep_msg += "Please use {}.{} method" \ - "".format(class_name.__name__ if not isinstance(class_name, str) else class_name, - func.__name__ if new_method_name is None else new_method_name) - warnings.warn(dep_msg, DeprecationWarning, stacklevel=2) - return func(*args, **kwargs) - - return deprecation_message - - return deprecated - - -def array_to_str(node, attr): - if not node.has_valid(attr): - return None - else: - return ','.join(map(str, node[attr])) - - -def shrink_str_value(value: np.array, max_symbols=100): - value = str(value) - if len(value) > max_symbols: - value = value.strip('\n')[:max_symbols - 3] + '...' - return value - - -def files_by_pattern(dir: str, pattern: str, files_only=True, add_prefix=False): - """ - Return a list of files and directories (or only files if the files_only is set to True) in the directory dir that - match pattern string pattern. 
- :param dir: Directory to search for files - :param pattern: string defining pattern name - :param files_only: flag to include only files (not directories) to the result - :param add_prefix: flag to include the prefix string to the file names - :return: list of file and directory names - """ - pattern_compiled = re.compile(pattern) - matched_file_names = [] - for file_name in os.listdir(dir): - if re.match(pattern_compiled, file_name) and (not files_only or os.path.isfile(os.path.join(dir, file_name))): - matched_file_names.append(os.path.join(dir, file_name) if add_prefix else file_name) - return matched_file_names - - -def get_mo_root_dir(): - """ - Return the absolute path to the Model Optimizer root directory (where mo folder is located) - :return: path to the MO root directory - """ - return os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(os.path.realpath(__file__))), os.pardir, - os.pardir)) - - -def group_by_with_binary_predicate(xs: list, predicate: Callable) -> list: - """ - It is an analogue of the function groupby from itertools, but with a binary predicate. - In other words, group_by_with_binary_predicate generates a break or new group every time - the value of the predicate function is False. - :param xs: list of grouped value - :param predicate: criterion of equality - :return: grouped list - """ - if not xs: - return [] - prev = xs[0] - sequence = [prev] - result = [] - for x in xs[1:]: - if predicate(prev, x): - sequence.append(x) - prev = x - else: - result.append(sequence) - prev = x - sequence = [prev] - result.append(sequence) - return result - - -def unique_by(xs: list, predicate: Callable) -> list: - """ - This function groups elements of the list xs using 'predicate', and then takes one element from each group. 
- :param xs: input list - :param predicate: grouping criterion which is some binary predicate - :return: list with unique elements - """ - groups = group_by_with_binary_predicate(xs, predicate) - return [group[0] for group in groups] diff --git a/tools/mo/openvino/tools/mo/utils/version.py b/tools/mo/openvino/tools/mo/utils/version.py deleted file mode 100644 index 86bb346ac4bb18..00000000000000 --- a/tools/mo/openvino/tools/mo/utils/version.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import re -import subprocess # nosec -import sys - -from openvino.runtime import get_version as get_ie_version - -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.find_ie_version import find_ie_version -from openvino.tools.mo.utils.utils import get_mo_root_dir - - -def extract_release_version(version: str): - patterns = [ - # captures release version set by CI for example: '2021.1.0-1028-55e4d5673a8' - r"^([0-9]+).([0-9]+)*", - # captures release version generated by MO from release branch, for example: 'custom_releases/2021/1_55e4d567' - r"_releases/([0-9]+)/([0-9]+)_*" - ] - - for pattern in patterns: - m = re.search(pattern, version) - if m and len(m.groups()) == 2: - return m.group(1), m.group(2) - return None, None - - -def simplify_version(version: str): - release_version = extract_release_version(version) - if release_version == (None, None): - return "custom" - return "{}.{}".format(*release_version) - - -def extract_hash_from_version(full_version: str): - res = re.findall(r'[-_]([a-f0-9]{7,40})', full_version) - if len(res) > 0: - return res[0] - else: - return None - - - -def get_version_file_path(): - return os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, "version.txt") - - -def generate_mo_version(): - """ - Function generates version like in cmake - custom_{branch_name}_{commit_hash} - """ - try: - mo_dir = get_mo_root_dir() - branch_name = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=mo_dir).strip().decode() - commit_hash = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=mo_dir).strip().decode() - return "custom_{}_{}".format(branch_name, commit_hash) - except Exception as e: - return "unknown version" - - -def get_version(): - version_txt = get_version_file_path() - if not os.path.isfile(version_txt): - return generate_mo_version() - with open(version_txt) as f: - return f.readline().replace('\n', '') - - -def get_simplified_mo_version(): - return simplify_version(get_version()) - - -def get_simplified_ie_version(env=dict(), version=None): - if version is None: - try: - version = subprocess.check_output([sys.executable, os.path.join(os.path.dirname(__file__), "ie_version.py")], timeout=2, env=env).strip().decode() - except: - return "ie not found" - - # To support legacy OV versions - m = re.match(r"^([0-9]+).([0-9]+).(.*)", version) - if m and len(m.groups()) == 3: - return simplify_version(m.group(3)) - return simplify_version(version) - - -class SingletonMetaClass(type): - def __init__(self, cls_name, super_classes, dic): - self.__single_instance = None - super().__init__(cls_name, super_classes, dic) - - def __call__(cls, *args, **kwargs): - if cls.__single_instance is None: - cls.__single_instance = super(SingletonMetaClass, cls).__call__(*args, **kwargs) - return cls.__single_instance - - -class VersionChecker(metaclass=SingletonMetaClass): - def __init__(self): - self.runtime_checked = False - self.mo_version = 
None - self.ie_version = None - self.mo_simplified_version = None - self.ie_simplified_version = None - - def get_mo_version(self): - if self.mo_version: - return self.mo_version - self.mo_version = get_version() - return self.mo_version - - def get_ie_version(self): - if self.ie_version: - return self.ie_version - self.ie_version = get_ie_version() - return self.ie_version - - def get_mo_simplified_version(self): - if self.mo_simplified_version: - return self.mo_simplified_version - self.mo_simplified_version = simplify_version(self.get_mo_version()) - return self.mo_simplified_version - - def get_ie_simplified_version(self): - if self.ie_simplified_version: - return self.ie_simplified_version - self.ie_simplified_version = get_simplified_ie_version(env=os.environ) - return self.ie_simplified_version - - def check_runtime_dependencies(self, silent=True): - if not self.runtime_checked: - def raise_ie_not_found(): - raise Error("Could not find the OpenVINO or Python API.\n" - "Consider building the OpenVINO and Python APIs from sources or " - "try to install OpenVINO (TM) Toolkit using pip \npip install openvino") - - try: - if not find_ie_version(silent=silent): - raise_ie_not_found() - except Exception as e: - import logging as log - if log is not None: - log.error(e) - raise_ie_not_found() - self.runtime_checked = True diff --git a/tools/mo/requirements.txt b/tools/mo/requirements.txt deleted file mode 100644 index fea66495f303ee..00000000000000 --- a/tools/mo/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ --c ../constraints.txt -numpy>=1.16.6,<2.0.0 -networkx -defusedxml -openvino-telemetry -packaging diff --git a/tools/mo/requirements_caffe.txt b/tools/mo/requirements_caffe.txt deleted file mode 100644 index 2806576890500c..00000000000000 --- a/tools/mo/requirements_caffe.txt +++ /dev/null @@ -1,7 +0,0 @@ --c ../constraints.txt -numpy>=1.16.6,<1.27 -networkx -protobuf -defusedxml -requests -fastjsonschema \ No newline at end of file diff --git a/tools/mo/requirements_dev.txt b/tools/mo/requirements_dev.txt deleted file mode 100644 index 5798a0ba9f7722..00000000000000 --- a/tools/mo/requirements_dev.txt +++ /dev/null @@ -1,9 +0,0 @@ --c ../constraints.txt -coverage -astroid -pylint -pyenchant -defusedxml -requests -pytest -fastjsonschema \ No newline at end of file diff --git a/tools/mo/requirements_kaldi.txt b/tools/mo/requirements_kaldi.txt deleted file mode 100644 index 476ec8dab6535a..00000000000000 --- a/tools/mo/requirements_kaldi.txt +++ /dev/null @@ -1,8 +0,0 @@ --c ../constraints.txt -# wa: conversion for stateful models is failed on higher numpy versions -numpy>=1.16.6,<1.25; python_version<"3.12" -numpy>=1.16.6,<1.27; python_version>="3.12" -networkx -defusedxml -requests -fastjsonschema \ No newline at end of file diff --git a/tools/mo/requirements_onnx.txt b/tools/mo/requirements_onnx.txt deleted file mode 100644 index 28484f314a9d60..00000000000000 --- a/tools/mo/requirements_onnx.txt +++ /dev/null @@ -1,8 +0,0 @@ --c ../constraints.txt -numpy>=1.16.6,<1.27 -onnx -networkx -defusedxml -requests -fastjsonschema -protobuf \ No newline at end of file diff --git a/tools/mo/requirements_tf.txt b/tools/mo/requirements_tf.txt deleted file mode 100644 index fb19c216e955ad..00000000000000 --- a/tools/mo/requirements_tf.txt +++ /dev/null @@ -1,8 +0,0 @@ --c ../constraints.txt -h5py -tensorflow>=1.15.5,<2.19.0 -numpy>=1.16.6,<1.27 -networkx -defusedxml -requests -fastjsonschema diff --git a/tools/mo/requirements_tf2.txt b/tools/mo/requirements_tf2.txt deleted file mode 100644 index 
50df4160c669d3..00000000000000 --- a/tools/mo/requirements_tf2.txt +++ /dev/null @@ -1,8 +0,0 @@ --c ../constraints.txt -h5py -tensorflow>=2.5,<2.19.0 -numpy>=1.16.6,<1.27 -networkx -defusedxml -requests -fastjsonschema diff --git a/tools/mo/setup.py b/tools/mo/setup.py deleted file mode 100644 index c2b50ac656dfd2..00000000000000 --- a/tools/mo/setup.py +++ /dev/null @@ -1,175 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -""" -Use this script to create a wheel with Model Optimizer code: - -$ python setup.py sdist bdist_wheel -""" - -import os -import re -from pathlib import Path -from shutil import copyfile, copy - -from setuptools import setup, find_namespace_packages -from setuptools.command.build_py import build_py -from setuptools.command.install import install - -from typing import Dict, List - -prefix = 'openvino/tools/mo/' -SETUP_DIR = Path(__file__).resolve().parent / Path(prefix) - - -def read_constraints(path: str='../constraints.txt') -> Dict[str, List[str]]: - """ - Read a constraints.txt file and return a dict - of {package_name: [required_version_1, required_version_2]}. - The dict values are a list because a package can be mentioned - multiple times, for example: - mxnet~=1.2.0; sys_platform == 'win32' - mxnet>=1.7.0; sys_platform != 'win32' - """ - constraints = {} - with open(Path(__file__).resolve().parent / path) as f: - raw_constraints = f.readlines() - for line in raw_constraints: - # skip comments - if line.startswith('#'): - continue - line = line.replace('\n', '') - # read constraints for that package - package, delimiter, constraint = re.split('(~|=|<|>|;)', line, maxsplit=1) - # if there is no entry for that package, add it - if constraints.get(package) is None: - constraints[package] = [delimiter + constraint] - # else add another entry for that package - else: - constraints[package].extend([delimiter + constraint]) - return constraints - - -def read_requirements(path: str) -> List[str]: - """ - Read a requirements.txt file and return a list - of requirements. Three cases are supported, the - list corresponds to priority: - 1. version specified in requirements.txt - 2. version specified in constraints.txt - 3. version unbound - - Putting environment markers into constraints.txt is prone to bugs. - They should be specified in requirements.txt files. 
- """ - requirements = [] - constraints = read_constraints() - with open(Path(__file__).resolve().parent / path) as f: - raw_requirements = f.readlines() - for line in raw_requirements: - # skip comments and constraints link - if line.startswith(('#', '-c')): - continue - # get rid of newlines - line = line.replace('\n', '') - # if version is specified (non-word chars present) - package_constraint = constraints.get(line.split(';')[0]) - if re.search('(~|=|<|>)', line) and len(line.split(';'))>1: - if package_constraint: # both markers and versions specified - marker_index = line.find(";") - # insert package version between package name and environment markers - line = line[:marker_index] \ - + ",".join([constraint for constraint in package_constraint]) \ - + line[marker_index:] - requirements.append(line) - # else get version from constraints - else: - constraint = constraints.get(line) - # if version found in constraints.txt - if constraint: - for marker in constraint: - requirements.append(line+marker) - # else version is unbound - else: - requirements.append(line) - return requirements - - -# Detect all the framework specific requirements_*.txt files. -requirements_txt = [] -py_modules = [] -for item in os.listdir(): - if re.match(r'requirements_?(tf|tf2|onnx|kaldi|caffe)?\.txt', item): - requirements_txt.append(item) -for item in os.listdir(prefix): - if re.match(r'mo(.*)\.py|main(.*)\.py', item): - py_modules.append(prefix.replace('/', '.') + item.split('.')[0]) -py_modules.append(prefix.replace('/', '.') + 'subprocess_main') -py_modules.append(prefix.replace('/', '.') + 'convert') -py_modules.append(prefix.replace('/', '.') + 'convert_impl') -py_modules.append(prefix.replace('/', '.') + '__main__') - -class InstallCmd(install): - def run(self): - install.run(self) - # copy requirements.txt files for all the frameworks - for name in requirements_txt: - copy(name, os.path.join(self.install_purelib, prefix)) - - version_txt = 'version.txt' - if os.path.exists(version_txt): - copyfile(os.path.join(version_txt), - os.path.join(self.install_purelib, prefix, version_txt)) - - -class BuildCmd(build_py): - def find_package_modules(self, package, package_dir): - modules = super().find_package_modules(package, package_dir) - return [ - (pkg, module, filename) - for (pkg, module, filename) in modules - ] - - -packages = find_namespace_packages(prefix[:-1]) -packages = [prefix.replace('/', '.') + p for p in packages] - -setup( - name='openvino-mo', - version='0.0.0', - author='Intel Corporation', - author_email='openvino_pushbot@intel.com', - url='https://github.com/openvinotoolkit/openvino', - packages=packages, - py_modules=py_modules, - cmdclass={ - 'install': InstallCmd, - 'build_py': BuildCmd, - }, - entry_points={ - 'console_scripts': [ - 'mo = openvino.tools.mo.__main__:main', - ], - }, - package_data={ - 'openvino.tools.mo.front.caffe.proto': ['*.proto'], - 'openvino.tools.mo.front.onnx': ['*.json'], - 'openvino.tools.mo.front.tf': ['*.json'], - 'openvino.tools.mo.front.caffe': ['CustomLayersMapping.xml*'] - }, - extras_require={ - 'caffe': read_requirements('requirements_caffe.txt'), - 'kaldi': read_requirements('requirements_kaldi.txt'), - 'onnx': read_requirements('requirements_onnx.txt'), - 'tensorflow': read_requirements('requirements_tf.txt'), - 'tensorflow2': read_requirements('requirements_tf2.txt'), - }, - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent", - ], - 
install_requires=read_requirements('requirements.txt'), -) diff --git a/tools/mo/unit_tests/__init__.py b/tools/mo/unit_tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/__init__.py b/tools/mo/unit_tests/mo/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/analysis/Iterator_get_next_test.py b/tools/mo/unit_tests/mo/analysis/Iterator_get_next_test.py deleted file mode 100644 index 7662996680df4d..00000000000000 --- a/tools/mo/unit_tests/mo/analysis/Iterator_get_next_test.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.analysis.inputs import InputsAnalysis -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from unit_tests.utils.graph import build_graph_with_edge_attrs - - -class IteratorGetNextAnalysisTest(unittest.TestCase): - - def test_positive(self): - graph = build_graph_with_edge_attrs( - { - 'iter_get_next': {'kind': 'op', 'op': 'IteratorGetNext', 'shapes': int64_array([[2, 2], [1, 1]]), - 'types': [None, None]}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'add': {'kind': 'op', 'op': 'Add'} - }, - [ - ('iter_get_next', 'sub', {'out': 0, 'in': 0}), - ('iter_get_next', 'add', {'out': 1, 'in': 0}) - ] - ) - inputs_desc = {} - message = InputsAnalysis.iterator_get_next_analysis(graph, inputs_desc) - ref_message = 'It looks like there is IteratorGetNext as input\n' \ - 'Run the Model Optimizer without --input option \n' \ - 'Otherwise, try to run the Model Optimizer with:\n\t\t--input "iter_get_next:0[2 2],iter_get_next:1[1 1]"\n' - self.assertEqual(message, ref_message) - - def test_negative(self): - graph = build_graph_with_edge_attrs( - { - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'add': {'kind': 'op', 'op': 'Add'} - }, - [ - ('placeholder', 'sub', {'out': 0, 'in': 0}), - ('placeholder', 'add', {'out': 0, 'in': 0}) - ] - ) - - inputs_desc = {} - message = InputsAnalysis.iterator_get_next_analysis(graph, inputs_desc) - self.assertEqual(message, None) diff --git a/tools/mo/unit_tests/mo/analysis/__init__.py b/tools/mo/unit_tests/mo/analysis/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/back/ChangeOutputTypeAttributes_test.py b/tools/mo/unit_tests/mo/back/ChangeOutputTypeAttributes_test.py deleted file mode 100644 index 830cf5c8eb8e95..00000000000000 --- a/tools/mo/unit_tests/mo/back/ChangeOutputTypeAttributes_test.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from copy import deepcopy - -import numpy as np - -from openvino.tools.mo.back.ChangeOutputTypeAttributes import ChangeOutputTypeAttributes -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.range import Range -from openvino.tools.mo.front.common.partial_infer.utils import float32_array -from openvino.tools.mo.middle.passes.convert_data_type import convert_blobs, data_type_str_to_np -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_empty_data, connect -from unit_tests.utils.graph import valued_const_with_data - - -class 
ChangeOutputTypeAttributesTests(unittest.TestCase): - - def test_range_correct_case(self): - graph, graph_ref = build_range_test_graphs(start=0, limit=10, delta=1, dst_type_str='FP16') - ChangeOutputTypeAttributes().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_range_correct_case_returns_shape_value(self): - graph, graph_ref = build_range_test_graphs(start=0, limit=10, delta=1, dst_type_str='FP32', - src_type_str='FP16', returns_shape_value=True) - ChangeOutputTypeAttributes().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True) - self.assertTrue(flag, resp) - - # starting from ~1000 FP16 absolute difference between neighbor values is more than 1 - # fails because of shape inconsistency - def test_range_different_values(self): - graph, graph_ref = build_range_test_graphs(start=0, limit=50000, delta=1, dst_type_str='FP16') - self.assertRaises(Error, ChangeOutputTypeAttributes().find_and_replace_pattern, graph) - - def test_range_out_of_fp16_max(self): - graph, graph_ref = build_range_test_graphs(start=0, limit=100000, delta=1, dst_type_str='FP16') - self.assertRaises(Error, ChangeOutputTypeAttributes().find_and_replace_pattern, graph) - - def test_range_out_of_fp16_min(self): - graph, graph_ref = build_range_test_graphs(start=0, limit=-100000, delta=-1, dst_type_str='FP16') - self.assertRaises(Error, ChangeOutputTypeAttributes().find_and_replace_pattern, graph) - - def test_cast_correct_case(self): - input_data = np.array([0, 1000, 4, 9, 0]) - graph, graph_ref = build_cast_test_graphs(input_data, dst_type_str='FP16') - ChangeOutputTypeAttributes().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_cast_out_of_fp16_max(self): - input_data = np.array([0, 100000, 4, 9, 0]) - graph, graph_ref = build_cast_test_graphs(input_data, dst_type_str='FP16') - self.assertRaises(Error, ChangeOutputTypeAttributes().find_and_replace_pattern, graph) - - def test_cast_out_of_fp16_min(self): - input_data = np.array([0, -100000, 4, 9, 0]) - graph, graph_ref = build_cast_test_graphs(input_data, dst_type_str='FP16') - self.assertRaises(Error, ChangeOutputTypeAttributes().find_and_replace_pattern, graph) - - def test_cast_with_scalar(self): - input_data = np.array(4) - graph, graph_ref = build_cast_test_graphs(input_data, dst_type_str='FP16') - ChangeOutputTypeAttributes().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True) - self.assertTrue(flag, resp) - -def build_range_test_graphs(start=0, limit=10, delta=1, dst_type_str='FP16', - src_type_str='FP32', returns_shape_value=None): - nodes = { - **valued_const_with_data('start', float32_array(start)), - **valued_const_with_data('limit', float32_array(limit)), - **valued_const_with_data('delta', float32_array(delta)), - **regular_op_with_empty_data('range', {'type': 'Range', 'op': 'Range', - 'returns_shape_value': returns_shape_value, - 'output_type': data_type_str_to_np(src_type_str), - 'infer': Range.infer}), - **result('res'), - } - - nodes_ref = deepcopy(nodes) - nodes_ref.update({ - **regular_op_with_empty_data('range', {'type': 'Range', 'op': 'Range', - 'returns_shape_value': returns_shape_value, - 'output_type': data_type_str_to_np(dst_type_str), - 'infer': Range.infer}), - }) - - edges = [ - *connect('start', '0:range'), - 
*connect('limit', '1:range'), - *connect('delta', '2:range'), - *connect('range', 'res'), - ] - graph = build_graph(nodes, edges) - graph_ref = build_graph(nodes_ref, edges) - - graph = partial_infer(graph) - - graph.graph['cmd_params'].data_type = dst_type_str - convert_blobs(graph, dst_type_str) - return graph, graph_ref - - -def build_cast_test_graphs(input_data, dst_type_str='FP16'): - nodes = { - **valued_const_with_data('input', float32_array(input_data)), - **regular_op_with_empty_data('cast', {'type': 'Convert', 'op': 'Cast', - 'dst_type': np.float32, - 'infer': Cast.infer}), - **result('res'), - } - - nodes_ref = deepcopy(nodes) - nodes_ref.update({ - **regular_op_with_empty_data('cast', {'type': 'Convert', 'op': 'Cast', - 'dst_type': data_type_str_to_np(dst_type_str), - 'infer': Cast.infer}), - }) - - edges = [ - *connect('input', 'cast'), - *connect('cast', 'res'), - ] - graph = build_graph(nodes, edges) - graph_ref = build_graph(nodes_ref, edges) - - graph = partial_infer(graph) - - graph.graph['cmd_params'].data_type = dst_type_str - convert_blobs(graph, dst_type_str) - return graph, graph_ref diff --git a/tools/mo/unit_tests/mo/back/ChangeRandomUniformOutputType_test.py b/tools/mo/unit_tests/mo/back/ChangeRandomUniformOutputType_test.py deleted file mode 100644 index df26df54713ca1..00000000000000 --- a/tools/mo/unit_tests/mo/back/ChangeRandomUniformOutputType_test.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from argparse import Namespace -import pytest -import numpy as np - -from openvino.tools.mo.back.ChangeRandomUniformOutputType import ChangeRandomUniformOutputType -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, connect, regular_op_with_shaped_data - -nodes = { - **regular_op_with_shaped_data('placeholder', [3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('random_uniform', [3, 4, 5], {'type': 'RandomUniform', 'op': 'RandomUniform'}), - **regular_op_with_shaped_data('convert', [3, 4, 5], {'type': 'Convert'}), - **result('result'), - - # new RandomUniform node and inputs - **regular_op_with_shaped_data('min_val', [1], {'type': 'Const'}), - **regular_op_with_shaped_data('max_val', [1], {'type': 'Const'}), - **regular_op_with_shaped_data('shape', [3], {'type': 'Const'}), -} - -edges = [*connect('placeholder', '0:random_uniform'), *connect('min_val', '1:random_uniform'), - *connect('max_val', '2:random_uniform'), *connect('random_uniform', 'result')] -edges_with_convert = [*connect('placeholder', '0:random_uniform'), *connect('min_val', '1:random_uniform'), - *connect('max_val', '2:random_uniform'), *connect('random_uniform', 'convert'), - *connect('convert', 'result'), ] - - -class TestChangeRandomUniformOutputType(): - @pytest.mark.parametrize("ir_type, out_type, dst_type", [ - ("FP16", np.float32, np.float16), - ("FP32", np.float16, np.float32), - ("FP32", np.float32, None), - ("FP32", np.int64, None) -]) - def test_change_random_uniform_output_type(self,ir_type, out_type, dst_type): - graph = build_graph(nodes, edges, cli=Namespace(data_type=ir_type)) - graph_ref = build_graph(nodes, edges if dst_type is None else edges_with_convert, {}, - nodes_with_edges_only=True) - Node(graph, 'random_uniform')['output_type'] = out_type - - ChangeRandomUniformOutputType().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', 
check_op_attrs=True) - assert flag, resp - - if dst_type is not None: - convert_node = Node(graph, 'random_uniform').out_port(0).get_destination().node - assert convert_node['dst_type'] == dst_type diff --git a/tools/mo/unit_tests/mo/back/ClampNormalizer_test.py b/tools/mo/unit_tests/mo/back/ClampNormalizer_test.py deleted file mode 100644 index 325471c1500718..00000000000000 --- a/tools/mo/unit_tests/mo/back/ClampNormalizer_test.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.ClampNormalizer import ClampNormalizer -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, result, connect - - -class AttributedClampNormalizerTests(unittest.TestCase): - - def test_2_inputs(self): - nodes = { - **regular_op_with_shaped_data('placeholder', [1, 3, 20, 20], {'type': 'Parameter'}), - **regular_op_with_shaped_data('a_clamp', [1, 3, 20, 20], {'type': None, 'op': 'Clamp'}), - **regular_op_with_shaped_data('clamp', [1, 3, 20, 20], - {'type': 'Clamp', 'op': 'AttributedClamp', 'min': -3.5, 'max': 3.5}), - **valued_const_with_data('min', np.array(-3.5)), - **valued_const_with_data('max', np.array(3.5)), - **result('result'), - } - edges = [*connect('placeholder', '0:a_clamp'), - *connect('min', '1:a_clamp'), - *connect('max', '2:a_clamp'), - *connect('a_clamp', 'result'), - ] - graph = build_graph(nodes, edges) - ClampNormalizer().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes, [*connect('placeholder', '0:clamp'), *connect('clamp', 'result')]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) - - def test_all_dynamic_inputs(self): - nodes = { - **regular_op_with_shaped_data('placeholder', [1, 3, 20, 20], {'type': 'Parameter'}), - **regular_op_with_shaped_data('min', [1, 3, 20, 20], {'type': 'Parameter'}), - **regular_op_with_shaped_data('max', [1, 3, 20, 20], {'type': 'Parameter'}), - **regular_op_with_shaped_data('a_clamp', [1, 3, 20, 20], {'type': None, 'op': 'Clamp'}), - **regular_op_with_shaped_data('maximum', [1, 3, 20, 20], {'type': 'Maximum', 'op': 'Maximum'}), - **regular_op_with_shaped_data('minimum', [1, 3, 20, 20], {'type': 'Minimum', 'op': 'Minimum'}), - **result('result'), - } - edges = [*connect('placeholder', '0:a_clamp'), - *connect('min', '1:a_clamp'), - *connect('max', '2:a_clamp'), - *connect('a_clamp', 'result'), - ] - graph = build_graph(nodes, edges) - ClampNormalizer().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes, [*connect('placeholder', '0:maximum'), - *connect('min', '1:maximum'), - *connect('maximum', '0:minimum'), - *connect('max', '1:minimum'), - *connect('minimum', 'result') - ]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) - - def test_no_max_input(self): - nodes = { - **regular_op_with_shaped_data('placeholder', [1, 3, 20, 20], {'type': 'Parameter'}), - **regular_op_with_shaped_data('a_clamp', [1, 3, 20, 20], {'type': None, 'op': 'Clamp'}), - **regular_op_with_shaped_data('maximum', [1, 3, 20, 20], {'type': 'Maximum', 'op': 'Maximum'}), - **valued_const_with_data('min', np.array(-3.5)), - **result('result'), - } - edges = [*connect('placeholder', '0:a_clamp'), - *connect('min', '1:a_clamp'), - *connect('a_clamp', 'result'), - ] - graph = build_graph(nodes, edges) - 
ClampNormalizer().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes, [*connect('placeholder', '0:maximum'), - *connect('min', '1:maximum'), - *connect('maximum', 'result') - ]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) - - def test_no_min_input(self): - nodes = { - **regular_op_with_shaped_data('placeholder', [1, 3, 20, 20], {'type': 'Parameter'}), - **regular_op_with_shaped_data('a_clamp', [1, 3, 20, 20], {'type': None, 'op': 'Clamp'}), - **regular_op_with_shaped_data('minimum', [1, 3, 20, 20], {'type': 'Minimum', 'op': 'Minimum'}), - **valued_const_with_data('max', np.array(3.5)), - **result('result'), - } - edges = [*connect('placeholder', '0:a_clamp'), - *connect('max', '2:a_clamp'), - *connect('a_clamp', 'result'), - ] - graph = build_graph(nodes, edges) - ClampNormalizer().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes, [*connect('placeholder', '0:minimum'), - *connect('max', '1:minimum'), - *connect('minimum', 'result') - ]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/ConvolutionNormalizer_test.py b/tools/mo/unit_tests/mo/back/ConvolutionNormalizer_test.py deleted file mode 100644 index e0902c9d53bd87..00000000000000 --- a/tools/mo/unit_tests/mo/back/ConvolutionNormalizer_test.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.ConvolutionNormalizer import PullReshapeThroughFQ, V7ConvolutionWithGroupsResolver, \ - V10ConvolutionWithGroupsResolver -from openvino.tools.mo.back.ShapeOfConstFolding import ShapeOfConstFolding -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.ops.fakequantize import FakeQuantize -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, regular_op_with_empty_data, \ - valued_const_with_data, connect - - -def graph_template(weights_initial_shape, new_reshape_shape, limits_initial_shape, limits_new_shape=None): - limits_new_shape = limits_initial_shape if limits_new_shape is None else limits_new_shape - - core_connections = [ - *connect('input:0', '0:convolution'), - *connect('convolution:0', '0:output'), - ] - - core_nodes = lambda weights_shape, limit_shape, reshape_shape: { - **regular_op_with_shaped_data('input', None, {'type': 'Parameter', 'op': 'Parameter'}), - - **valued_const_with_data('weights', np.ones(weights_shape)), - - **valued_const_with_data('dim', int64_array(reshape_shape)), - **regular_op_with_shaped_data('reshape', reshape_shape, {'type': 'Reshape', 'infer': Reshape.infer, 'op': 'Reshape'}), - - **valued_const_with_data('il', np.ones(limit_shape)), - **valued_const_with_data('ih', np.ones(limit_shape)), - **valued_const_with_data('ol', np.ones(limit_shape)), - **valued_const_with_data('oh', np.ones(limit_shape)), - - **regular_op_with_shaped_data('FQ', weights_shape, {'type': 'FakeQuantize', 'infer': FakeQuantize.infer, - 'stop_value_propagation': True, 'levels': 2, 'op': 'FakeQuantize'}), - - **regular_op_with_shaped_data('convolution', None, {'type': 'Convolution', 'op': 'Convolution'}), - - 
**result(), - } - - nodes_before = core_nodes(weights_initial_shape, limits_initial_shape, new_reshape_shape) - edges_before = [ - - *connect('weights:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - - *connect('FQ:0', '0:reshape'), - *connect('dim:0', '1:reshape'), - *connect('reshape:0', '1:convolution'), - - *core_connections, - ] - graph = build_graph(nodes_attrs=nodes_before, edges=edges_before, nodes_with_edges_only=True) - - nodes_after = core_nodes(new_reshape_shape, limits_new_shape, []) - edges_after = [ - *connect('weights:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', '1:convolution'), - - *core_connections, - ] - graph_ref = build_graph(nodes_attrs=nodes_after, edges=edges_after, nodes_with_edges_only=True) - return graph, graph_ref - - -class TestPullReshapeThroughFQ(unittest.TestCase): - - def test_v7_weights_reshape(self): - graph, graph_ref = graph_template([3, 8, 7, 7], [24, 1, 7, 7], [1, 1, 1, 1]) - - PullReshapeThroughFQ().find_and_replace_pattern(graph) - graph.clean_up() - graph_ref.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, last_node='output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_reshape_reducing_tensor_rank(self): - graph, graph_ref = graph_template([3, 8, 7, 7], [24, 7, 7], [1, 1, 1]) - - PullReshapeThroughFQ().find_and_replace_pattern(graph) - graph.clean_up() - graph_ref.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, last_node='output', check_op_attrs=True) - self.assertTrue(flag, resp) - - -class TestV7ConvolutionWithGroupsResolver(unittest.TestCase): - def test_v7_group_convolution_resolver(self): - nodes = { - **regular_op_with_shaped_data('input', [1, 3, 224, 224], {'type': 'Parameter'}), - - **valued_const_with_data('weights', np.ones([3, 8, 7, 7])), - - **valued_const_with_data('dim', int64_array([24, -1, 0, 0])), - **regular_op_with_empty_data('reshape', {'type': 'Reshape'}), - - **regular_op_with_shaped_data('convolution', None, {'type': 'Convolution', 'group': 3, 'output': 24}), - - **result(), - } - graph = build_graph(nodes, [ - *connect('input', '0:convolution'), - *connect('weights', '1:convolution'), - *connect('convolution', 'output'), - ], nodes_with_edges_only=True) - - V7ConvolutionWithGroupsResolver().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, [ - *connect('input', '0:convolution'), - *connect('weights', '0:reshape'), - *connect('dim', '1:reshape'), - *connect('reshape', '1:convolution'), - *connect('convolution', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, last_node='output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_v7_group_convolution_resolver_weight_are_in_the_right_layout(self): - nodes = { - **regular_op_with_shaped_data('input', [1, 3, 224, 224], {'type': 'Parameter'}), - **valued_const_with_data('weights', np.ones([24, 1, 7, 7])), - **regular_op_with_shaped_data('convolution', None, {'type': 'Convolution', 'group': 3, 'output': 24}), - **result(), - } - edges = [ - *connect('input', '0:convolution'), - *connect('weights', '1:convolution'), - *connect('convolution', 'output'), - ] - graph = build_graph(nodes, edges) - V7ConvolutionWithGroupsResolver().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, edges) - (flag, resp) = compare_graphs(graph, graph_ref, last_node='output', 
check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_v7_group_convolution_resolver_depthwise_conv2d(self): - nodes = { - **regular_op_with_shaped_data('input', [1, 1, 224, 224], {'type': 'Parameter'}), - - **valued_const_with_data('weights', np.ones([1, 8, 7, 7])), - - **valued_const_with_data('dim', int64_array([8, -1, 0, 0])), - **regular_op_with_empty_data('reshape', {'type': 'Reshape'}), - - **regular_op_with_shaped_data('convolution', None, {'type': 'Convolution', 'group': 1, 'output': 8, - 'op': 'DepthwiseConv2dNative'}), - - **result(), - } - graph = build_graph(nodes, [ - *connect('input', '0:convolution'), - *connect('weights', '1:convolution'), - *connect('convolution', 'output'), - ], nodes_with_edges_only=True) - - V7ConvolutionWithGroupsResolver().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, [ - *connect('input', '0:convolution'), - *connect('weights', '0:reshape'), - *connect('dim', '1:reshape'), - *connect('reshape', '1:convolution'), - *connect('convolution', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, last_node='output', check_op_attrs=True) - self.assertTrue(flag, resp) - - -class TestV10ConvolutionWithGroupsResolver(unittest.TestCase): - - @staticmethod - def apply_transformation(graph): - V10ConvolutionWithGroupsResolver().find_and_replace_pattern(graph) - graph.clean_up() - ShapeOfConstFolding().find_and_replace_pattern(graph) - graph.clean_up() - - def test_v10_group_convolution_resolver(self): - nodes = { - **regular_op_with_shaped_data('input', [1, 3, 224, 224], {'type': 'Parameter'}), - - **valued_const_with_data('weights', np.ones([3, 8, 7, 7])), - - **valued_const_with_data('new_weights', np.ones([3, 8, 1, 7, 7])), - - **regular_op_with_shaped_data('convolution', None, {'type': 'Convolution', 'group': 3, 'output': 24}), - - **result(), - } - graph = build_graph(nodes, [ - *connect('input', '0:convolution'), - *connect('weights', '1:convolution'), - *connect('convolution', 'output'), - ], nodes_with_edges_only=True) - - TestV10ConvolutionWithGroupsResolver.apply_transformation(graph) - - nodes['convolution']['type'] = 'GroupConvolution' - del nodes['convolution']['group'] - - graph_ref = build_graph(nodes, [ - *connect('input', '0:convolution'), - *connect('new_weights', '1:convolution'), - *connect('convolution', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, last_node='output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_v10_group_convolution_resolver_depthwise_conv2d(self): - nodes = { - **regular_op_with_shaped_data('input', [1, 1, 224, 224], {'type': 'Parameter'}), - - **valued_const_with_data('weights', np.ones([1, 8, 7, 7])), - - **valued_const_with_data('new_weights', np.ones([1, 8, 1, 7, 7])), - - **regular_op_with_shaped_data('convolution', None, {'type': 'Convolution', 'group': 1, 'output': 8, - 'op': 'DepthwiseConv2dNative'}), - - **result(), - } - graph = build_graph(nodes, [ - *connect('input', '0:convolution'), - *connect('weights', '1:convolution'), - *connect('convolution', 'output'), - ], nodes_with_edges_only=True) - - TestV10ConvolutionWithGroupsResolver.apply_transformation(graph) - - nodes['convolution']['type'] = 'GroupConvolution' - del nodes['convolution']['group'] - - graph_ref = build_graph(nodes, [ - *connect('input', '0:convolution'), - *connect('new_weights', '1:convolution'), - *connect('convolution', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, 
graph_ref, last_node='output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_v10_group_convolution_resolver_depthwise_conv2d_dynamic(self): - nodes = { - **regular_op_with_shaped_data('input', [-1, -1, -1, -1], {'type': 'Parameter'}), - - **valued_const_with_data('weights', np.ones([1, 8, 7, 7])), - - **valued_const_with_data('new_weights', np.ones([1, 8, 1, 7, 7])), - - **regular_op_with_shaped_data('convolution', None, {'type': 'Convolution', 'group': 1, 'output': 8, - 'op': 'DepthwiseConv2dNative'}), - - **result(), - } - graph = build_graph(nodes, [ - *connect('input', '0:convolution'), - *connect('weights', '1:convolution'), - *connect('convolution', 'output'), - ], nodes_with_edges_only=True) - - TestV10ConvolutionWithGroupsResolver.apply_transformation(graph) - - nodes['convolution']['type'] = 'GroupConvolution' - del nodes['convolution']['group'] - - graph_ref = build_graph(nodes, [ - *connect('input', '0:convolution'), - *connect('new_weights', '1:convolution'), - *connect('convolution', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, last_node='output', check_op_attrs=True) - self.assertTrue(flag, resp) - - diff --git a/tools/mo/unit_tests/mo/back/CutMemory_test.py b/tools/mo/unit_tests/mo/back/CutMemory_test.py deleted file mode 100644 index 7a36c6fdd4bb68..00000000000000 --- a/tools/mo/unit_tests/mo/back/CutMemory_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.CutMemory import CutMemoryInput, CutMemoryOutput -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class CutMemoryTest(unittest.TestCase): - def test_remove_memory(self): - """Memory should be replaced by input and output""" - graph = build_graph( - nodes_attrs={ - 'input': {'kind': 'op'}, - 'data_in': {'kind': 'data', 'shape': None, 'value': None}, - 'const_0': {'kind': 'op', 'op': 'Const'}, - 'const_0_data': {'kind': 'data'}, - 'memory_in': {'kind': 'op', 'op': 'ReadValue', 'variable_id': 'memory_'}, - 'data_mem': {'kind': 'data', 'shape': None, 'value': None}, - 'concat': {'kind': 'op', 'op': 'Concat', 'axis': 0}, - 'concat_data': {'kind': 'data', 'shape': None, 'value': None}, - 'some_op': {'kind': 'op'}, - 'some_op_data': {'kind': 'data', 'shape': None, 'value': None}, - 'memory_out': {'kind': 'op', 'op': 'Assign', 'variable_id': 'memory_'}, - 'data_mem_out': {'kind': 'data', 'shape': None, 'value': None}, - 'mem_out_result': {'kind': 'op', 'op': 'Result'} - }, - edges=[ - ('input', 'data_in'), - ('const_0', 'const_0_data'), ('const_0_data', 'memory_in'), ('memory_in', 'data_mem'), - ('data_in', 'concat', {'in': 0}), ('data_mem', 'concat', {'in': 1}), - ('concat', 'concat_data'), ('concat_data', 'some_op'), - ('some_op', 'some_op_data'), ('some_op_data', 'memory_out'), - ('memory_out', 'data_mem_out'), ('data_mem_out', 'mem_out_result') - ] - ) - graph_ref = build_graph( - nodes_attrs={ - 'input': {'kind': 'op'}, - 'data_in': {'kind': 'data', 'shape': None, 'value': None}, - 'new_input': {'kind': 'op', 'op': 'Parameter'}, - 'new_in_data': {'kind': 'data', 'shape': None, 'value': None}, - 'concat': {'kind': 'op', 'op': 'Concat', 'axis': 0}, - 'concat_data': {'kind': 'data', 'shape': None, 'value': None}, - 'some_op': {'kind': 'op'}, - 'some_op_data': {'kind': 'data', 'shape': None, 'value': None}, - 'crop': {'kind': 'op', 'op': 
'Crop', 'axis': np.array([0])}, - 'crop_data': {'kind': 'data', 'shape': None, 'value': None}, - 'mem_out_result': {'kind': 'op', 'op': 'Result'}, - }, - edges=[ - ('input', 'data_in'), ('new_input', 'new_in_data'), - ('data_in', 'concat', {'in': 0}), ('new_in_data', 'concat', {'in': 1}), - ('concat', 'concat_data'), ('concat_data', 'some_op'), - ('some_op', 'some_op_data'), ('some_op_data', 'crop'), - ('crop', 'crop_data'), ('crop_data', 'mem_out_result') - ], - ) - CutMemoryInput().find_and_replace_pattern(graph) - CutMemoryOutput().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, last_node='mem_out_result', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/FakeOutputResolver_test.py b/tools/mo/unit_tests/mo/back/FakeOutputResolver_test.py deleted file mode 100644 index d583d407f45cc7..00000000000000 --- a/tools/mo/unit_tests/mo/back/FakeOutputResolver_test.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.back.FakeOutputResolver import FakeOutputResolver -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_empty_data, connect, empty_data, \ - valued_const_with_data - - -class FakeOutputResolverTest(unittest.TestCase): - def test_one(self): - nodes = { - **regular_op_with_empty_data('input', {'type': 'Parameter'}), - **regular_op_with_empty_data('some_op', {'type': 'SomeOp', 'name': 'some_op_name'}), - **regular_op_with_empty_data('fake_output', - {'type': None, 'kind': 'op', 'op': 'FakeOutput', 'name': 'my_output_name'}), - **valued_const_with_data('const', int64_array(0)), - **regular_op_with_empty_data('add', {'type': None, 'kind': 'op', 'op': 'Add', 'name': 'my_output_name'}), - **result('result'), - } - edges = [*connect('input', 'some_op'), - *connect('some_op', 'fake_output'), - *connect('fake_output', 'result'), - ] - graph = build_graph(nodes, edges) - - edges_ref = [*connect('input', 'some_op'), - *connect('some_op', '0:add'), - *connect('const', '1:add'), - *connect('add', 'result'), - ] - - graph_ref = build_graph(nodes, edges_ref) - - FakeOutputResolver().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - def test_multi(self): - nodes = { - **regular_op_with_empty_data('input', {'type': 'Parameter'}), - **regular_op_with_empty_data('some_op', {'type': 'SomeOp', 'name': 'some_op_name'}), - **empty_data('some_op_d2'), - **regular_op_with_empty_data('fake_output1', - {'type': None, 'kind': 'op', 'op': 'FakeOutput', 'name': 'my_output_name1'}), - **regular_op_with_empty_data('fake_output2', - {'type': None, 'kind': 'op', 'op': 'FakeOutput', 'name': 'my_output_name2'}), - - **valued_const_with_data('const1', int64_array(0)), - **valued_const_with_data('const2', int64_array(0)), - **regular_op_with_empty_data('add1', {'type': None, 'kind': 'op', 'op': 'Add', 'name': 'my_output_name1'}), - **regular_op_with_empty_data('add2', {'type': None, 'kind': 'op', 'op': 'Add', 'name': 'my_output_name2'}), - **result('result1'), - **result('result2'), - } - edges = [*connect('input', 'some_op'), - *connect('some_op', 'fake_output1'), - ('some_op', 'some_op_d2'), - ('some_op_d2', 'fake_output2'), - *connect('fake_output1', 'result1'), - *connect('fake_output2', 
'result2'), - ] - graph = build_graph(nodes, edges) - - edges_ref = [*connect('input', 'some_op'), - *connect('some_op', '0:add1'), - *connect('const1', '1:add1'), - ('some_op', 'some_op_d2'), - ('some_op_d2', 'add2', {'in': 0}), - *connect('const2', '1:add2'), - *connect('add1', 'result1'), - *connect('add2', 'result2'), - ] - - graph_ref = build_graph(nodes, edges_ref) - - FakeOutputResolver().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result1') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/FuseTransposesSequence_test.py b/tools/mo/unit_tests/mo/back/FuseTransposesSequence_test.py deleted file mode 100644 index 1a24a12495449d..00000000000000 --- a/tools/mo/unit_tests/mo/back/FuseTransposesSequence_test.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.FuseTransposesSequence import FuseTransposesSequence -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the -# dictionary with node attributes. -nodes_attributes = { - 'placeholder_1': {'name': 'placeholder_1', 'value': None, 'shape': None, 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'placeholder_1_data': {'name': 'placeholder_1_data', 'value': None, 'shape': None, 'kind': 'data', - 'data_type': None}, - # Transpose layers - 'const_1': {'value': None, 'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'const_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'permute_1': {'type': 'Transpose', 'value': None, 'kind': 'op', 'op': 'Transpose'}, - 'permute_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'const_2': {'value': None, 'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'const_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'permute_2': {'type': 'Transpose', 'value': None, 'kind': 'op', 'op': 'Transpose'}, - 'permute_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'const_3': {'value': None, 'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'const_3_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'permute_3': {'type': 'Transpose', 'value': None, 'kind': 'op', 'op': 'Transpose'}, - 'permute_3_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'op_output': {'op': 'Result', 'kind': 'op'} -} - - -class FuseTransposesSequenceTest(unittest.TestCase): - def test_1(self): - # - # NHWC NCHW NHWC - # Input->DATA->Transpose->DATA->Transpose->DATA => Input->DATA - # - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'permute_1'), - ('permute_1', 'permute_1_data'), - ('permute_1_data', 'permute_2'), - ('permute_2', 'permute_2_data'), - ('permute_2_data', 'op_output'), - - ('const_1', 'const_1_data'), - ('const_1_data', 'permute_1', {'in': 1}), - - ('const_2', 'const_2_data'), - ('const_2_data', 'permute_2', {'in': 1}), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - - 'const_1_data': {'value': np.array([0, 3, 1, 2])}, - 'permute_1_data': {'shape': np.array([1, 3, 227, 227])}, - - 'const_2_data': {'value': np.array([0, 2, 3, 1])}, - 'permute_2_data': {'shape': np.array([1, 227, 227, 3])}, - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - 
[('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}}, - nodes_with_edges_only=True) - - pattern = FuseTransposesSequence() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_2(self): - # - # Input->DATA->Transpose->DATA->Transpose->DATA => Input->DATA->Transpose->DATA - # - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'permute_1'), - ('permute_1', 'permute_1_data'), - ('permute_1_data', 'permute_2'), - ('permute_2', 'permute_2_data'), - ('permute_2_data', 'op_output'), - - ('const_1', 'const_1_data'), - ('const_1_data', 'permute_1', {'in': 1}), - - ('const_2', 'const_2_data'), - ('const_2_data', 'permute_2', {'in': 1}), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_1': {'shape': np.array([4])}, - 'const_1_data': {'value': np.array([0, 3, 1, 2])}, - 'permute_1_data': {'shape': np.array([1, 3, 227, 227])}, - - 'const_2': {'shape': np.array([4])}, - 'const_2_data': {'value': np.array([0, 1, 2, 3])}, - 'permute_2_data': {'shape': np.array([1, 3, 227, 227])}, - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'permute_1'), - ('permute_1', 'permute_1_data'), - ('permute_1_data', 'op_output'), - - ('const_1', 'const_1_data'), - ('const_1_data', 'permute_1', {'in': 1}), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_1_data': {'value': np.array([0, 3, 1, 2])}, - 'permute_1_data': {'shape': np.array([1, 3, 227, 227])}, - }, nodes_with_edges_only=True) - - pattern = FuseTransposesSequence() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - -if __name__ == '__main__': - unittest.main() diff --git a/tools/mo/unit_tests/mo/back/GatherTreeNormalizer_test.py b/tools/mo/unit_tests/mo/back/GatherTreeNormalizer_test.py deleted file mode 100644 index 358b26750f318f..00000000000000 --- a/tools/mo/unit_tests/mo/back/GatherTreeNormalizer_test.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.GatherNormalizer import GatherTreeNormalizer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.middle.passes.eliminate import shape_inference -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, result, connect - - -class GatherTreeNormalizerTests(unittest.TestCase): - def test_gather_tree_normalizer(self): - nodes = { - **regular_op_with_shaped_data('data_0', [100, 1, 10], {'type': 'Parameter'}), - **regular_op_with_shaped_data('data_1', [100, 1, 10], {'type': 'Parameter'}), - **regular_op_with_shaped_data('data_2', [1], {'type': 'Parameter'}), - **regular_op_with_shaped_data('gather_tree', [1], {'type': 'GatherTree'}), - **valued_const_with_data('const', np.array([2])), - **result('result'), - } - edges = [*connect('data_0', '0:gather_tree'), - *connect('data_1', '1:gather_tree'), - *connect('data_2', '2:gather_tree'), - 
*connect('const', '3:gather_tree'), - *connect('gather_tree', 'result'), - ] - ref_edges = [*connect('data_0', '0:gather_tree'), - *connect('data_1', '1:gather_tree'), - *connect('data_2', '2:gather_tree'), - *connect('const', '0:squeeze'), - *connect('squeeze_axis', '1:squeeze'), - *connect('squeeze', '3:gather_tree'), - *connect('gather_tree', 'result'),] - ref_nodes = nodes.copy() - ref_nodes.update({**valued_const_with_data('squeeze_axis', int64_array([0])), - **regular_op_with_shaped_data('squeeze', [], {'type': 'Squeeze'})}) - graph = build_graph(nodes, edges) - GatherTreeNormalizer().find_and_replace_pattern(graph) - # run shape inference to make sure that shape overriding happened - shape_inference(graph) - - ref_graph = build_graph(ref_nodes, ref_edges) - - (flag, resp) = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/InterpolateReshape_test.py b/tools/mo/unit_tests/mo/back/InterpolateReshape_test.py deleted file mode 100644 index 839d13edd04dca..00000000000000 --- a/tools/mo/unit_tests/mo/back/InterpolateReshape_test.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.InterpolateReshape import InterpolateReshapeWA, InterpolateConcat -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \ - connect_data - -nodes = { - **regular_op_with_shaped_data('placeholder', [1, 3, 30, 40], {'type': 'Parameter', 'op': 'Parameter'}), - **valued_const_with_data('out_shape', np.array([60, 160])), - - **regular_op_with_shaped_data('interpolate', [1, 3, 60, 160], {'type': 'Interpolate', 'axes': [2, 3], - 'op': 'Interpolate', 'version': 'opset1'}), - - **regular_op_with_shaped_data('shape', [4], {'type': 'ShapeOf', 'op': 'ShapeOf'}), - **valued_const_with_data('indices', np.array([2, 3])), - **valued_const_with_data('axis', np.array(0)), - **regular_op_with_shaped_data('gather', [2], {'type': 'Gather', 'op': 'Gather'}), - - **valued_const_with_data('multiplier', np.array([2, 4])), - **regular_op_with_shaped_data('mul', [2], {'type': 'Multiply', 'op': 'Mul'}), - - **regular_op_with_shaped_data('placeholder_1', [1, 3, 60, 160], {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('concat', [1, 7, 60, 160], {'type': 'Concat', 'axis': 1, 'op': 'Concat'}), - - **result(), -} - - -class TestInterpolateReshapeWA(unittest.TestCase): - def test_interpolate_reshape_graph_comparison(self): - graph = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('out_shape', '1:interpolate'), - *connect('interpolate', 'output'), - ], nodes_with_edges_only=True) - InterpolateReshapeWA().find_and_replace_pattern(graph) - graph.clean_up() - graph_ref = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect_data('placeholder', 'shape'), - *connect('shape', '0:gather'), - *connect('indices', '1:gather'), - *connect('axis', '2:gather'), - *connect('gather', '0:mul'), - *connect('multiplier', '1:mul'), - *connect('mul', '1:interpolate'), - *connect('interpolate', 'output'), - ], nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - -class TestInterpolateConcat(unittest.TestCase): - def 
test_interpolate_concat_reshape_graph_comparison(self): - graph = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('out_shape', '1:interpolate'), - *connect('interpolate', '0:concat'), - *connect('placeholder_1', '1:concat'), - *connect('concat', 'output'), - ], nodes_with_edges_only=True) - - InterpolateConcat().find_and_replace_pattern(graph) - graph.clean_up() - graph_ref = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('placeholder_1', 'shape'), - *connect('shape', '0:gather'), - *connect('indices', '1:gather'), - *connect('axis', '2:gather'), - *connect('gather', '1:interpolate'), - *connect('interpolate', '0:concat'), - *connect_data('placeholder_1', '1:concat'), - *connect('concat', 'output'), - ], nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/LayoutChangeForGatherND_test.py b/tools/mo/unit_tests/mo/back/LayoutChangeForGatherND_test.py deleted file mode 100644 index 19d7246cd2b649..00000000000000 --- a/tools/mo/unit_tests/mo/back/LayoutChangeForGatherND_test.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.LayoutChangeForGatherND import LayoutChangeForGatherND -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # GatherND - 'gathernd': {'type': 'GatherND', 'kind': 'op', 'op': 'GatherND'}, - 'gathernd_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Result layer - 'result': {'type': 'Result', 'kind': 'op', 'op': 'Result'}, - # Transpose layers - 'transpose_1': {'type': 'Transpose', 'kind': 'op', 'op': 'Transpose', 'need_shape_inference': True}, - 'transpose_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'axis_1_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None}, - 'axis_1_const_data': {'kind': 'data', 'value': None, 'shape': None}, - 'transpose_2': {'type': 'Transpose', 'kind': 'op', 'op': 'Transpose', 'need_shape_inference': True}, - 'transpose_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'axis_2_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None}, - 'axis_2_const_data': {'kind': 'data', 'value': None, 'shape': None}, - 'transpose_3': {'type': 'Transpose', 'kind': 'op', 'op': 'Transpose', 'need_shape_inference': True}, - 'transpose_3_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'axis_3_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None}, - 'axis_3_const_data': {'kind': 'data', 'value': None, 'shape': None}, -} - - -class LayoutChangeForGatherNDTests(unittest.TestCase): - def test_tf_all_ports(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_1_data', 'gathernd'), - ('placeholder_2_data', 'gathernd'), - ('gathernd', 
'gathernd_data'), - ('gathernd_data', 'result'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - 'placeholder_2_data': {'shape': np.array([1, 3, 224, 224])}, - 'gathernd_data': {'shape': np.array([1, 3, 224, 224])}, - }) - graph.graph['fw'] = 'tf' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_1_data', 'transpose_1'), - ('axis_1_const', 'axis_1_const_data'), - ('axis_1_const_data', 'transpose_1'), - ('transpose_1', 'transpose_1_data'), - ('placeholder_2_data', 'transpose_2'), - ('axis_2_const', 'axis_2_const_data'), - ('axis_2_const_data', 'transpose_2'), - ('transpose_2', 'transpose_2_data'), - ('transpose_1_data', 'gathernd'), - ('transpose_2_data', 'gathernd'), - ('gathernd', 'gathernd_data'), - ('gathernd_data', 'transpose_3'), - ('axis_3_const', 'axis_3_const_data'), - ('axis_3_const_data', 'transpose_3'), - ('transpose_3', 'transpose_3_data'), - ('transpose_3_data', 'result'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - 'placeholder_2_data': {'shape': np.array([1, 3, 224, 224])}, - 'axis_1_const_data': {'value': int64_array([0, 2, 3, 1])}, - 'axis_2_const_data': {'value': int64_array([0, 2, 3, 1])}, - 'gathernd_data': {'shape': np.array([1, 3, 224, 224])}, - 'axis_3_const_data': {'value': int64_array([0, 3, 1, 2])}, - }) - - pattern = LayoutChangeForGatherND() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_tf_one_ports(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_1_data', 'gathernd'), - ('placeholder_2_data', 'gathernd'), - ('gathernd', 'gathernd_data'), - ('gathernd_data', 'result'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - 'placeholder_2_data': {'shape': np.array([1, 3])}, - 'gathernd_data': {'shape': np.array([1, 3])}, - }) - graph.graph['fw'] = 'tf' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_1_data', 'transpose_1'), - ('axis_1_const', 'axis_1_const_data'), - ('axis_1_const_data', 'transpose_1'), - ('transpose_1', 'transpose_1_data'), - ('transpose_1_data', 'gathernd'), - ('placeholder_2_data', 'gathernd'), - ('gathernd', 'gathernd_data'), - ('gathernd_data', 'result'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - 'placeholder_2_data': {'shape': np.array([1, 3])}, - 'axis_1_const_data': {'value': int64_array([0, 2, 3, 1])}, - 'gathernd_data': {'shape': np.array([1, 3])} - }) - - pattern = LayoutChangeForGatherND() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/MarkNodesWithShapeValues_test.py b/tools/mo/unit_tests/mo/back/MarkNodesWithShapeValues_test.py deleted file mode 100644 index 21dd0272dd0b4f..00000000000000 --- a/tools/mo/unit_tests/mo/back/MarkNodesWithShapeValues_test.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.MarkNodesWithShapeValues import MarkNodesWithShapeValues -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, 
float32_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_empty_data, shaped_const_with_data, connect, \ - regular_op, regular_op_with_shaped_data - - -class TestMarkDataTypeInShapeOfSubgraphs(unittest.TestCase): - - def test_run_with_shape_subgraph_input(self): - inp_shape = (1, 3, 1000, 1000) - dst_type = np.float32 - - nodes = { - **shaped_const_with_data('input', int64_array(inp_shape)), - **regular_op_with_empty_data('shape', {'type': 'ShapeOf'}), - **regular_op_with_empty_data('cast_to_float', {'type': 'Cast', 'dst_type': dst_type}), - **regular_op('mul_const', {'op': 'Const'}), - **{'mul_const_d': {'kind': 'data', 'value': float32_array([1., 1., 1., 100.])}}, - **regular_op_with_empty_data('mul', {'type': 'Mul'}), - **regular_op_with_empty_data('cast_to_int', {'type': 'Cast', 'dst_type': np.int64}), - **regular_op_with_empty_data('interpolate', {'type': 'Interpolate', 'shape_calculation_model': 'scales'}), - **result('res'), - } - - nodes_ref = { - **shaped_const_with_data('input', int64_array(inp_shape)), - **regular_op_with_empty_data('shape', {'type': 'ShapeOf'}), - **regular_op_with_empty_data('cast_to_float', {'type': 'Cast', 'dst_type': dst_type, - 'returns_shape_value': True}), - **regular_op_with_empty_data('mul', {'type': 'Mul', 'returns_shape_value': True}), - **regular_op('mul_const', {'op': 'Const', 'returns_shape_value': True}), - **{'mul_const_d': {'kind': 'data', 'value': float32_array([1., 1., 1., 100.]), - 'correct_data_type': True}}, - **regular_op_with_empty_data('cast_to_int', {'type': 'Cast', 'dst_type': np.int64, - 'returns_shape_value': True}), - **regular_op_with_empty_data('interpolate', {'type': 'Interpolate', 'shape_calculation_model': 'scales'}), - **result('res'), - } - - edges = [ - *connect('input', '0:interpolate'), - *connect('input', '0:shape', skip_data=True), - *connect('shape', '0:cast_to_float'), - *connect('cast_to_float', '0:mul'), - *connect('mul_const', '1:mul'), - *connect('mul', '0:cast_to_int'), - *connect('cast_to_int', '1:interpolate'), - *connect('interpolate', 'res'), - ] - graph = build_graph(nodes, edges) - interp_node = Node(graph, 'interpolate') - interp_node.add_input_port(2) - - MarkNodesWithShapeValues().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_ref, edges) - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_run_with_const_input(self): - inp_shape = (1, 3, 1000, 1000) - - nodes = { - **shaped_const_with_data('input', int64_array(inp_shape)), - **regular_op('sizes_const', {'op': 'Const'}), - **{'sizes_const_d': {'kind': 'data', 'value': float32_array([1., 1., 1., 100.])}}, - **regular_op_with_empty_data('interpolate', {'type': 'Interpolate', 'shape_calculation_model': 'scales'}), - **result('res'), - } - - nodes_ref = { - **shaped_const_with_data('input', int64_array(inp_shape)), - **regular_op('sizes_const', {'op': 'Const', 'returns_shape_value': True}), - **{'sizes_const_d': {'kind': 'data', 'value': float32_array([1., 1., 1., 100.])}}, - **regular_op_with_empty_data('interpolate', {'type': 'Interpolate', 'shape_calculation_model': 'scales'}), - **result('res'), - } - - edges = [ - *connect('input', '0:interpolate'), - *connect('sizes_const', '1:interpolate'), - *connect('interpolate', 'res'), - ] - graph = build_graph(nodes, edges) - interp_node = Node(graph, 'interpolate') - 
interp_node.add_input_port(2) - - MarkNodesWithShapeValues().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_ref, edges) - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_run_with_solitary_shapeof_in_shape_value_subgraph(self): - # in this case MarkNodesWithShapeValues must leave graph unchanged - # so reference nodes are exactly the same - - inp_shape_1 = int64_array((1, 3, 100, 100)) - inp_shape_2 = int64_array((1, 3, 100, 50)) # inp_2 and const will be concatenated to (1, 3, 200, 50) - const_shape = int64_array((1, 3, 100, 50)) - - nodes = { - **regular_op_with_shaped_data('input_1', inp_shape_1, {'op': 'Parameter', 'type': 'Parameter'}), - **regular_op_with_shaped_data('input_2', inp_shape_2, {'op': 'Parameter', 'type': 'Parameter', - 'returns_shape_value': False}), - **shaped_const_with_data('const', const_shape), - **regular_op_with_empty_data('concat', {'op': 'Concat', 'type': 'Concat', 'axis': 2, - 'returns_shape_value': False}), - **regular_op_with_empty_data('shapeof', {'op': 'ShapeOf', 'type': 'ShapeOf'}), - **regular_op_with_empty_data('reshape', {'op': 'Reshape', 'type': 'Reshape'}), - **result('res'), - } - - edges = [ - *connect('input_1', '0:reshape'), - *connect('input_2', '0:concat'), - *connect('const', '1:concat'), - *connect('concat', 'shapeof'), - *connect('shapeof', '1:reshape'), - *connect('reshape', 'res'), - ] - - graph = build_graph(nodes, edges) - - MarkNodesWithShapeValues().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, edges) - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True) - self.assertTrue(flag, "'returns_shape_value' should be False or unset for ShapeOf input nodes" + ': ' + str(resp)) diff --git a/tools/mo/unit_tests/mo/back/MatMulNormalizer_test.py b/tools/mo/unit_tests/mo/back/MatMulNormalizer_test.py deleted file mode 100644 index aa0c5c7b4c11dd..00000000000000 --- a/tools/mo/unit_tests/mo/back/MatMulNormalizer_test.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from argparse import Namespace - -import numpy as np -import pytest - -from openvino.tools.mo.back.MatMulNormalizer import SmartReshape_HC_Reshape_MatMul, PullTransposeThroughFQUp -from openvino.tools.mo.ops.MatMul import MatMul -from openvino.tools.mo.ops.fakequantize import FakeQuantize -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, \ - shaped_const_with_data, result, connect, connect_data -from unit_tests.utils.graph import regular_op_with_empty_data as op_with_empty_data - - -class TestSmartReshape_HC_Reshape_MatMulTest(): - @pytest.mark.parametrize("in1_shape, in2_shape, reshape_pattern, transpose_a, transpose_b, updated_pattern", - [ - ([1, 20, 30], [30, 40], [20, -1], False, False, [-1, 30]), - ([1, 20, 30], [40, 30], [20, -1], False, True, [-1, 30]), - ([1, 30, 20], [30, 40], [-1, 20], True, False, [30, -1]), - ([1, 30, 20], [40, 30], [-1, 20], True, True, [30, -1]), - ] - ) - def test_reshape_on_the_A_input(self, - in1_shape, in2_shape, reshape_pattern, transpose_a, transpose_b, updated_pattern): - nodes = { - **regular_op_with_shaped_data('in_1', 
in1_shape, dict(type='Parameter', op='Parameter')), - **regular_op_with_shaped_data('in_2', in2_shape, dict(type='Parameter', op='Parameter')), - **valued_const_with_data('dim', int64_array(reshape_pattern)), - **op_with_empty_data('reshape', - dict(type='Reshape', op='Reshape', infer=Reshape.infer, need_shape_inference=True)), - **op_with_empty_data('matmul', - dict(type='MatMul', op='MatMul', infer=MatMul.infer, need_shape_inference=True, - transpose_a=transpose_a, transpose_b=transpose_b, dim_attrs={})), - **result(), - } - edges = [ - *connect('in_1:0', '0:reshape'), - *connect('dim:0', '1:reshape'), - *connect('reshape:0', '0:matmul'), - *connect('in_2:0', '1:matmul'), - *connect('matmul:0', 'output'), - ] - graph = build_graph(nodes_attrs=nodes, edges=edges, cli=Namespace(static_shape=True)) - graph.clean_up() - SmartReshape_HC_Reshape_MatMul().find_and_replace_pattern(graph) - graph.clean_up() - - graph_ref = build_graph(nodes_attrs=nodes, edges=edges, update_attributes={ - 'dim': {'value': int64_array(updated_pattern)}, 'dim_d': {'value': int64_array(updated_pattern)}}) - graph_ref.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - @pytest.mark.parametrize("in1_shape, in2_shape, reshape_pattern, transpose_a, transpose_b, updated_pattern",[ - ([20, 30], [1, 30, 40], [-1, 40], False, False, [30, -1]), - ([20, 30], [1, 40, 30], [40, -1], False, True, [-1, 30]), - ([30, 20], [1, 30, 40], [-1, 40], True, False, [30, -1]), - ([30, 20], [1, 40, 30], [40, -1], True, True, [-1, 30]), - ]) - def test_reshape_on_the_B_input(self, - in1_shape, in2_shape, reshape_pattern, transpose_a, transpose_b, updated_pattern): - nodes = { - **regular_op_with_shaped_data('in_1', in1_shape, dict(type='Parameter', op='Parameter')), - **regular_op_with_shaped_data('in_2', in2_shape, dict(type='Parameter', op='Parameter')), - **valued_const_with_data('dim', int64_array(reshape_pattern)), - **op_with_empty_data('reshape', - dict(type='Reshape', op='Reshape', infer=Reshape.infer, need_shape_inference=True)), - **op_with_empty_data('matmul', - dict(type='MatMul', op='MatMul', infer=MatMul.infer, need_shape_inference=True, - transpose_a=transpose_a, transpose_b=transpose_b, dim_attrs={})), - **result(), - } - edges = [ - *connect('in_1:0', '0:matmul'), - *connect('in_2:0', '0:reshape'), - *connect('dim:0', '1:reshape'), - *connect('reshape:0', '1:matmul'), - *connect('matmul:0', 'output'), - ] - graph = build_graph(nodes_attrs=nodes, edges=edges, cli=Namespace(static_shape=True)) - graph.clean_up() - SmartReshape_HC_Reshape_MatMul().find_and_replace_pattern(graph) - graph.clean_up() - - graph_ref = build_graph(nodes_attrs=nodes, edges=edges, update_attributes={ - 'dim': {'value': int64_array(updated_pattern)}, 'dim_d': {'value': int64_array(updated_pattern)}}) - graph_ref.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - -class FQTransposePullerTest(unittest.TestCase): - def nodes(self, input_shape, transpose_shape, fq_shape, is_input_const): - nodes = { - **valued_const_with_data('il', np.array([[[[0]]]])), - **valued_const_with_data('ih', np.array([[[[255]]]])), - **valued_const_with_data('ol', np.array([[[[0]]]])), - **valued_const_with_data('oh', np.array([[[[255]]]])), - **regular_op_with_shaped_data('FQ', fq_shape, dict(type='FakeQuantize', op='FakeQuantize', infer=FakeQuantize.infer)), - **valued_const_with_data('order', int64_array([0, 2, 3, 1])), - 
**regular_op_with_shaped_data('transpose', transpose_shape, dict(type='Transpose', op='Transpose', infer=Transpose.infer)), - **regular_op_with_shaped_data('relu', fq_shape, dict(type='Relu', op='Relu')), - - **result(), - } - - if is_input_const: - input_node = shaped_const_with_data('input', input_shape) - else: - input_node = regular_op_with_shaped_data('input', input_shape, dict(type='Parameter', op='Parameter')) - - nodes.update(input_node) - return nodes - - def test_positive(self): - nodes = self.nodes([1, 3, 224, 224], [1, 224, 224, 3], [1, 3, 224, 224], True) - edges = [ - *connect('input', '0:FQ'), - *connect('il', '1:FQ'), - *connect('ih', '2:FQ'), - *connect('ol', '3:FQ'), - *connect('oh', '4:FQ'), - *connect('FQ:0', '0:transpose'), - *connect('order:0', '1:transpose'), - *connect('transpose:0', 'output'), - ] - graph = build_graph(nodes_attrs=nodes, edges=edges, nodes_with_edges_only=True) - PullTransposeThroughFQUp().find_and_replace_pattern(graph) - graph.clean_up() - - nodes = self.nodes([1, 3, 224, 224], [1, 224, 224, 3], [1, 224, 224, 3], True) - edges = [ - *connect('input', '0:transpose'), - *connect('order:0', '1:transpose'), - *connect('transpose', '0:FQ'), - *connect('il', '1:FQ'), - *connect('ih', '2:FQ'), - *connect('ol', '3:FQ'), - *connect('oh', '4:FQ'), - *connect('FQ:0', 'output'), - ] - graph_ref = build_graph(nodes_attrs=nodes, edges=edges, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_negative_1(self): - nodes = self.nodes([1, 3, 224, 224], [1, 224, 224, 3], [1, 3, 224, 224], True) - edges = [ - *connect('input', '0:FQ'), - *connect('il', '1:FQ'), - *connect('ih', '2:FQ'), - *connect('ol', '3:FQ'), - *connect('oh', '4:FQ'), - *connect('FQ:0', '0:transpose'), - *connect_data('FQ:0', 'relu'), - *connect('order:0', '1:transpose'), - *connect('transpose:0', 'output'), - ] - graph = build_graph(nodes_attrs=nodes, edges=edges, nodes_with_edges_only=True) - graph_ref = graph.copy() - PullTransposeThroughFQUp().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_negative_2(self): - nodes = self.nodes([1, 3, 224, 224], [1, 224, 224, 3], [1, 3, 224, 224], False) - edges = [ - *connect('input', '0:FQ'), - *connect('il', '1:FQ'), - *connect('ih', '2:FQ'), - *connect('ol', '3:FQ'), - *connect('oh', '4:FQ'), - *connect('FQ:0', '0:transpose'), - *connect('order:0', '1:transpose'), - *connect('transpose:0', 'output'), - ] - graph = build_graph(nodes_attrs=nodes, edges=edges, nodes_with_edges_only=True) - graph_ref = graph.copy() - PullTransposeThroughFQUp().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/MaxPoolTest.py b/tools/mo/unit_tests/mo/back/MaxPoolTest.py deleted file mode 100644 index 47d3291690b58f..00000000000000 --- a/tools/mo/unit_tests/mo/back/MaxPoolTest.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.back.MaxPool import MaxPool -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class TestMaxPool(unittest.TestCase): - - def test_no_out_normalization(self): - graph = 
build_graph( - nodes_attrs={ - 'input': {'kind': 'op', 'op': 'Parameter', 'name': 'node'}, - 'input_data': {'kind': 'data'}, - 'pool': {'kind': 'op', 'name': 'node', 'type': 'Pooling', 'pool_method': 'max'}, - 'pool_data': {'kind': 'data'}, - 'result': {'kind': 'op', 'op': 'Result', 'name': 'node'} - }, - edges=[ - ('input', 'input_data'), - ('input_data', 'pool'), - ('pool', 'pool_data'), - ('pool_data', 'result') - ] - ) - - graph_ref = build_graph( - nodes_attrs={ - 'input': {'kind': 'op', 'op': 'Parameter', 'name': 'node'}, - 'input_data': {'kind': 'data'}, - 'pool': {'kind': 'op', 'name': 'node', 'type': 'MaxPool'}, - 'pool_data': {'kind': 'data'}, - 'result': {'kind': 'op', 'op': 'Result', 'name': 'node'}, - }, - edges=[ - ('input', 'input_data'), - ('input_data', 'pool'), - ('pool', 'pool_data'), - ('pool_data', 'result'), - ] - ) - - MaxPool().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_out_normalization(self): - graph = build_graph( - nodes_attrs={ - 'input': {'kind': 'op', 'op': 'Parameter', 'name': 'node'}, - 'input_data': {'kind': 'data'}, - 'pool': {'kind': 'op', 'name': 'node', 'type': 'Pooling', 'pool_method': 'max'}, - 'pool_data': {'kind': 'data'}, - 'result': {'kind': 'op', 'op': 'Result', 'name': 'node'} - }, - edges=[ - ('input', 'input_data'), - ('input_data', 'pool'), - ('pool', 'pool_data'), - ('pool_data', 'result') - ] - ) - - graph_ref = build_graph( - nodes_attrs={ - 'input': {'kind': 'op', 'op': 'Parameter', 'name': 'node'}, - 'input_data': {'kind': 'data'}, - 'pool': {'kind': 'op', 'name': 'node', 'type': 'MaxPool'}, - 'pool_data': {'kind': 'data'}, - 'pool_data_added': {'kind': 'data'}, - 'result': {'kind': 'op', 'op': 'Result', 'name': 'node'}, - 'result_added': {'kind': 'op', 'op': 'Result', 'name': 'node'} - }, - edges=[ - ('input', 'input_data'), - ('input_data', 'pool'), - ('pool', 'pool_data'), - ('pool_data', 'result'), - ('pool', 'pool_data_added'), - ('pool_data_added', 'result_added') - ] - ) - - pool_op = Node(graph, 'pool') - pool_op.add_output_port(1) # add disconnected output port to check normalization - - MaxPool().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/OptimizeTransposeReshapeSequence_test.py b/tools/mo/unit_tests/mo/back/OptimizeTransposeReshapeSequence_test.py deleted file mode 100644 index 8dbe232db1292b..00000000000000 --- a/tools/mo/unit_tests/mo/back/OptimizeTransposeReshapeSequence_test.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.back.OptimizeTransposeReshapeSequence import match_shapes, split_input_permute_dimension, \ - split_dims_indices, split_output_permute_dimension -from openvino.tools.mo.front.common.partial_infer.utils import int64_array - - -class SplitDimsIndicesTest(unittest.TestCase): - def test_1(self): - self.assertListEqual(list(split_dims_indices(int64_array([1, 32, 64, 60]), int64_array([1, 8, 4, 64, 3, 20]))), [1, 3]) - - def test_2(self): - self.assertListEqual(list(split_dims_indices(int64_array([8, 4, 64, 3, 20]), int64_array([1, 8, 4, 64, 3, 20, 1, 1]))), [0, 4, 4]) - - def test_3(self): - self.assertListEqual(list(split_dims_indices(int64_array([120]), int64_array([2, 3, 4, 1, 5]))), [0, 0, 0, 0]) - - def test_4(self): - 
self.assertListEqual(list(split_dims_indices(int64_array([120, 1]), int64_array([2, 3, 4, 5, 1]))), [0, 0, 0]) - - def test_5(self): - self.assertListEqual(list(split_dims_indices(int64_array([1, 4, 1, 1]), int64_array([1, 2, 1, 1, 2, 1, 1]))), [1, 1, 1]) - - def test_6(self): - self.assertListEqual(list(split_dims_indices(int64_array([1, 20, 64]), int64_array([1, 1, 20, 64]))), [1]) - - -class SplitOutputTransposeDimensionTest(unittest.TestCase): - def test_1(self): - self.assertListEqual(list(split_output_permute_dimension(3, int64_array([0, 2, 3, 1]))), [0, 3, 4, 1, 2]) - - def test_2(self): - self.assertListEqual(list(split_output_permute_dimension(0, int64_array([0, 1, 3, 2]))), [0, 1, 2, 4, 3]) - - def test_3(self): - self.assertListEqual(list(split_output_permute_dimension(1, int64_array([0, 3, 1, 2]))), [0, 3, 4, 1, 2]) - - -class SplitInputTransposeDimensionTest(unittest.TestCase): - def test_1(self): - self.assertListEqual(list(split_input_permute_dimension(1, int64_array([0, 2, 3, 1]))), [0, 3, 4, 1, 2]) - - def test_2(self): - self.assertListEqual(list(split_input_permute_dimension(0, int64_array([0, 1, 3, 2]))), [0, 1, 2, 4, 3]) - - def test_3(self): - self.assertListEqual(list(split_input_permute_dimension(3, int64_array([0, 3, 1, 2]))), [0, 3, 4, 1, 2]) - - def test_4(self): - self.assertListEqual(list(split_input_permute_dimension(0, int64_array([0, 1, 2, 3]))), [0, 1, 2, 3, 4]) - - def test_5(self): - self.assertListEqual(list(split_input_permute_dimension(3, int64_array([0, 1, 2, 3]))), [0, 1, 2, 3, 4]) - - -class MatchShapesTest(unittest.TestCase): - def test_basic(self): - self.assertListEqual(list(match_shapes(int64_array([1, 32, 64, 60]), int64_array([8, 4, 64, 3, 20]))), [1, 8, 4, 64, 3, 20]) - - def test_ones_in_the_middle(self): - self.assertListEqual(list(match_shapes(int64_array([32, 1, 2, 3, 1, 8]), int64_array([4, 2, 1, 4, 6, 1, 1, 8]))), [4, 2, 1, 4, 1, 2, 3, 1, 1, 8]) - - def test_trailing_one(self): - self.assertListEqual(list(match_shapes(int64_array([1, 32, 64, 60, 1]), int64_array([8, 4, 64, 3, 20]))), [1, 8, 4, 64, 3, 20, 1]) - - def test_one_to_many(self): - self.assertListEqual(list(match_shapes(int64_array([120]), int64_array([2, 3, 4, 5]))), [2, 3, 4, 5]) - - def test_many_to_one(self): - self.assertListEqual(list(match_shapes(int64_array([2, 3, 4, 5]), int64_array([120]))), [2, 3, 4, 5]) - - def test_many_to_one_with_trailing(self): - self.assertListEqual(list(match_shapes(int64_array([2, 3, 4, 5]), int64_array([120, 1, 1]))), [2, 3, 4, 5, 1, 1]) - - def test_equal_shapes(self): - self.assertListEqual(list(match_shapes(int64_array([2, 3, 4, 5]), int64_array([2, 3, 4, 5]))), [2, 3, 4, 5]) - - def test_one(self): - self.assertListEqual(list(match_shapes(int64_array([1]), int64_array([1]))), [1]) - - def test_ones_equal_lengths(self): - self.assertListEqual(list(match_shapes(int64_array([1, 1, 1]), int64_array([1, 1, 1]))), [1, 1, 1]) - - def test_ones_different_lengths(self): - self.assertListEqual(list(match_shapes(int64_array([1]), int64_array([1, 1, 1]))), [1, 1, 1]) - - def test_intersection_of_input_output_dimensions(self): # is this test correct? Looks like yes... 
- self.assertListEqual(list(match_shapes(int64_array([10, 20, 7]), int64_array([5, 4, 1, 70]))), [5, 2, 2, 1, 10, 7]) - - def test_trailing_ones(self): - self.assertListEqual(list(match_shapes(int64_array([1, 1, 10]), int64_array([1, 5, 1, 1, 2, 1]))), [1, 1, 5, 1, 1, 2, 1]) - - def test_not_matchabale_shapes(self): - self.assertIsNone(match_shapes(int64_array([5, 7]), int64_array([7, 5]))) diff --git a/tools/mo/unit_tests/mo/back/ReduceTransposeDimensions_test.py b/tools/mo/unit_tests/mo/back/ReduceTransposeDimensions_test.py deleted file mode 100644 index d82eb539784048..00000000000000 --- a/tools/mo/unit_tests/mo/back/ReduceTransposeDimensions_test.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.back.ReduceTransposeDimensions import sequential_dims, merge_permute_order_dimensions, merge_dims -from openvino.tools.mo.front.common.partial_infer.utils import int64_array - - -class SequentialDimsTest(unittest.TestCase): - def test_returns_first_instance(self): - self.assertListEqual(sequential_dims(int64_array([0, 3, 4, 1, 2])), [1, 2]) - - def test_returns_last_indices(self): - self.assertListEqual(sequential_dims(int64_array([4, 0, 3, 1, 2])), [3, 4]) - - def test_returns_full_list(self): - self.assertListEqual(sequential_dims(int64_array([0, 1, 2, 3, 4])), [0, 1, 2, 3, 4]) - - def test_returns_from_the_beginning(self): - self.assertListEqual(sequential_dims(int64_array([1, 2, 3, 0, 4])), [0, 1, 2]) - - def test_no_sequential_dims(self): - self.assertIsNone(sequential_dims(int64_array([2, 1, 3, 0, 4]))) - - def test_2d_input_with_sequential_dims(self): - self.assertListEqual(sequential_dims(int64_array([0, 1])), [0, 1]) - - def test_2d_input_without_sequential_dims(self): - self.assertIsNone(sequential_dims(int64_array([1, 0]))) - - -class MergeTransposeOrderDimensionsTest(unittest.TestCase): - def test_merge_last_dims(self): - self.assertListEqual(list(merge_permute_order_dimensions([1, 2], int64_array([0, 3, 4, 1, 2]))), [0, 3, 1, 2]) - - def test_merge_last_indices(self): - self.assertListEqual(list(merge_permute_order_dimensions([3, 4], int64_array([0, 3, 4, 1, 2]))), [0, 2, 3, 1]) - - def test_merge_start_indices(self): - self.assertListEqual(list(merge_permute_order_dimensions([0, 1], int64_array([1, 2, 4, 3, 0]))), [1, 3, 2, 0]) - - def test_merge_all_dims(self): - self.assertListEqual(list(merge_permute_order_dimensions([0, 1, 2], int64_array([0, 1, 2]))), [0]) - - def test_merge_3_dims(self): - self.assertListEqual(list(merge_permute_order_dimensions([1, 2, 3], int64_array([3, 0, 1, 2, 4]))), [1, 0, 2]) - - -class MergeDimsTest(unittest.TestCase): - def test_merge_middle_dims(self): - self.assertListEqual(list(merge_dims([1, 2], int64_array([3, 2, 5, 7]))), [3, 10, 7]) - - def test_merge_first_dim(self): - self.assertListEqual(list(merge_dims([0, 1], int64_array([3, 2, 5, 7]))), [6, 5, 7]) - - def test_merge_last_dim(self): - self.assertListEqual(list(merge_dims([2, 3], int64_array([3, 2, 5, 7]))), [3, 2, 35]) - - def test_merge_all_dims(self): - self.assertListEqual(list(merge_dims([0, 1, 2, 3], int64_array([3, 2, 5, 7]))), [210]) - - def test_reduce_with_minus_one(self): - self.assertListEqual(list(merge_dims([1, 2], int64_array([3, -1, 5, 7]))), [3, -1, 7]) - - def test_merge_with_0_being_merged(self): - with self.assertRaisesRegex(AssertionError, ".*The value 0 is not supported.*"): - merge_dims([1, 2], int64_array([3, 0, 5, 7])) - - def 
test_merge_with_0_not_merged(self): - with self.assertRaisesRegex(AssertionError, ".*The value 0 is not supported.*"): - merge_dims([2, 3], int64_array([3, 0, 5, 7])) diff --git a/tools/mo/unit_tests/mo/back/ResultRename_test.py b/tools/mo/unit_tests/mo/back/ResultRename_test.py deleted file mode 100644 index 6b8a6c5ddee481..00000000000000 --- a/tools/mo/unit_tests/mo/back/ResultRename_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.back.ResultRename import ResultRename -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result - -nodes = { - 'Op1': {'type': 'Op1', 'kind': 'op', 'op': 'Op1'}, - 'Op2': {'type': 'Op2', 'kind': 'op', 'op': 'Op2'}, - 'Op1_data': {'kind': 'data', 'fw_tensor_debug_info': [('Op1', 'Op1_tensor')]}, - 'Op2_data': {'kind': 'data', 'fw_tensor_debug_info': [('Op2', 'Op2_tensor')]}, - **result('result1'), - **result('result2'), -} - - -class ResultRenameTest(unittest.TestCase): - def test_case1(self): - graph = build_graph(nodes, [('Op1', 'Op1_data'), ('Op1_data', 'result1')]) - ResultRename().find_and_replace_pattern(graph) - res_node = Node(graph, 'result1') - self.assertTrue(res_node['name'] == 'Op1_tensor') - - def test_case2(self): - graph = build_graph(nodes, []) - graph_ref = build_graph(nodes, []) - - ResultRename().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'result1', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_case3(self): - graph = build_graph(nodes, [('Op1', 'Op1_data'), ('Op1_data', 'result1')]) - res_node_graph = Node(graph, 'Op1') - res_node_graph['name'] = 'Op1_tensor' - ResultRename().find_and_replace_pattern(graph) - res_node = Node(graph, 'result1') - self.assertTrue(res_node['name'] == 'Op1_tensor/sink_port_0') - - def test_case4(self): - graph = build_graph(nodes, [('Op1', 'Op1_data'), ('Op1_data', 'result1'), - ('Op1_data', 'Op2'), ('Op2', 'Op2_data'), - ('Op2_data', 'result2')]) - graph.outputs_order = ['result1', 'result2'] - - ResultRename().find_and_replace_pattern(graph) - res1_node = Node(graph, 'result1') - res2_node = Node(graph, 'result2') - self.assertTrue(res1_node['name'] == 'Op1_tensor') - self.assertTrue(res2_node['name'] == 'Op2_tensor') - - self.assertTrue(graph.outputs_order == ['Op1_tensor', 'Op2_tensor']) - - def test_case5(self): - graph = build_graph(nodes, [('Op1', 'Op1_data'), ('Op1_data', 'result1'), - ('Op1_data', 'Op2'), ('Op2', 'Op2_data'), - ('Op2_data', 'result2')]) - - res_node_graph = Node(graph, 'result1') - res_node_graph['name'] = 'Op1_tensor' - ResultRename().find_and_replace_pattern(graph) - res1_node = Node(graph, 'result1') - res2_node = Node(graph, 'result2') - self.assertTrue(res1_node['name'] == 'Op1_tensor') - self.assertTrue(res2_node['name'] == 'Op2_tensor') - - def test_case6(self): - _nodes = nodes.copy() - _nodes.update({ - 'Op3': {'type': 'Op3', 'kind': 'op', 'op': 'Op3'}, - 'Op4': {'type': 'Op4', 'kind': 'op', 'op': 'Op4'}, - 'Op3_data': {'kind': 'data', 'fw_tensor_debug_info': [('Op3', 'Op3_tensor')]}, - 'Op4_data': {'kind': 'data', 'fw_tensor_debug_info': [('Op4', 'Op4_tensor')]}, - **result('result3'), - **result('result4'), - }) - graph = build_graph(_nodes, [('Op1', 'Op1_data'), ('Op1_data', 'result1'), ('Op1_data', 'Op2'), - ('Op2', 'Op2_data'), ('Op2_data', 'result2'), ('Op2_data', 'Op3'), - 
('Op3', 'Op3_data'), ('Op3_data', 'result3'), ('Op3_data', 'Op4'), - ('Op4', 'Op4_data'), ('Op4_data', 'result4')]) - graph.outputs_order = ['result1', 'result3', 'result4', 'result2'] - - ResultRename().find_and_replace_pattern(graph) - self.assertTrue(Node(graph, 'result1')['name'] == 'Op1_tensor') - self.assertTrue(Node(graph, 'result2')['name'] == 'Op2_tensor') - self.assertTrue(Node(graph, 'result3')['name'] == 'Op3_tensor') - self.assertTrue(Node(graph, 'result4')['name'] == 'Op4_tensor') - - self.assertTrue(graph.outputs_order == ['Op1_tensor', 'Op3_tensor', 'Op4_tensor', 'Op2_tensor']) diff --git a/tools/mo/unit_tests/mo/back/ReverseInputChannels_test.py b/tools/mo/unit_tests/mo/back/ReverseInputChannels_test.py deleted file mode 100644 index 1ea0eaf1cc8b9f..00000000000000 --- a/tools/mo/unit_tests/mo/back/ReverseInputChannels_test.py +++ /dev/null @@ -1,259 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from argparse import Namespace - -import numpy as np - -from openvino.tools.mo.back.ReverseInputChannels import ReverseChannelsPropagationUp, ReverseChannelsPropagationDown -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from openvino.tools.mo.utils.runtime_info import OldAPIMapOrder, RTInfo -from unit_tests.utils.graph import build_graph, result, connect, regular_op_with_shaped_data, valued_const_with_data - -nodes = { - **regular_op_with_shaped_data('placeholder1', [1, 3, 10, 10], {'type': 'Parameter', 'rt_info': RTInfo()}), - **regular_op_with_shaped_data('placeholder2', [1, 1, 1, 1], {'type': 'Parameter'}), - - **regular_op_with_shaped_data('mul', [1, 3, 10, 10], {'type': 'Multiply'}), - **regular_op_with_shaped_data('reverse_channels', [1, 3, 10, 10], - {'type': 'ReverseChannels', 'axis': int64_array(1)}), - - **regular_op_with_shaped_data('pad', [1, 3, 10, 10], {'type': 'Pad'}), - - **result('result'), -} - -nodes2 = { - **regular_op_with_shaped_data('placeholder', [1, 3, 10, 10], {'type': 'Parameter'}), - - **valued_const_with_data('mul_const', float32_array([-127.5, -127.5, -127.5])), - **regular_op_with_shaped_data('mul', [1, 3, 10, 10], {'type': 'Multiply'}), - **valued_const_with_data('pad_const_1', int64_array([0, 0, 0, 0])), - **valued_const_with_data('pad_const_2', int64_array([0, 0, 1, 1])), - **regular_op_with_shaped_data('pad', [1, 3, 10, 10], {'type': 'Pad'}), - **regular_op_with_shaped_data('reverse_channels', [1, 3, 10, 10], - {'type': 'ReverseChannels', 'axis': int64_array(1)}), - **result('result'), - **result('result2'), -} - -nodes3 = { - **regular_op_with_shaped_data('placeholder', [1, 3, 10, 10], {'type': 'Parameter'}), - **regular_op_with_shaped_data('transpose', [1, 3, 10, 10], {'type': 'Transpose'}), - **valued_const_with_data('transpose_order', int64_array([0, 3, 1, 2])), - **regular_op_with_shaped_data('reverse_channels_up', [1, 3, 10, 10], - {'type': 'ReverseChannels', 'axis': int64_array(3)}), - **regular_op_with_shaped_data('reverse_channels_down', [1, 3, 10, 10], - {'type': 'ReverseChannels', 'axis': int64_array(1)}), - **result('result'), - **result('result2'), -} - - -def get_nodes(shape, axis=1): - return { - **regular_op_with_shaped_data('placeholder1', shape, - {'type': 'Parameter', 'shape': shape, 'rt_info': RTInfo()}), - **regular_op_with_shaped_data('placeholder2', [1, 1, 1, 1], {'type': 'Parameter', 
'shape': [1, 1, 1, 1]}), - - **regular_op_with_shaped_data('mul', shape, {'type': 'Multiply'}), - **regular_op_with_shaped_data('reverse_channels', shape, - {'op': 'ReverseChannels', 'type': None, 'axis': int64_array(axis)}), - - **regular_op_with_shaped_data('pad', shape, {'type': 'Pad'}), - - **result('result'), - } - - -class ReverseInputChannelsTest(unittest.TestCase): - def check_graph_attrs(self, graph: Graph, parameter_node_names: list): - for node in graph.get_op_nodes(): - if node.soft_get('name') in parameter_node_names: - self.assertTrue(node.soft_get('type') == 'Parameter') - out_node = node.out_node(0) - self.assertTrue(out_node['fw_tensor_debug_info'] == ['fw_name', 0]) - else: - for idx in node.out_nodes(): - out_node = node.out_node(idx) - self.assertFalse('fw_tensor_debug_info' in out_node) - - def set_graph_attrs(self, graph: Graph, parameter_node_names: list): - for node in graph.get_op_nodes(): - if node.soft_get('name') in parameter_node_names: - self.assertTrue(node.soft_get('type') == 'Parameter') - out_node = node.out_node(0) - out_node['fw_tensor_debug_info'] = ['fw_name', 0] - - def test_lift_up_through_eltwise(self): - graph = build_graph(nodes, [*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'), - *connect('mul', 'reverse_channels'), *connect('reverse_channels', 'result')]) - self.set_graph_attrs(graph, ['placeholder1', 'placeholder2']) - - node = Node(graph, 'mul') - reverse_channels = Node(graph, 'reverse_channels') - - ReverseChannelsPropagationUp.lift_up_through_eltwise(node, reverse_channels) - self.check_graph_attrs(graph, ['placeholder1', 'placeholder2']) - - def test_lift_up_through_eltwise_broadcast(self): - graph = build_graph(nodes, [*connect('placeholder1', '0:mul'), *connect('placeholder2', '1:mul'), - *connect('mul', 'reverse_channels'), *connect('reverse_channels', 'result')]) - self.set_graph_attrs(graph, ['placeholder1', 'placeholder2']) - placeholder_node = Node(graph, 'placeholder2') - placeholder_node.out_port(0).data.set_shape([]) - - node = Node(graph, 'mul') - reverse_channels = Node(graph, 'reverse_channels') - - ReverseChannelsPropagationUp.lift_up_through_eltwise(node, reverse_channels) - self.check_graph_attrs(graph, ['placeholder1', 'placeholder2']) - - def test_lift_up_through_pad(self): - graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'), - *connect('mul', '0:pad'), *connect('pad_const_1', '1:pad'), - *connect('pad_const_2', '2:pad'), *connect('pad', 'reverse_channels'), - *connect('reverse_channels', 'result')]) - self.set_graph_attrs(graph, ['placeholder']) - - node = Node(graph, 'pad') - reverse_channels = Node(graph, 'reverse_channels') - - keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_zero_port_only(node, - reverse_channels) - self.assertTrue(keep_moving_up is True) - self.assertTrue(len(new_reverses) == 1) - self.check_graph_attrs(graph, ['placeholder']) - - def test_lift_up_through_pad2(self): - graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'), - *connect('mul', '0:pad'), *connect('pad_const_1', '1:pad'), - *connect('pad_const_2', '2:pad'), *connect('pad', 'reverse_channels'), - *connect('reverse_channels:0', '0:result'), - *connect('reverse_channels:0', '0:result2')]) - self.set_graph_attrs(graph, ['placeholder']) - - node = Node(graph, 'pad') - reverse_channels = Node(graph, 'reverse_channels') - - keep_moving_up, new_reverses = 
ReverseChannelsPropagationUp.lift_up_through_zero_port_only(node, - reverse_channels) - self.assertTrue(keep_moving_up is True) - self.assertTrue(len(new_reverses) == 1) - self.check_graph_attrs(graph, ['placeholder']) - - def test_pass_rc_through(self): - graph = build_graph(nodes2, [*connect('placeholder', '0:mul'), *connect('mul_const', '1:mul'), - *connect('mul', 'reverse_channels'), *connect('reverse_channels', '0:pad'), - *connect('pad_const_1', '1:pad'), *connect('pad_const_2', '2:pad'), - *connect('pad', 'result')]) - self.set_graph_attrs(graph, ['placeholder']) - - node = Node(graph, 'pad') - reverse_channels = Node(graph, 'reverse_channels') - - ReverseChannelsPropagationDown.pass_rc_through_zero_port_only(node, reverse_channels) - self.check_graph_attrs(graph, ['placeholder']) - - def test_lift_up_through_transpose(self): - graph = build_graph(nodes3, [*connect('placeholder', '0:transpose'), *connect('transpose_order', '1:transpose'), - *connect('transpose', 'reverse_channels_down'), - *connect('reverse_channels_down', 'result')]) - graph_ref = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_down'), - *connect('transpose_order', '1:transpose'), - *connect('reverse_channels_down', 'transpose'), - *connect('transpose', 'result')]) - self.set_graph_attrs(graph, ['placeholder']) - - node = Node(graph, 'transpose') - reverse_channels = Node(graph, 'reverse_channels_down') - - keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_transpose(node, reverse_channels) - self.assertTrue(keep_moving_up is True) - self.assertTrue(len(new_reverses) == 1) - self.check_graph_attrs(graph, ['placeholder']) - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - reverse_channels = Node(graph, 'reverse_channels_down') - self.assertTrue(reverse_channels.axis == 3) - self.assertTrue(type(reverse_channels.axis) == np.ndarray) - - def test_lift_down_through_transpose(self): - graph = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_up'), - *connect('transpose_order', '1:transpose'), - *connect('reverse_channels_up', '0:transpose'), - *connect('transpose', 'result')]) - graph_ref = build_graph(nodes3, [*connect('placeholder', '0:transpose'), - *connect('transpose_order', '1:transpose'), - *connect('transpose', 'reverse_channels_up'), - *connect('reverse_channels_up', '0:result')]) - self.set_graph_attrs(graph, ['placeholder']) - - node = Node(graph, 'transpose') - reverse_channels = Node(graph, 'reverse_channels_up') - - keep_moving_down = ReverseChannelsPropagationDown.pass_rc_through_transpose(node, reverse_channels) - - self.assertTrue(keep_moving_down is True) - self.check_graph_attrs(graph, ['placeholder']) - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - reverse_channels = Node(graph, 'reverse_channels_down') - self.assertTrue(reverse_channels.axis == 1) - self.assertTrue(type(reverse_channels.axis) == np.ndarray) - - def test_lift_up_through_transpose_negative_axis(self): - graph = build_graph(nodes3, [*connect('placeholder', '0:transpose'), *connect('transpose_order', '1:transpose'), - *connect('transpose', 'reverse_channels_down'), - *connect('reverse_channels_down', 'result')]) - graph_ref = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_down'), - *connect('transpose_order', '1:transpose'), - *connect('reverse_channels_down', 'transpose'), - *connect('transpose', 'result')]) - self.set_graph_attrs(graph, ['placeholder']) - - node = Node(graph, 
'transpose') - reverse_channels = Node(graph, 'reverse_channels_down') - reverse_channels.axis = int64_array(-3) - - keep_moving_up, new_reverses = ReverseChannelsPropagationUp.lift_up_through_transpose(node, reverse_channels) - self.assertTrue(keep_moving_up is True) - self.assertTrue(len(new_reverses) == 1) - self.check_graph_attrs(graph, ['placeholder']) - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - reverse_channels = Node(graph, 'reverse_channels_down') - self.assertTrue(reverse_channels.axis == 3) - self.assertTrue(type(reverse_channels.axis) == np.ndarray) - - def test_lift_down_through_transpose_negative_axis(self): - graph = build_graph(nodes3, [*connect('placeholder', 'reverse_channels_up'), - *connect('transpose_order', '1:transpose'), - *connect('reverse_channels_up', '0:transpose'), - *connect('transpose', 'result')]) - graph_ref = build_graph(nodes3, [*connect('placeholder', '0:transpose'), - *connect('transpose_order', '1:transpose'), - *connect('transpose', 'reverse_channels_up'), - *connect('reverse_channels_up', '0:result')]) - self.set_graph_attrs(graph, ['placeholder']) - - node = Node(graph, 'transpose') - reverse_channels = Node(graph, 'reverse_channels_up') - reverse_channels.axis = int64_array(-1) - - keep_moving_down = ReverseChannelsPropagationDown.pass_rc_through_transpose(node, reverse_channels) - - self.assertTrue(keep_moving_down is True) - self.check_graph_attrs(graph, ['placeholder']) - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - reverse_channels = Node(graph, 'reverse_channels_down') - self.assertTrue(reverse_channels.axis == 1) - self.assertTrue(type(reverse_channels.axis) == np.ndarray) diff --git a/tools/mo/unit_tests/mo/back/ShapeOfConstFolding_test.py b/tools/mo/unit_tests/mo/back/ShapeOfConstFolding_test.py deleted file mode 100644 index 41313c40422dae..00000000000000 --- a/tools/mo/unit_tests/mo/back/ShapeOfConstFolding_test.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.ShapeOfConstFolding import ShapeOfConstFolding -from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -const_value = np.random.rand(1, 3, 30, 30) -nodes_attributes = {'input': {'shape': int64_array([1, 3, 30, 30]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_data': {'value': None, 'shape': int64_array([1, 3, 30, 30]), 'kind': 'data'}, - 'const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': const_value}, - 'const_data': {'kind': 'data', 'value': const_value}, - 'shapeof_input': {'kind': 'op', 'op': 'ShapeOf', 'value': int64_array([1, 3, 30, 30])}, - 'shapeof_input_data': {'value': None, 'shape': None, 'kind': 'data', - 'value': int64_array([1, 3, 30, 30])}, - - 'shapeof_const': {'kind': 'op', 'op': 'ShapeOf', 'value': int64_array([1, 3, 30, 30])}, - 'shapeof_const_data': {'value': None, 'shape': None, 'kind': 'data', - 'value': int64_array([1, 3, 30, 30])}, - - 'mul': {'kind': 'op', 'op': 'Mul', 'infer': lambda node: eltwise_infer(node, lambda a, b: a * b)}, - 'mul_data': {'kind': 'data', 'value': np.array([1, 9, 900, 900])}, - 'last': {'kind': 'op', 'op': 'Result'}, - - # new nodes - 
'new_const_shapeof': {'type': 'Const', 'kind': 'op', 'op': 'Const', - 'value': int64_array([1, 3, 30, 30])} - } - -const_value2 = np.random.rand(30, 30) -nodes_attributes2 = {'input': {'shape': int64_array([1, 3, 30, 30]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_data': {'value': None, 'shape': int64_array([1, 3, 30, 30]), 'kind': 'data'}, - - 'const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': const_value2}, - 'const_data': {'kind': 'data', 'value': const_value2}, - - 'shapeof_const': {'kind': 'op', 'op': 'ShapeOf', 'value': int64_array([2700, 30])}, - 'shapeof_const_data': {'value': int64_array([2700, 30]), 'shape': None, 'kind': 'data'}, - - 'gather': {'kind': 'op', 'op': 'Gather', 'batch_dims': 0}, - 'gather_data': {'kind': 'data'}, - - 'const_concat': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': [1]}, - 'const_concat_data': {'kind': 'data', 'value': [1]}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data'}, - - 'reshape': {'kind': 'op', 'op': 'Reshape'}, - 'reshape_data': {'kind': 'data'}, - - 'matmul': {'kind': 'op', 'op': 'MatMul'}, - 'matmul_data': {'kind': 'data'}, - 'last': {'kind': 'op', 'op': 'Result'}, - - # new nodes - 'new_const_shapeof': {'type': 'Const', 'kind': 'op', 'op': 'Const', - 'value': int64_array([2700, 30])}, - } - - -class ShapeOfConstFoldingTests(unittest.TestCase): - def test_const_with_one_output(self): - graph = build_graph(nodes_attributes, - [('input', 'input_data'), - ('input_data', 'shapeof_input'), - ('shapeof_input', 'shapeof_input_data'), - ('shapeof_input_data', 'mul'), - ('const', 'const_data'), - ('const_data', 'shapeof_const'), - ('shapeof_const', 'shapeof_const_data'), - ('shapeof_const_data', 'mul'), - ('mul', 'mul_data'), - ('mul_data', 'last')], - { - 'input': {'shape': int64_array([1, 3, 30, 30])}, - 'input_data': {'shape': int64_array([1, 3, 30, 30])}, - 'shapeof_input': {'value': int64_array([1, 3, 30, 30])}, - 'shapeof_input_data': {'value': int64_array([1, 3, 30, 30])}, - 'const': {'value': const_value}, - 'const_data': {'value': const_value}, - 'shapeof_const': {'value': int64_array([1, 3, 30, 30])}, - 'shapeof_const_data': {'value': int64_array([1, 3, 30, 30])}, - 'mul_data': {'value': int64_array([1, 9, 900, 900])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('input', 'input_data'), - ('input_data', 'shapeof_input'), - ('shapeof_input', 'shapeof_input_data'), - ('shapeof_input_data', 'mul'), - ('new_const_shapeof', 'shapeof_const_data'), - ('shapeof_const_data', 'mul'), - ('mul', 'mul_data'), - ('mul_data', 'last')], - { - 'input': {'shape': int64_array([1, 3, 30, 30])}, - 'input_data': {'shape': int64_array([1, 3, 30, 30])}, - 'shapeof_input': {'value': int64_array([1, 3, 30, 30])}, - 'shapeof_input_data': {'value': int64_array([1, 3, 30, 30])}, - 'new_const_shapeof': {'value': int64_array([1, 3, 30, 30])}, - 'shapeof_const_data': {'value': int64_array([1, 3, 30, 30])}, - 'mul_data': {'value': int64_array([1, 9, 900, 900])}, - }, - nodes_with_edges_only=True) - ShapeOfConstFolding().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'last') - self.assertTrue(flag, resp) - - def test_const_with_two_outputs(self): - graph = build_graph(nodes_attributes2, - [('input', 'input_data'), - ('input_data', 'reshape'), - ('const', 'const_data'), - ('const_data', 'shapeof_const'), - ('shapeof_const', 'shapeof_const_data'), - ('shapeof_const_data', 'gather'), - ('gather', 'gather_data'), - 
('const_concat', 'const_concat_data'), - ('const_concat_data', 'concat'), - ('gather_data', 'concat'), - ('concat', 'reshape'), - ('reshape', 'reshape_data'), - ('reshape_data', 'matmul'), - ('const_data', 'matmul'), - ('matmul', 'matmul_data'), - ('matmul_data', 'last') - ], - { - 'input': {'shape': int64_array([1, 3, 30, 30])}, - 'input_data': {'shape': int64_array([1, 3, 30, 30])}, - 'shapeof_const': {'value': int64_array([2700, 30])}, - 'shapeof_const_data': {'value': int64_array([2700, 30])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes2, - [('input', 'input_data'), - ('input_data', 'reshape'), - ('new_const_shapeof', 'shapeof_const_data'), - ('shapeof_const_data', 'gather'), - ('gather', 'gather_data'), - ('const_concat', 'const_concat_data'), - ('const_concat_data', 'concat'), - ('gather_data', 'concat'), - ('concat', 'reshape'), - ('reshape', 'reshape_data'), - ('reshape_data', 'matmul'), - ('const', 'const_data'), - ('const_data', 'matmul'), - ('matmul', 'matmul_data'), - ('matmul_data', 'last')], - { - 'input': {'shape': int64_array([1, 3, 30, 30])}, - 'input_data': {'shape': int64_array([1, 3, 30, 30])}, - 'new_const_shapeof': {'value': int64_array([2700, 30])}, - 'shapeof_const_data': {'value': int64_array([2700, 30])}, - }, - nodes_with_edges_only=True) - ShapeOfConstFolding().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'last') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/ShuffleChannelPatternOptimization_test.py b/tools/mo/unit_tests/mo/back/ShuffleChannelPatternOptimization_test.py deleted file mode 100644 index 546080c0af083b..00000000000000 --- a/tools/mo/unit_tests/mo/back/ShuffleChannelPatternOptimization_test.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from argparse import Namespace -import pytest - -from openvino.tools.mo.back.ShuffleChannelPatternOptimization import ShuffleChannelFusion, DepthToSpaceFusion -from openvino.tools.mo.ops.depth_to_space import DepthToSpaceOp -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.ops.shufflechannel import ShuffleChannels -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.ops.reshape import Reshape -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, \ - valued_const_with_data, connect, regular_op_with_empty_data - - -class TestShuffleChannelFusionTest(): - @staticmethod - def get_graphs(input_shape, reshape_0_pattern, order, reshape_1_pattern, group): - nodes = { - **regular_op_with_shaped_data('input', input_shape, {'type': 'Parameter', 'shape': int64_array(input_shape), - 'infer': Parameter.infer}), - - **valued_const_with_data('reshape_0_pattern', int64_array(reshape_0_pattern)), - **regular_op_with_empty_data('reshape_0', {'type': 'Reshape', 'infer': Reshape.infer}), - - **valued_const_with_data('order', int64_array(order)), - **regular_op_with_empty_data('transpose', {'type': 'Transpose', 'infer': Transpose.infer}), - - **valued_const_with_data('reshape_1_pattern', int64_array(reshape_1_pattern)), - **regular_op_with_empty_data('reshape_1', {'type': 'Reshape', 'infer': Reshape.infer, - 'name': 'final_reshape'}), - - **result(), - } - edges = [ - *connect('input', '0:reshape_0'), - *connect('reshape_0_pattern', 
'1:reshape_0'), - *connect('reshape_0', '0:transpose'), - *connect('order', '1:transpose'), - *connect('transpose', '0:reshape_1'), - *connect('reshape_1_pattern', '1:reshape_1'), - *connect('reshape_1', 'output'), - ] - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - for node in graph.get_op_nodes(): - node['op'] = node['type'] - graph.clean_up() - - ref_nodes = { - **regular_op_with_shaped_data('input', input_shape, {'type': 'Parameter', 'shape': int64_array(input_shape), - 'infer': Parameter.infer}), - **regular_op_with_empty_data('shuffle_channel', {'type': 'ShuffleChannels', 'infer': ShuffleChannels.infer, - 'name': 'final_reshape', 'group': group}), - **result() - } - ref_edges = [*connect('input', 'shuffle_channel'), *connect('shuffle_channel', 'output')] - graph_ref = build_graph(ref_nodes, ref_edges, nodes_with_edges_only=True) - for node in graph_ref.get_op_nodes(): - node['op'] = node['type'] - graph_ref.clean_up() - - return graph, graph_ref - - @pytest.mark.parametrize("input_shape, reshape_0_pattern, order, reshape_1_pattern, group",[ - ([1, 512, 7, 6], [1, 2, 256, 7, 6], [0, 2, 1, 3, 4], [1, 512, 7, 6], 2), - ([2, 512, 7, 6], [2, 2, 256, 7, 6], [0, 2, 1, 3, 4], [2, 512, 7, 6], 2), - ([1, 200, 200, 200], [1, 50, 4, 200, 200], [0, 2, 1, 3, 4], [1, 200, 200, 200], 50), - ]) - def test_fusion(self, input_shape, reshape_0_pattern, order, reshape_1_pattern, group): - graph, graph_ref = self.get_graphs(input_shape, reshape_0_pattern, order, reshape_1_pattern, group) - ShuffleChannelFusion().find_and_replace_pattern(graph) - graph.clean_up() - (flag, resp) = compare_graphs(graph, graph_ref, 'output') - assert flag, resp - assert len(graph.get_op_nodes(name='final_reshape')) == 1 and \ - graph.get_op_nodes(name='final_reshape')[0].op == 'ShuffleChannels' - - @pytest.mark.parametrize("input_shape, reshape_0_pattern, order, reshape_1_pattern, group",[ - ([1, 512, 7, 6], [0, 2, 256, 7, 6], [0, 2, 1, 3, 4], [1, 512, 7, 6], 2), - ([1, 512, 7, 6], [1, 2, 256, 7, 6], [0, 2, 1, 4, 3], [1, 512, 7, 6], 2), - ([1, 512, 7, 6], [1, 2, 256, 7, 6], [0, 2, 1, 3, 4], [-1, 512, 7, 6], 2), - ]) - def test_negative(self, input_shape, reshape_0_pattern, order, reshape_1_pattern, group): - graph, _ = self.get_graphs(input_shape, reshape_0_pattern, order, reshape_1_pattern, group) - graph_ref = graph.copy() - ShuffleChannelFusion().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'output') - assert flag, resp - - -class TestDepthToSpaceFusionTest(): - @staticmethod - def get_graphs(input_shape, reshape_0_pattern, order, reshape_1_pattern, block_size): - nodes = { - **regular_op_with_shaped_data('input', input_shape, {'type': 'Parameter', 'shape': int64_array(input_shape), - 'infer': Parameter.infer}), - - **valued_const_with_data('reshape_0_pattern', int64_array(reshape_0_pattern)), - **regular_op_with_empty_data('reshape_0', {'type': 'Reshape', 'infer': Reshape.infer}), - - **valued_const_with_data('order', int64_array(order)), - **regular_op_with_empty_data('transpose', {'type': 'Transpose', 'infer': Transpose.infer}), - - **valued_const_with_data('reshape_1_pattern', int64_array(reshape_1_pattern)), - **regular_op_with_empty_data('reshape_1', {'type': 'Reshape', 'infer': Reshape.infer, - 'name': 'final_reshape'}), - - **result(), - } - edges = [ - *connect('input', '0:reshape_0'), - *connect('reshape_0_pattern', '1:reshape_0'), - *connect('reshape_0', '0:transpose'), - *connect('order', '1:transpose'), - *connect('transpose', '0:reshape_1'), - 
*connect('reshape_1_pattern', '1:reshape_1'), - *connect('reshape_1', 'output'), - ] - graph = build_graph(nodes, edges, nodes_with_edges_only=True, cli=Namespace()) - for node in graph.get_op_nodes(): - node['op'] = node['type'] - graph.clean_up() - - ref_nodes = { - **regular_op_with_shaped_data('input', input_shape, {'type': 'Parameter', 'shape': int64_array(input_shape), - 'infer': Parameter.infer}), - **regular_op_with_empty_data('depth_to_space', {'type': 'DepthToSpace', 'infer': DepthToSpaceOp.infer, - 'name': 'final_reshape', 'block_size': block_size}), - **result() - } - ref_edges = [*connect('input', 'depth_to_space'), *connect('depth_to_space', 'output')] - graph_ref = build_graph(ref_nodes, ref_edges, nodes_with_edges_only=True) - for node in graph_ref.get_op_nodes(): - node['op'] = node['type'] - graph_ref.clean_up() - graph.graph['layout'] = 'NCHW' - graph_ref.graph['layout'] = 'NCHW' - - return graph, graph_ref - - @pytest.mark.parametrize("input_shape, reshape_0_pattern, order, reshape_1_pattern, block_size",[ - ([1, 512, 7, 6], [1, 2, 2, 128, 7, 6], [0, 1, 4, 2, 5, 3], [1, 128, 14, 12], 2), - ([2, 512, 7, 6], [2, 2, 2, 128, 7, 6], [0, 1, 4, 2, 5, 3], [2, 128, 14, 12], 2), - ([1, 200, 200, 200], [1, 2, 2, 50, 200, 200], [0, 1, 4, 2, 5, 3], [1, 50, 400, 400], 2), - ]) - def test_fusion(self, input_shape, reshape_0_pattern, order, reshape_1_pattern, block_size): - graph, graph_ref = self.get_graphs(input_shape, reshape_0_pattern, order, reshape_1_pattern, block_size) - DepthToSpaceFusion().find_and_replace_pattern(graph) - graph.clean_up() - (flag, resp) = compare_graphs(graph, graph_ref, 'output') - assert flag, resp - assert len(graph.get_op_nodes(name='final_reshape')) == 1 and \ - graph.get_op_nodes(name='final_reshape')[0].op == 'DepthToSpace' - - @pytest.mark.parametrize("input_shape, reshape_0_pattern, order, reshape_1_pattern, group",[ - ([1, 512, 7, 6], [0, 2, 2, 128, 7, 6], [0, 1, 4, 2, 5, 3], [1, 128, 14, 12], 2), - ([2, 512, 7, 6], [2, 2, 2, 128, 7, 6], [0, 1, 4, 2, 5, 3], [-1, 128, 14, 12], 2), - ([1, 200, 200, 200], [1, 2, 2, 50, 200, 200], [0, 1, 4, 2, 3, 5], [1, 50, 400, 400], 2), - ]) - def test_negative(self, input_shape, reshape_0_pattern, order, reshape_1_pattern, group): - graph, _ = self.get_graphs(input_shape, reshape_0_pattern, order, reshape_1_pattern, group) - graph_ref = graph.copy() - DepthToSpaceFusion().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'output') - assert flag, resp diff --git a/tools/mo/unit_tests/mo/back/SpecialNodesFinalization_test.py b/tools/mo/unit_tests/mo/back/SpecialNodesFinalization_test.py deleted file mode 100644 index a288c411bacf21..00000000000000 --- a/tools/mo/unit_tests/mo/back/SpecialNodesFinalization_test.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.SpecialNodesFinalization import CreateConstNodesReplacement -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_attrs - - -class CreateConstNodesReplacementTest(unittest.TestCase): - nodes = [ - ('data_node', {'kind': 'data', 'shape': None, 'value': None}), - ('next_node', {'kind': 'op'}), - ] - edges = [ - ('data_node', 'next_node') - ] - - new_nodes = [ - ('const', {'kind': 'op', 'op': 'Const'}), - ('const_data', {'kind': 'data'}) - ] - new_edges = [ - ('const', 'data_node'), - ('const_data', 'const') - ] - - def 
test_one_node(self): - """We should add Const node and data node.""" - shape = np.array([2, 3, 4]) - data = np.zeros(shape) - graph = build_graph_with_attrs( - nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges, - update_nodes_attributes=[('data_node', {'shape': shape, 'value': data})] - ) - graph_ref = build_graph_with_attrs( - nodes_with_attrs=self.nodes + self.new_nodes, - edges_with_attrs=self.edges + self.new_edges, - update_nodes_attributes=[('data_node', {'shape': shape, 'value': data}), - ('const_data', {'shape': shape, 'value': data})] - ) - tested_pattern = CreateConstNodesReplacement() - tested_pattern.find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, last_node='next_node') - self.assertTrue(flag, resp) - - def test_one_bin_node(self): - """Nothing should happen.""" - shape = np.array([2, 3, 4]) - data = np.zeros(shape) - graph = build_graph_with_attrs( - nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges, - update_nodes_attributes=[('data_node', {'shape': shape, 'value': data})], - update_edge_attrs={('data_node', 'next_node', 0): {'bin': 0}}, - ) - tested_pattern = CreateConstNodesReplacement() - tested_pattern.find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph, last_node='next_node') - self.assertTrue(flag, resp) - - def test_two_nodes_with_bin(self): - """Test case for data node with 2 consumers with bin edge attr. - Nothing should happened.""" - shape = np.array([2, 3, 4]) - data = np.zeros(shape) - graph = build_graph_with_attrs( - nodes_with_attrs=self.nodes + [('next_node_2', {'kind': 'op'})], - edges_with_attrs=self.edges + [('data_node', 'next_node_2')], - update_nodes_attributes=[('data_node', {'shape': shape, 'value': data})], - update_edge_attrs={('data_node', 'next_node', 0): {'bin': 0}, ('data_node', 'next_node_2', 0): {'bin': 0}}, - ) - tested_pattern = CreateConstNodesReplacement() - tested_pattern.find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph, last_node='next_node') - self.assertTrue(flag, resp) - - def test_two_nodes_one_bin(self): - """Test case for two output nodes, one with 'bin' parameter, other without.""" - shape = np.array([2, 3, 4]) - data = np.zeros(shape) - graph = build_graph_with_attrs( - nodes_with_attrs=self.nodes + [('next_node_2', {'kind': 'op'})], - edges_with_attrs=self.edges + [('data_node', 'next_node_2')], - update_nodes_attributes=[('data_node', {'shape': shape, 'value': data})], - update_edge_attrs={('data_node', 'next_node', 0): {'bin': 0}}, - ) - graph_ref = build_graph_with_attrs( - nodes_with_attrs=self.nodes + self.new_nodes + [('next_node_2', {'kind': 'op'})], - edges_with_attrs=self.edges + self.new_edges + [('data_node', 'next_node_2')], - update_nodes_attributes=[('data_node', {'shape': shape, 'value': data}), - ('const_data', {'shape': shape, 'value': data})] - ) - tested_pattern = CreateConstNodesReplacement() - tested_pattern.find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, last_node='next_node') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/TransposeDFT_test.py b/tools/mo/unit_tests/mo/back/TransposeDFT_test.py deleted file mode 100644 index 85e20268caeedf..00000000000000 --- a/tools/mo/unit_tests/mo/back/TransposeDFT_test.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.back.TransposeDFT import TransposeDFT -from 
openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \ - regular_op_with_empty_data - -dft_graph_node_attrs = { - **regular_op_with_shaped_data('placeholder', [8, 2, 40, 56], {'type': 'Parameter', 'op': 'Parameter'}), - **valued_const_with_data('axes', int64_array([-2, -1])), - **regular_op_with_shaped_data('dft', [8, 2, 40, 56], {'op': 'DFT', 'need_insert_transposes_for_dft': True}), - **regular_op_with_shaped_data('abs', [8, 2, 40, 56], {'type': 'Abs', 'op': 'Abs'}), - **result(), -} - -dft_graph_edges = [ - *connect('placeholder', '0:dft'), - *connect('axes', '1:dft'), - *connect('dft', 'abs'), - *connect('abs', 'output'), -] - - -transposed_dft_graph_node_attrs = { - **regular_op_with_shaped_data('placeholder', [8, 2, 40, 56], {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_empty_data('transpose_before', - {'type': 'Transpose', 'op': 'Transpose', 'need_shape_inference': True}), - **valued_const_with_data('transpose_before_axis_const', int64_array([0, 2, 3, 1])), - **regular_op_with_empty_data('transpose_after', - {'type': 'Transpose', 'op': 'Transpose', 'need_shape_inference': True}), - **valued_const_with_data('transpose_after_axis_const', int64_array([0, 3, 1, 2])), - **valued_const_with_data('dft_axes', int64_array([-2, -1])), - **regular_op_with_shaped_data('dft', [8, 2, 40, 56], {'op': 'DFT', 'need_insert_transposes_for_dft': True}), - **regular_op_with_shaped_data('abs', [8, 2, 40, 56], {'type': 'Abs', 'op': 'Abs'}), - **result(), -} - -transposed_dft_graph_edges = [ - *connect('placeholder', '0:transpose_before'), - *connect('transpose_before_axis_const', '1:transpose_before'), - *connect('transpose_before', '0:dft'), - *connect('dft_axes', '1:dft'), - *connect('dft', '0:transpose_after'), - *connect('transpose_after_axis_const', '1:transpose_after'), - *connect('transpose_after', 'abs'), - *connect('abs', 'output'), -] - - -nontransposed_dft_graph_node_attrs = { - **regular_op_with_shaped_data('placeholder', [8, 2, 40, 56], {'type': 'Parameter', 'op': 'Parameter'}), - **valued_const_with_data('axes', int64_array([-2, -1])), - **regular_op_with_shaped_data('dft', [8, 2, 40, 56], {'op': 'DFT'}), - **regular_op_with_shaped_data('abs', [8, 2, 40, 56], {'type': 'Abs', 'op': 'Abs'}), - **result(), -} - -nontransposed_dft_graph_edges = [ - *connect('placeholder', '0:dft'), - *connect('axes', '1:dft'), - *connect('dft', 'abs'), - *connect('abs', 'output'), -] - - -class TransposeDFTTest(unittest.TestCase): - def test_dft_transpose(self): - graph = build_graph(nodes_attrs=dft_graph_node_attrs, edges=dft_graph_edges) - ref_graph = build_graph(nodes_attrs=transposed_dft_graph_node_attrs, edges=transposed_dft_graph_edges) - graph.graph['fw'] = 'tf' - TransposeDFT().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_dft_nontranspose(self): - graph = build_graph(nodes_attrs=nontransposed_dft_graph_node_attrs, edges=nontransposed_dft_graph_edges) - ref_graph = build_graph(nodes_attrs=nontransposed_dft_graph_node_attrs, edges=nontransposed_dft_graph_edges) - TransposeDFT().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/TransposeReduceFusing_test.py 
b/tools/mo/unit_tests/mo/back/TransposeReduceFusing_test.py deleted file mode 100644 index 70fdaa3bd84a00..00000000000000 --- a/tools/mo/unit_tests/mo/back/TransposeReduceFusing_test.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.back.TransposeReduceFusing import TransposeReduce -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - # op - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - 'transpose': {'kind': 'op', 'type': 'Transpose'}, - 'reduceMean': {'kind': 'op', 'type': 'ReduceMean', 'keep_dims': False}, - 'transpose_const': {'kind': 'op', 'type': 'Const'}, - 'reduceMeanConst': {'kind': 'op', 'type': 'Const'}, - 'convolution': {'kind': 'op', 'op': 'Convolution'}, - 'gather': {'kind': 'op', 'op': 'Gather'}, - 'gather_const': {'kind': 'op', 'op': 'Const'}, - - # data - 'placeholder_data': {'kind': 'data'}, - 'transpose_data': {'kind': 'data'}, - 'reduceMean_data': {'kind': 'data'}, - 'transpose_const_data': {'kind': 'data'}, - 'reduceMeanConst_data': {'kind': 'data'}, - 'gather_data': {'kind': 'data'}, - 'gather_const_data': {'kind': 'data'} -} - - -class TestTransposeReduceFusing(unittest.TestCase): - - def test_positive(self): - graph = build_graph(nodes_attributes, - [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'transpose', {'in': 0}), - ('transpose_const', 'transpose_const_data'), - ('transpose_const_data', 'transpose', {'in': 1}), - ('transpose', 'transpose_data'), - ('transpose_data', 'reduceMean', {'in': 0}), - ('reduceMeanConst', 'reduceMeanConst_data'), - ('reduceMeanConst_data', 'reduceMean', {'in': 1}), - ('reduceMean', 'reduceMean_data'), - ('reduceMean_data', 'convolution') - ], - { - 'transpose_const': {'value': int64_array([0, 2, 3, 1])}, - 'transpose_const_data': {'value': int64_array([0, 2, 3, 1])}, - 'reduceMeanConst': {'value': int64_array([1, 2])}, - 'reduceMeanConst_data': {'value': int64_array([1, 2])} - }, - nodes_with_edges_only=True) - ref_graph = build_graph(nodes_attributes, - [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'reduceMean', {'in': 0}), - ('transpose_const', 'transpose_const_data'), - ('transpose_const_data', 'gather', {'in': 0}), - ('reduceMeanConst', 'reduceMeanConst_data'), - ('reduceMeanConst_data', 'gather', {'in': 1}), - ('gather_const', 'gather_const_data'), - ('gather_const_data', 'gather', {'in': 2}), - ('gather', 'gather_data'), - ('gather_data', 'reduceMean', {'in': 1}), - ('reduceMean', 'reduceMean_data'), - ('reduceMean_data', 'convolution') - ], - { - 'transpose_const_data': {'value': int64_array([0, 2, 3, 1])}, - 'reduceMeanConst_data': {'value': int64_array([1, 2])}, - }, - nodes_with_edges_only=True) - TransposeReduce().find_and_replace_pattern(graph) - flag, resp = compare_graphs(graph, ref_graph, 'convolution', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_negative_values(self): - graph = build_graph(nodes_attributes, - [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'transpose', {'in': 0}), - ('transpose_const', 'transpose_const_data'), - ('transpose_const_data', 'transpose', {'in': 1}), - ('transpose', 'transpose_data'), - ('transpose_data', 'reduceMean', {'in': 0}), - ('reduceMeanConst', 'reduceMeanConst_data'), - ('reduceMeanConst_data', 'reduceMean', {'in': 1}), - 
('reduceMean', 'reduceMean_data'), - ('reduceMean_data', 'convolution') - ], - { - 'transpose_const': {'value': int64_array([0, 1, 3, 2])}, - 'transpose_const_data': {'value': int64_array([0, 1, 3, 2])}, - 'reduceMeanConst': {'value': int64_array([1])}, - 'reduceMeanConst_data': {'value': int64_array([1])} - }, - nodes_with_edges_only=True) - ref_graph = build_graph(nodes_attributes, - [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'transpose', {'in': 0}), - ('transpose_const', 'transpose_const_data'), - ('transpose_const_data', 'transpose', {'in': 1}), - ('transpose', 'transpose_data'), - ('transpose_data', 'reduceMean', {'in': 0}), - ('reduceMeanConst', 'reduceMeanConst_data'), - ('reduceMeanConst_data', 'reduceMean', {'in': 1}), - ('reduceMean', 'reduceMean_data'), - ('reduceMean_data', 'convolution') - ], - { - 'transpose_const': {'value': int64_array([0, 1, 3, 2])}, - 'transpose_const_data': {'value': int64_array([0, 1, 3, 2])}, - 'reduceMeanConst': {'value': int64_array([1])}, - 'reduceMeanConst_data': {'value': int64_array([1])} - }, - nodes_with_edges_only=True) - TransposeReduce().find_and_replace_pattern(graph) - flag, resp = compare_graphs(graph, ref_graph, 'convolution', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_negative(self): - graph = build_graph(nodes_attributes, - [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'reduceMean', {'in': 0}), - ('reduceMeanConst', 'reduceMeanConst_data'), - ('reduceMeanConst_data', 'reduceMean', {'in': 1}), - ('reduceMean', 'reduceMean_data'), - ('reduceMean_data', 'convolution') - ], - nodes_with_edges_only=True) - ref_graph = build_graph(nodes_attributes, - [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'reduceMean', {'in': 0}), - ('reduceMeanConst', 'reduceMeanConst_data'), - ('reduceMeanConst_data', 'reduceMean', {'in': 1}), - ('reduceMean', 'reduceMean_data'), - ('reduceMean_data', 'convolution') - ], - nodes_with_edges_only=True) - - TransposeReduce().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'convolution', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/back/__init__.py b/tools/mo/unit_tests/mo/back/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/back/add_outputs_recursive_test.py b/tools/mo/unit_tests/mo/back/add_outputs_recursive_test.py deleted file mode 100644 index c3ea17b67238ec..00000000000000 --- a/tools/mo/unit_tests/mo/back/add_outputs_recursive_test.py +++ /dev/null @@ -1,602 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import numpy as np -import unittest - -from openvino.tools.mo.back.add_outputs_recursive import AddOutputRecursive -from openvino.tools.mo.ops.If import If -from openvino.tools.mo.ops.loop import Loop -from openvino.tools.mo.ops.tensor_iterator import TensorIterator -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value, shape_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect, shaped_parameter, \ - valued_const_with_data, shaped_const_with_data, regular_op_with_shaped_data - -# test for Loop -main_graph_nodes = { - **shaped_parameter("IN_1", [1, 4, 64, 54]), - **shaped_parameter("IN_2", [1, 4, 64, 54]), - 
**valued_const_with_data("M", int64_array([5])), - **valued_const_with_data("cond", int64_array([1])), - **regular_op_with_empty_data("Loop", {'op': "Loop", 'type': 'Loop', 'sub_graphs': ['body'], "body": None, - 'input_port_map': [{'external_port_id': 1, 'internal_layer_id': 2, - 'axis': None}, - {'external_port_id': 2, 'internal_layer_id': 0, - 'axis': None}, - {'external_port_id': 3, 'internal_layer_id': 1, - 'axis': None}], - 'output_port_map': [{'external_port_id': 0, 'internal_layer_id': 4, - 'axis': None}, - {'external_port_id': -1, 'internal_layer_id': 5, - 'axis': None, 'purpose': "execution_condition"}], - 'back_edges': [{'from_layer': 8, 'to_layer': 7}, - {'from_layer': 10, 'to_layer': 9}], - 'infer': Loop.infer}), - **result("OUT_1") -} - -sub_graph_1_nodes = { - **shaped_parameter("IN_2", int64_array([1, 4, 64, 54]), {'internal_layer_id': 0}), - **valued_const_with_data("M_2", int64_array([10])), - **valued_const_with_data("cond_2", int64_array([1])), - **regular_op_with_empty_data("Loop_2", {'op': "Loop", 'type': 'Loop', 'sub_graphs': ['body'], "body": None, - 'input_port_map': [{'external_port_id': 1, 'internal_layer_id': 0, - 'axis': None}, - {'external_port_id': 2, 'internal_layer_id': 2, - 'axis': None}], - 'output_port_map': [{'external_port_id': 0, 'internal_layer_id': 7, - 'axis': None}, - {'external_port_id': -1, 'internal_layer_id': 6, - 'axis': None, - 'purpose': "execution_condition"}], - 'back_edges': [{'from_layer': 1, 'to_layer': 0}, - {'from_layer': 8, 'to_layer': 2}], - 'infer': Loop.infer}), - **regular_op_with_empty_data('Loop_2_out', {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, - 'internal_layer_id': 3}), - **shaped_parameter("in_1_int", int64_array([1, 4, 64, 54]), {'internal_layer_id': 1}), - **regular_op_with_empty_data("in_1_int_out", - {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, 'internal_layer_id': 4}), - **shaped_parameter("cond_1_int", int64_array([1]), {'internal_layer_id': 2}), - **regular_op_with_empty_data("cond_1_int_out", {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, - 'internal_layer_id': 5}), -} - -sub_graph_2_nodes = { - **shaped_parameter('cond_2_int', [1, 4, 64, 54], {'internal_layer_id': 0}), - **regular_op_with_empty_data("cond_2_int_out", - {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, 'internal_layer_id': 8}), - **shaped_parameter('in_2_int', [1, 4, 64, 54], {'internal_layer_id': 1}), - **shaped_const_with_data('ones', int64_array([1, 4, 64, 54]), {'internal_layer_id': 9}), - **regular_op_with_shaped_data('OUT_2', int64_array([1, 4, 64, 54]), {'op': "Add", 'infer': copy_shape_infer}), - **regular_op_with_empty_data('OUT_2_out', - {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, 'internal_layer_id': 7}), - **regular_op_with_shaped_data('in_2_int_out', int64_array([1, 4, 64, 54]), - {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, 'internal_layer_id': 6}) -} - - -def ti_create_main_graph(body): - main_graph = build_graph(nodes_attrs=ti_main_graph_nodes, - edges=[*connect('M', '0:Loop'), - *connect('cond', '1:Loop'), - *connect('IN_2', '2:Loop'), - *connect('IN_1', "3:Loop"), - *connect('Loop:0', 'OUT_1')], - nodes_with_edges_only=True) - loop_node = Node(main_graph, 'Loop') - loop_node.body = body - loop_node.in_edge(0)['external_port_id'] = 0 - loop_node.in_edge(1)['external_port_id'] = 1 - loop_node.in_edge(2)['external_port_id'] = 2 - loop_node.in_edge(3)['external_port_id'] = 3 - loop_node.out_edge(0)['external_port_id'] = 4 - - return main_graph - - 
-def if_create_main_graph(): - sub_graph_2 = build_graph(nodes_attrs=if_sub_graph_2_then_nodes, - edges=[*connect('in_2_int', 'OUT_2'), - *connect('ones', 'OUT_2'), - *connect('OUT_2', 'OUT_2_out')], - nodes_with_edges_only=True) - - sub_graph_2_else = build_graph(nodes_attrs=if_sub_graph_2_else_nodes, - edges=[*connect('in_2_int_else', 'OUT_2_else'), - *connect('ones_else', 'OUT_2_else'), - *connect('OUT_2_else', 'OUT_2_out_else')], - nodes_with_edges_only=True) - - sub_graph_1 = build_graph(nodes_attrs=if_sub_graph_1_then_nodes, - edges=[*connect('cond_2', '0:If_2'), - *connect('IN_2', '1:If_2'), - *connect('If_2:0', 'If_2_out'), - *connect('in_1_int', 'in_1_int_out')], - nodes_with_edges_only=True) - if_node_1 = Node(sub_graph_1, 'If_2') - if_node_1.then_graph = sub_graph_2 - if_node_1.else_graph = sub_graph_2_else - - return sub_graph_1 - - -class AddOutputRecursiveTest(unittest.TestCase): - - def test_add_output_1(self): - sub_graph_2 = build_graph(nodes_attrs=sub_graph_2_nodes, - edges=[*connect('cond_2_int', 'cond_2_int_out'), - *connect('in_2_int', 'OUT_2'), - *connect('ones', 'OUT_2'), - *connect('OUT_2', 'OUT_2_out'), - *connect('in_2_int', 'in_2_int_out')], - nodes_with_edges_only=True) - - sub_graph_1 = build_graph(nodes_attrs=sub_graph_1_nodes, - edges=[*connect('M_2', '0:Loop_2'), - *connect('cond_2', '1:Loop_2'), - *connect('IN_2', '2:Loop_2'), - *connect('Loop_2:0', 'Loop_2_out'), - *connect('in_1_int', 'in_1_int_out'), - *connect('cond_1_int', 'cond_1_int_out')], - nodes_with_edges_only=True) - loop_node_1 = Node(sub_graph_1, 'Loop_2') - loop_node_1.body = sub_graph_2 - - main_graph = build_graph(nodes_attrs=main_graph_nodes, - edges=[*connect('M', '0:Loop'), - *connect('cond', '1:Loop'), - *connect('IN_2', '2:Loop'), - *connect('IN_1', "3:Loop"), - *connect('Loop:0', 'OUT_1')], - nodes_with_edges_only=True) - loop_node = Node(main_graph, 'Loop') - loop_node.body = sub_graph_1 - main_graph.graph['additional_outputs'] = ['Loop', 'Loop_2'] - loop_node_1['out_ports_count'] = 2 - loop_node_1.add_output_port(1) - loop_node_1['output_port_map'].append({'external_port_id': 1, 'internal_layer_id': 8, 'axis': None}) - - loop_node_output_port_map_len = len(loop_node.output_port_map) - loop_node_out_ports_len = len(loop_node.out_ports()) - loop_2_out_ports_len = len(loop_node_1.out_ports()) - max_layer_id = 5 - - results = AddOutputRecursive().find_and_replace_pattern(main_graph) - - self.assertEqual(len(results), 2) - loop_node = Node(main_graph, 'Loop') - self.assertEqual(len(loop_node.output_port_map), loop_node_output_port_map_len + 2) - self.assertEqual(len(loop_node.out_ports()), loop_node_out_ports_len + 2) - self.assertEqual(loop_node.out_port(1).get_destination().node.op, 'Result') - self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == int64_array([5, 10, 4, 64, 54]))) - last_node = Node(sub_graph_1, 'Loop_2') - self.assertEqual(len(last_node.out_ports()), loop_2_out_ports_len) - unsq_node = last_node.out_port(0).get_destinations()[1].node - self.assertEqual(unsq_node.op, 'Unsqueeze') - self.assertEqual(unsq_node.out_port(0).get_destination().node.op, 'Result') - self.assertEqual(unsq_node.out_port(0).get_destination().node.internal_layer_id, max_layer_id + 3) - self.assertTrue(np.all(unsq_node.out_port(0).data.get_shape() == int64_array([1, 10, 4, 64, 54]))) - - -# test for TensorIterator -ti_main_graph_nodes = { - **shaped_parameter("IN_1", [1, 4, 64, 54]), - **shaped_parameter("IN_2", [1, 4, 64, 54]), - **valued_const_with_data("M", int64_array([5])), - 
**valued_const_with_data("cond", int64_array([1])), - **regular_op_with_empty_data("Loop", {'op': "TensorIterator", 'type': 'TensorIterator', - 'sub_graphs': ['body'], "body": None, - 'input_port_map': [{'external_port_id': 1, 'internal_layer_id': 2, 'axis': None}, - {'external_port_id': 2, 'internal_layer_id': 0, 'axis': None}, - {'external_port_id': 3, 'internal_layer_id': 1, 'axis': None}], - 'output_port_map': [{'external_port_id': 4, 'internal_layer_id': 4, 'axis': None}], - 'back_edges': [{'from_layer': 8, 'to_layer': 7}, - {'from_layer': 10, 'to_layer': 9}], - 'infer': TensorIterator.infer}), - **result("OUT_1") -} - -ti_sub_graph_1_nodes = { - **shaped_parameter("IN_2", int64_array([1, 4, 64, 54]), {'internal_layer_id': 0}), - **valued_const_with_data("cond_2", int64_array([1])), - **regular_op_with_empty_data("Loop_2", {'op': "TensorIterator", 'type': 'TensorIterator', - 'sub_graphs': ['body'], "body": None, - 'input_port_map': [{'external_port_id': 1, 'internal_layer_id': 0, 'axis': None}, - {'external_port_id': 0, 'internal_layer_id': 1, 'axis': 0}], - 'output_port_map': [{'external_port_id': 2, 'internal_layer_id': 7, - 'axis': None}, - ], - 'back_edges': [{'from_layer': 1, 'to_layer': 0}, - {'from_layer': 8, 'to_layer': 2}], - 'infer': TensorIterator.infer}), - **regular_op_with_empty_data('Loop_2_out', {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, - 'internal_layer_id': 3}), - **shaped_parameter("in_1_int", int64_array([1, 4, 64, 54]), {'internal_layer_id': 1}), - **regular_op_with_empty_data("in_1_int_out", {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, - 'internal_layer_id': 4}), - **shaped_parameter("cond_1_int", int64_array([1]), {'internal_layer_id': 2}), - **regular_op_with_empty_data("cond_1_int_out", {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, - 'internal_layer_id': 5}), -} - -ti_sub_graph_2_nodes = { - **shaped_parameter('cond_2_int', [1, 4, 64, 54], {'internal_layer_id': 0}), - **result("cond_2_int_out"), - **shaped_parameter('in_2_int', [1, 4, 64, 54], {'internal_layer_id': 1}), - **shaped_const_with_data('ones', int64_array([1, 4, 64, 54])), - **regular_op_with_shaped_data('OUT_2', int64_array([1, 4, 64, 54]), - {'op': "Add", 'infer': copy_shape_infer}), - **regular_op_with_empty_data('OUT_2_out', {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, - 'internal_layer_id': 7}), - **regular_op_with_empty_data('in_2_int_out', {'op': 'Result', 'type': 'Result', 'infer': lambda x: None, - 'internal_layer_id': 6}) -} - - -class TI_AddOutputRecursiveTest(unittest.TestCase): - @staticmethod - def create_graph(): - sub_graph_2 = build_graph(nodes_attrs=ti_sub_graph_2_nodes, - edges=[*connect('cond_2_int', 'cond_2_int_out'), - *connect('in_2_int', 'OUT_2'), - *connect('ones', 'OUT_2'), - *connect('OUT_2', 'OUT_2_out'), - *connect('in_2_int', 'in_2_int_out')], - nodes_with_edges_only=True) - - sub_graph_1 = build_graph(nodes_attrs=ti_sub_graph_1_nodes, - edges=[*connect('cond_2', '1:Loop_2'), - *connect('IN_2', '0:Loop_2'), - *connect('Loop_2:0', 'Loop_2_out'), - *connect('in_1_int', 'in_1_int_out'), - *connect('cond_1_int', 'cond_1_int_out')], - nodes_with_edges_only=True) - loop_node_1 = Node(sub_graph_1, 'Loop_2') - loop_node_1.body = sub_graph_2 - loop_node_1.in_edge(0)['external_port_id'] = 0 - loop_node_1.in_edge(1)['external_port_id'] = 1 - loop_node_1.out_edge(0)['external_port_id'] = 2 - - main_graph = ti_create_main_graph(sub_graph_1) - main_graph.graph['additional_outputs'] = ['Loop', 'Loop_2'] - - return 
main_graph, sub_graph_1 - - def check_body_last_node(self, body, node_id, loop_2_node_out_ports_len): - last_node = Node(body, node_id) - max_layer_id = 5 - self.assertEqual(len(last_node.out_ports()), loop_2_node_out_ports_len) - unsq_node = last_node.out_port(0).get_destinations()[1].node - self.assertEqual(unsq_node.op, 'Unsqueeze') - self.assertEqual(unsq_node.out_port(0).get_destination().node.op, 'Result') - self.assertEqual(unsq_node.out_port(0).get_destination().node.internal_layer_id, max_layer_id + 3) - self.assertTrue(np.all(unsq_node.out_port(0).data.get_shape() == int64_array([1, 1, 4, 64, 54]))) - - def check_loop_node(self, graph, node_id, port_map_len, out_ports_len): - loop_node = Node(graph, node_id) - self.assertEqual(len(loop_node.output_port_map), port_map_len + 1) - self.assertEqual(len(loop_node.out_ports()), out_ports_len + 1) - self.assertEqual(loop_node.out_port(1).get_destination().node.op, 'Result') - - def test_add_output_1(self): - main_graph, sub_graph_1 = self.create_graph() - - loop_node = Node(main_graph, 'Loop') - loop_node_output_port_map_len = len(loop_node.output_port_map) - loop_node_out_ports_len = len(loop_node.out_ports()) - loop_node_2 = Node(sub_graph_1, 'Loop_2') - loop_2_node_out_ports_len = len(loop_node_2.out_ports()) - - AddOutputRecursive().find_and_replace_pattern(main_graph) - - self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len) - self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == int64_array([1, 1, 4, 64, 54]))) - self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len) - - def test_add_output_dynamic(self): - main_graph, sub_graph_1 = self.create_graph() - - loop_node = Node(main_graph, 'Loop') - loop_node_output_port_map_len = len(loop_node.output_port_map) - loop_node_out_ports_len = len(loop_node.out_ports()) - loop_node_2 = Node(sub_graph_1, 'Loop_2') - loop_2_node_out_ports_len = len(loop_node_2.out_ports()) - - loop_node.input_port_map[2]['axis'] = 1 - loop_node.input_port_map[2]['start'] = 0 - loop_node.input_port_map[2]['end'] = -1 - loop_node.input_port_map[2]['stride'] = 1 - in_1_node = Node(main_graph, 'IN_1') - in_1_node['shape'] = shape_array([1, dynamic_dimension_value, 64, 54]) - - AddOutputRecursive().find_and_replace_pattern(main_graph) - - self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len) - self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == - shape_array([dynamic_dimension_value, 1, 4, 64, 54]))) - self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len) - - def test_add_output_several_iterations(self): - main_graph, sub_graph_1 = self.create_graph() - - loop_node = Node(main_graph, 'Loop') - loop_node_output_port_map_len = len(loop_node.output_port_map) - loop_node_out_ports_len = len(loop_node.out_ports()) - loop_node_2 = Node(sub_graph_1, 'Loop_2') - loop_2_node_out_ports_len = len(loop_node_2.out_ports()) - - loop_node.input_port_map[2]['axis'] = 1 - loop_node.input_port_map[2]['start'] = 0 - loop_node.input_port_map[2]['end'] = -1 - loop_node.input_port_map[2]['stride'] = 1 - loop_node.output_port_map[0]['axis'] = 1 - loop_node.output_port_map[0]['start'] = 0 - loop_node.output_port_map[0]['end'] = 10 - loop_node.output_port_map[0]['stride'] = 2 - - AddOutputRecursive().find_and_replace_pattern(main_graph) - - self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len) - 
self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([4, 1, 4, 64, 54]))) - self.assertTrue(np.all(loop_node.out_port(0).data.get_shape() == shape_array([1, 5, 64, 54]))) - self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len) - - def test_add_output_several_iterations_wo_start_end(self): - main_graph, sub_graph_1 = self.create_graph() - - loop_node = Node(main_graph, 'Loop') - loop_node_output_port_map_len = len(loop_node.output_port_map) - loop_node_out_ports_len = len(loop_node.out_ports()) - loop_node.input_port_map[2]['axis'] = 1 - loop_node.input_port_map[2]['stride'] = 1 - - loop_node_2 = Node(sub_graph_1, 'Loop_2') - loop_2_node_out_ports_len = len(loop_node_2.out_ports()) - - AddOutputRecursive().find_and_replace_pattern(main_graph) - - self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len) - self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([4, 1, 4, 64, 54]))) - self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len) - - def test_add_output_several_iterations_negative_end(self): - main_graph, sub_graph_1 = self.create_graph() - - loop_node = Node(main_graph, 'Loop') - loop_node_output_port_map_len = len(loop_node.output_port_map) - loop_node_out_ports_len = len(loop_node.out_ports()) - loop_node_2 = Node(sub_graph_1, 'Loop_2') - loop_2_node_out_ports_len = len(loop_node_2.out_ports()) - - loop_node.input_port_map[2]['axis'] = 1 - loop_node.input_port_map[2]['start'] = 0 - loop_node.input_port_map[2]['end'] = -3 - loop_node.input_port_map[2]['stride'] = 1 - loop_node.output_port_map[0]['axis'] = 1 - loop_node.output_port_map[0]['start'] = 0 - loop_node.output_port_map[0]['end'] = -1 - loop_node.output_port_map[0]['stride'] = 2 - - AddOutputRecursive().find_and_replace_pattern(main_graph) - - self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len) - self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([2, 1, 4, 64, 54]))) - self.assertTrue(np.all(loop_node.out_port(0).data.get_shape() == shape_array([1, 2, 64, 54]))) - self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len) - - def test_add_output_several_iterations_negative_stride(self): - main_graph, sub_graph_1 = self.create_graph() - - loop_node = Node(main_graph, 'Loop') - - loop_node_output_port_map_len = len(loop_node.output_port_map) - loop_node_out_ports_len = len(loop_node.out_ports()) - loop_node_2 = Node(sub_graph_1, 'Loop_2') - loop_2_node_out_ports_len = len(loop_node_2.out_ports()) - - loop_node.input_port_map[2]['axis'] = 1 - loop_node.input_port_map[2]['start'] = -1 - loop_node.input_port_map[2]['end'] = 0 - loop_node.input_port_map[2]['stride'] = -2 - loop_node.output_port_map[0]['axis'] = 1 - loop_node.output_port_map[0]['start'] = 0 - loop_node.output_port_map[0]['end'] = -1 - loop_node.output_port_map[0]['stride'] = 2 - - AddOutputRecursive().find_and_replace_pattern(main_graph) - - self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len) - self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([2, 1, 4, 64, 54]))) - self.assertTrue(np.all(loop_node.out_port(0).data.get_shape() == shape_array([1, 2, 64, 54]))) - self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len) - - def test_add_output_several_iterations_negative_start_end_input(self): - main_graph, sub_graph_1 = self.create_graph() - - loop_node = 
Node(main_graph, 'Loop') - loop_node_output_port_map_len = len(loop_node.output_port_map) - loop_node_out_ports_len = len(loop_node.out_ports()) - loop_node_2 = Node(sub_graph_1, 'Loop_2') - loop_2_node_out_ports_len = len(loop_node_2.out_ports()) - - loop_node.input_port_map[2]['axis'] = 1 - loop_node.input_port_map[2]['start'] = -1 - loop_node.input_port_map[2]['end'] = -4 - loop_node.input_port_map[2]['stride'] = -2 - loop_node.output_port_map[0]['axis'] = 1 - loop_node.output_port_map[0]['start'] = 0 - loop_node.output_port_map[0]['end'] = -1 - loop_node.output_port_map[0]['stride'] = 2 - - AddOutputRecursive().find_and_replace_pattern(main_graph) - - self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len) - self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([2, 1, 4, 64, 54]))) - self.assertTrue(np.all(loop_node.out_port(0).data.get_shape() == shape_array([1, 2, 64, 54]))) - self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len) - - def test_add_output_several_iterations_negative_start_end_output(self): - main_graph, sub_graph_1 = self.create_graph() - - loop_node = Node(main_graph, 'Loop') - loop_node_output_port_map_len = len(loop_node.output_port_map) - loop_node_out_ports_len = len(loop_node.out_ports()) - loop_node_2 = Node(sub_graph_1, 'Loop_2') - loop_2_node_out_ports_len = len(loop_node_2.out_ports()) - - loop_node.input_port_map[2]['axis'] = 1 - loop_node.input_port_map[2]['start'] = -1 - loop_node.input_port_map[2]['end'] = -4 - loop_node.input_port_map[2]['stride'] = -2 - loop_node.output_port_map[0]['axis'] = 1 - loop_node.output_port_map[0]['start'] = -4 - loop_node.output_port_map[0]['end'] = -1 - loop_node.output_port_map[0]['stride'] = 1 - - AddOutputRecursive().find_and_replace_pattern(main_graph) - - self.check_loop_node(main_graph, 'Loop', loop_node_output_port_map_len, loop_node_out_ports_len) - self.assertTrue(np.all(loop_node.out_port(1).data.get_shape() == shape_array([2, 1, 4, 64, 54]))) - self.assertTrue(np.all(loop_node.out_port(0).data.get_shape() == shape_array([1, 3, 64, 54]))) - self.check_body_last_node(sub_graph_1, 'Loop_2', loop_2_node_out_ports_len) - - -# test for If -if_main_graph_nodes = { - **shaped_parameter("IN_1", [1, 4, 64, 54]), - **shaped_parameter("IN_2", [1, 4, 64, 54]), - **valued_const_with_data("cond", int64_array([1])), - **regular_op_with_empty_data("If", {'op': "If", 'type': 'If', 'sub_graphs': ['then_graph', 'else_graph'], - "then_graph": None, 'else_graph': None, 'infer': If.infer}), - **result("OUT_1") -} - -if_sub_graph_1_then_nodes = { - **shaped_parameter("IN_2", int64_array([1, 4, 64, 54]), {'input_id': 2}), - **valued_const_with_data("cond_2", int64_array([1])), - **regular_op_with_empty_data("If_2", {'op': "If", 'type': 'If', 'sub_graphs': ['then_graph', 'else_graph'], - "then_graph": None, 'else_graph': None, 'infer': If.infer}), - **regular_op_with_empty_data('If_2_out', {'op': 'Result', 'type': 'Result', 'infer': lambda x: None}), - **shaped_parameter("in_1_int", int64_array([1, 4, 64, 54]), {'input_id': 1}), - **regular_op_with_empty_data("in_1_int_out", {'op': 'Result', 'type': 'Result', 'output_id': 0}) -} - -if_sub_graph_1_else_nodes = { - **shaped_parameter("in_1_int", int64_array([1, 4, 64, 54]), {'input_id': 1}), - **regular_op_with_empty_data("in_1_int_out", {'op': 'Result', 'type': 'Result', 'output_id': 0}) -} - -if_sub_graph_2_then_nodes = { - **shaped_parameter('in_2_int', [1, 4, 64, 54], {'input_id': 1}), - 
**shaped_const_with_data('ones', int64_array([1, 4, 64, 54])), - **regular_op_with_shaped_data('OUT_2', int64_array([1, 4, 64, 54]), {'op': "Add"}), - **regular_op_with_empty_data('OUT_2_out', {'op': 'Result', 'type': 'Result', 'output_id': 0}), -} - -if_sub_graph_2_else_nodes = { - **shaped_parameter('in_2_int_else', [1, 4, 64, 54], {'input_id': 1}), - **shaped_const_with_data('ones_else', int64_array([1, 4, 64, 54])), - **regular_op_with_shaped_data('OUT_2_else', int64_array([1, 4, 64, 54]), {'op': "Sub"}), - **regular_op_with_empty_data('OUT_2_out_else', {'op': 'Result', 'type': 'Result', 'output_id': 0}), -} - - -class IF_AddOutputRecursiveTest(unittest.TestCase): - def test_add_output_1(self): - sub_graph_1 = if_create_main_graph() - if_node_1 = Node(sub_graph_1, 'If_2') - - sub_graph_1_else = build_graph(nodes_attrs=if_sub_graph_1_else_nodes, - edges=[*connect('in_1_int', 'in_1_int_out')], - nodes_with_edges_only=True) - - main_graph = build_graph(nodes_attrs=if_main_graph_nodes, - edges=[*connect('cond', '0:If'), - *connect('IN_1', '1:If'), - *connect('IN_2', "2:If"), - *connect('If:0', 'OUT_1')], - nodes_with_edges_only=True) - if_node = Node(main_graph, 'If') - if_node.then_graph = sub_graph_1 - if_node.else_graph = sub_graph_1_else - if_node_out_ports_len = len(if_node.out_ports()) - if_2_node_out_ports_len = len(if_node_1.out_ports()) - - main_graph.graph['additional_outputs'] = ['If', ['If_2', 'in_1_int']] - - AddOutputRecursive().find_and_replace_pattern(main_graph) - if_node = Node(main_graph, 'If') - self.assertEqual(len(if_node.out_ports()), if_node_out_ports_len + 1) - self.assertEqual(if_node.out_port(1).get_destination().node.op, 'Result') - self.assertTrue(np.all(if_node.out_port(1).data.get_shape() == int64_array([1, 4, 64, 54]))) - last_node = Node(sub_graph_1, 'If_2') - self.assertEqual(len(last_node.out_ports()), if_2_node_out_ports_len) - self.assertEqual(last_node.out_port(0).get_destinations()[1].node.op, 'Result') - self.assertTrue(np.all(last_node.out_port(0).data.get_shape() == int64_array([1, 4, 64, 54]))) - - -class SplitUserPathTest(unittest.TestCase): - - @staticmethod - def create_graph(): - sub_graph_1 = if_create_main_graph() - out_node = Node(sub_graph_1, 'If_2_out') - out_node['internal_layer_id'] = 4 - - main_graph = ti_create_main_graph(sub_graph_1) - - return main_graph - - def test_linear_graph_change(self): - graph = self.create_graph() - path = ['Loop', 'in_1_int'] - ref_path = [] - loop_node = Node(graph, 'Loop') - ref_path.append({'node': loop_node, 'graph': graph}) - ref_path.append({'node': Node(loop_node.body, 'in_1_int'), 'graph': loop_node.body}) - - tracks = AddOutputRecursive().split_path_to_simple_tracks(graph, path) - - self.assertTrue(np.all(tracks[0] == ref_path)) - - def test_1_if_graph_change(self): - graph = self.create_graph() - path = ['Loop', 'If_2', ['OUT_2', 'OUT_2_else']] - ref_path = [[]] - loop_node = Node(graph, 'Loop') - ref_path[0].append({'node': loop_node, 'graph': graph}) - if_node = Node(loop_node.body, 'If_2') - ref_path[0].append({'node': if_node, 'graph': loop_node.body}) - ref_path.append([]) - ref_path[1] = ref_path[0][:] - ref_path[0].append({'node': Node(if_node.then_graph, 'OUT_2'), 'graph': if_node.then_graph}) - ref_path[1].append({'node': Node(if_node.else_graph, 'OUT_2_else'), 'graph': if_node.else_graph}) - - tracks = AddOutputRecursive().split_path_to_simple_tracks(graph, path) - - self.assertTrue(np.all(tracks[0] == ref_path[0])) - self.assertTrue(np.all(tracks[1] == ref_path[1])) - - def 
test_1_if_graph_change_add_output(self): - graph = self.create_graph() - graph.graph['additional_outputs'] = ['Loop', 'If_2', ['OUT_2', 'OUT_2_else']] - - AddOutputRecursive().find_and_replace_pattern(graph) - - loop_node = Node(graph, 'Loop') - if_node = Node(loop_node.body, 'If_2') - left_node = Node(if_node.then_graph, 'OUT_2') - right_node = Node(if_node.else_graph, 'OUT_2_else') - self.assertEqual(len(left_node.out_port(0).get_destinations()), 2) - self.assertEqual(left_node.out_port(0).get_destinations()[1].node.op, 'Result') - - self.assertEqual(len(right_node.out_port(0).get_destinations()), 2) - self.assertEqual(right_node.out_port(0).get_destinations()[1].node.op, 'Result') - - self.assertTrue(len(if_node.out_ports()), 2) - self.assertTrue(if_node.out_port(1).get_destination().node.op, 'Result') - - self.assertTrue(len(loop_node.out_ports()), 2) - self.assertTrue(loop_node.out_port(1).get_destination().node.op, 'Result') diff --git a/tools/mo/unit_tests/mo/back/compress_quantized_weights_test.py b/tools/mo/unit_tests/mo/back/compress_quantized_weights_test.py deleted file mode 100644 index 451f3e96cc17f1..00000000000000 --- a/tools/mo/unit_tests/mo/back/compress_quantized_weights_test.py +++ /dev/null @@ -1,438 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from argparse import Namespace - -import numpy as np -import pytest - -from openvino.tools.mo.back.compress_quantized_weights import CompressQuantizeWeights, ZeroPointOptimizer -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.ops.elementwise import Sub, Mul -from openvino.tools.mo.ops.fakequantize import FakeQuantize -from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from openvino.tools.mo.middle.passes.infer import type_infer -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, result, connect, \ - shaped_const_with_data - - -def nodes_dict(original, transformed=None, levels=255, data=None, - il=[-127], ih=[127], ol=[-127], oh=[127], - scale=np.array([1]), zp=np.array([0]), int_data=None): - shape = [1, 2, 3, 4] if data is None else np.array(data).shape - data = np.ones(shape, dtype=original) if data is None else np.array(data, dtype=original) - if int_data is None: - int_data = data.astype(dtype=np.int8) - transformed = transformed if transformed is not None else original - - return { - **valued_const_with_data('weights', data), - **valued_const_with_data('int_weights', int_data), - - **regular_op_with_shaped_data( - 'weights_cast', shape, {'type': 'Convert', 'op': 'Cast', 'infer': Cast.infer, 'dst_type': np.float32}), - - **regular_op_with_shaped_data( - 'cast', shape, {'type': 'Convert', 'op': 'Cast', 'infer': Cast.infer, 'dst_type': transformed}), - - **valued_const_with_data('il', np.array(il)), - **valued_const_with_data('ih', np.array(ih)), - **valued_const_with_data('ol', np.array(ol)), - **valued_const_with_data('oh', np.array(oh)), - - **regular_op_with_shaped_data( - 'FQ', shape, {'type': 'FakeQuantize', 'infer': FakeQuantize.infer, 'stop_value_propagation': True, - 'levels': levels, 'op': 'FakeQuantize'}), - - **valued_const_with_data('zp', zp), - **valued_const_with_data('scale', scale), - - **regular_op_with_shaped_data( - 'sub', shape, {'type': 'Subtract', 'op': 'Sub', 'infer': lambda node: eltwise_infer(node, Sub.operation)}), - - **regular_op_with_shaped_data( - 'mul', 
shape, {'type': 'Multiply', 'op': 'Mul', 'infer': lambda node: eltwise_infer(node, Mul.operation)}), - - **result() - } - - -class CompressionQuantizeDequantizeSeparateTest(unittest.TestCase): - def test_quantize(self): - original_type = np.float32 - nodes = nodes_dict(original_type) - - graph = build_graph(nodes, [ - *connect('weights:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True) - - error_message = 'Unexpected number of FakeQuantize nodes {} CompressQuantizeWeights.quantize_data call `{}`' - fq_nodes = graph.get_op_nodes(type='FakeQuantize') - self.assertEqual(len(fq_nodes), 1, error_message.format('before', len(fq_nodes))) - fake_quantize = fq_nodes[0] - - CompressQuantizeWeights.quantize_data(fake_quantize, original_type, np.int8, "signed") - graph.clean_up() - - fq_nodes = graph.get_op_nodes(type='FakeQuantize') - self.assertEqual(len(fq_nodes), 1, error_message.format('after', len(fq_nodes))) - self.assertEqual(fq_nodes[0].in_port(0).get_source().node.soft_get('type'), 'Const') - self.assertEqual(fq_nodes[0].in_port(0).get_source().node.data_type, np.int8) - - graph_ref = build_graph(nodes, [ - *connect('int_weights:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_dequantize(self): - original_type = np.float32 - nodes = nodes_dict(original_type, np.int8) - - graph = build_graph(nodes, [ - *connect('weights:0', '0:cast'), - *connect('cast:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True) - - error_message = 'Unexpected number of {} nodes {} CompressQuantizeWeights.dequantize_data call `{}`' - fq_nodes = graph.get_op_nodes(type='FakeQuantize') - cast_nodes = graph.get_op_nodes(name='cast') - self.assertEqual(len(fq_nodes), 1, error_message.format('FakeQuantize', 'before', len(fq_nodes))) - self.assertEqual(len(cast_nodes), 1, error_message.format('Convert', 'before', len(cast_nodes))) - cast_nodes[0]['need_shape_inference'] = True - - CompressQuantizeWeights.dequantize_data(fq_nodes[0], original_type, np.int8) - graph.clean_up() - - fq_nodes = graph.get_op_nodes(type='FakeQuantize') - self.assertEqual(len(fq_nodes), 0, error_message.format('FakeQuantize', 'after', len(fq_nodes))) - - graph_ref = build_graph(nodes, [ - *connect('int_weights:0', '0:cast'), - *connect('cast:0', '0:sub'), - *connect('zp:0', '1:sub'), - *connect('sub:0', '0:mul'), - *connect('scale:0', '1:mul'), - *connect('mul:0', 'output'), - ], {'cast': {'dst_type': original_type}}, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_quantize_new_fp16(self): - original_type = np.float16 - nodes = nodes_dict(original_type) - - graph = build_graph(nodes, [ - *connect('weights:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True) - - error_message = 'Unexpected number of FakeQuantize nodes {} CompressQuantizeWeights.quantize_data call `{}`' - fq_nodes = 
graph.get_op_nodes(type='FakeQuantize') - self.assertEqual(len(fq_nodes), 1, error_message.format('before', len(fq_nodes))) - fake_quantize = fq_nodes[0] - - CompressQuantizeWeights.quantize_data(fake_quantize, original_type, np.int8, "signed") - graph.clean_up() - - fq_nodes = graph.get_op_nodes(type='FakeQuantize') - self.assertEqual(len(fq_nodes), 1, error_message.format('after', len(fq_nodes))) - self.assertEqual(fq_nodes[0].in_port(0).get_source().node.soft_get('type'), 'Const') - self.assertEqual(fq_nodes[0].in_port(0).get_source().node.data_type, np.int8) - - graph_ref = build_graph(nodes, [ - *connect('int_weights:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - -class TestCompressionDataTypeTest(): - @pytest.mark.parametrize("original",[np.int64, - np.int32, - np.float64, - np.float32, - np.float16]) - def test_data_type(self, original): - nodes = nodes_dict(original) - - graph = build_graph(nodes, [ - *connect('weights:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True, cli=Namespace(static_shape=True)) - - CompressQuantizeWeights().find_and_replace_pattern(graph) - graph.clean_up() - - graph_ref = build_graph(nodes, [ - *connect('int_weights:0', '0:cast'), - *connect('cast:0', '0:sub'), - *connect('zp:0', '1:sub'), - *connect('sub:0', '0:mul'), - *connect('scale:0', '1:mul'), - *connect('mul:0', 'output'), - ], nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - def test_data_type_new_fp16(self): - nodes = nodes_dict(np.float16) - - graph = build_graph(nodes, [ - *connect('weights:0', '0:weights_cast'), - *connect('weights_cast:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True, cli=Namespace(data_type='FP16', static_shape=True)) - - CompressQuantizeWeights().find_and_replace_pattern(graph) - graph.clean_up() - - graph_ref = build_graph(nodes, [ - *connect('int_weights:0', '0:weights_cast'), - *connect('weights_cast:0', '0:sub'), - *connect('zp:0', '1:sub'), - *connect('sub:0', '0:mul'), - *connect('scale:0', '1:mul'), - *connect('mul:0', 'output'), - ], nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - - def test_fp16_fake_quantize(self): - original_type = np.float16 - input_low = np.array([-0.59033203125, -1.4833984375, -1.2900390625], dtype=np.float16) - input_high = np.array([0.59033203125, 1.4833984375, 1.2900390625], dtype=np.float16) - output_low = np.array([0.295166015625, 0.74169921875, 0.64501953125], dtype=np.float16) - output_high = np.array([-0.295166015625, -0.74169921875, -0.64501953125], dtype=np.float16) - scale = np.array([-0.002325, -0.00584, -0.005077], dtype=np.float16) - int_data = np.array([43, 103, 118], dtype=np.int8) - nodes = nodes_dict(original_type, transformed=np.int8, - levels=255, data=np.array([0.2, 1.2, 1.2], dtype=np.float16), - il=input_low, ih=input_high, ol=output_low, oh=output_high, scale=scale, int_data=int_data) - - graph = build_graph(nodes, [ - 
*connect('weights:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True) - type_infer(graph) - - error_message = 'Unexpected number of {} nodes {} CompressQuantizeWeights.dequantize_data call `{}`' - fq_nodes = graph.get_op_nodes(type='FakeQuantize') - assert len(fq_nodes) == 1, error_message.format('FakeQuantize', 'before', len(fq_nodes)) - - CompressQuantizeWeights().find_and_replace_pattern(graph) - graph.clean_up() - ZeroPointOptimizer().find_and_replace_pattern(graph) - graph.clean_up() - - fq_nodes = graph.get_op_nodes(type='FakeQuantize') - assert len(fq_nodes) == 0, error_message.format('FakeQuantize', 'after', len(fq_nodes)) - - graph_ref = build_graph(nodes, [ - *connect('int_weights:0', '0:cast'), - *connect('cast:0', '0:mul'), - *connect('scale:0', '1:mul'), - *connect('mul:0', 'output'), - ], {'cast': {'dst_type': original_type}}, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - -class TestAccuracyCheckFP32Test(): - eps = np.finfo(np.float32).eps - - @pytest.mark.parametrize("data, in_low, in_high, out_low, out_high, levels, add_cast" ,[ - ([-2.586, -1.338, 2.773, 4.414], [-2.586], [4.414], [-2.586], [4.414], 256, False), - ([-1.5, -0.32, 0.167, 2.8], [-1.5], [2.8], [-1.5], [2.8], 256, False), - ([1, 1 + eps, 1 + 2 * eps, 1 + 3 * eps], [1], [1 + 3 * eps], [1], [1 + 3 * eps], 256, False), - ([1.0, 2.0, 3.0, 4.0], [1], [4], [1], [4], 256, False), - ([-2.586, -1.338, 2.773, 4.414], [-2.586], [4.414], [-2.586], [4.414], 256, True), - ([-1.5, -0.32, 0.167, 2.8], [-1.5], [2.8], [-1.5], [2.8], 256, True), - ([1, 1 + eps, 1 + 2 * eps, 1 + 3 * eps], [1], [1 + 3 * eps], [1], [1 + 3 * eps], 256, True), - ([1.0, 2.0, 3.0, 4.0], [1], [4], [1], [4], 256, True), - ]) - def test_accuracy(self, data, in_low, in_high, out_low, out_high, levels, add_cast): - if not add_cast: - nodes = nodes_dict(np.float32, None, levels, data, in_low, in_high, out_low, out_high) - graph = build_graph(nodes, [ - *connect('weights:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True) - else: - nodes = nodes_dict(np.float16, None, levels, data, in_low, in_high, out_low, out_high) - graph = build_graph(nodes, [ - *connect('weights:0', '0:weights_cast'), - *connect('weights_cast:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True) - graph_ref = graph.copy() - - CompressQuantizeWeights().find_and_replace_pattern(graph) - - for node in graph.get_op_nodes() + graph_ref.get_op_nodes(): - node['stop_value_propagation'] = False - node['need_shape_inference'] = node.soft_get('need_shape_inference', True) - - graph.clean_up() - graph_ref.clean_up() - - const_result_graph = build_graph({**shaped_const_with_data('weights', np.array(data).shape), **result()}, - [*connect('weights', 'output')], nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, const_result_graph, 'output', check_op_attrs=True) - assert flag, resp - - (flag, resp) = compare_graphs(graph_ref, const_result_graph, 'output', check_op_attrs=True) - assert flag, resp - - # as this two graphs calculated the same data through different constant folding functions, they 
resulted in - # constants of different data type since FakeQuantize always have f32 output dtype, but eltwises use numpy - # for folding which doesn't have such restriction - const_node = graph.get_op_nodes(type='Const') - assert len(const_node) == 1 - if const_node[0].data_type == np.float64: - const_node[0].data_type = np.float32 - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - # I would like to leave this commented code here to quickly check the actual output value: - # print(result_node.in_port(0).data.get_value()) # actual calculated value - - -class TestNegativeCompressionTestLevels(): - @pytest.mark.parametrize("levels" , [(2), (257), (None), (0), (-5)]) - def test_negative_fq_unacceptable_levels(self, levels): - nodes = nodes_dict(np.float32, None, levels) - - graph = build_graph(nodes, [ - *connect('weights:0', '0:FQ'), - *connect('il:0', '1:FQ'), - *connect('ih:0', '2:FQ'), - *connect('ol:0', '3:FQ'), - *connect('oh:0', '4:FQ'), - *connect('FQ:0', 'output'), - ], nodes_with_edges_only=True) - graph_ref = graph.copy() - CompressQuantizeWeights().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - - -class TestZeroPointOptimizerTestClass(): - @pytest.mark.parametrize("weights, zero_point, adj_weights, adj_zero_point" ,[ - ([-10, 7], [-1], [-9, 8], [0]), - ([-10, 7], [-0.99999999], [-9, 8], [0]), - ]) - def test_zero_point_optimization(self, weights, zero_point, adj_weights, adj_zero_point): - nodes = lambda w, zp: { - **valued_const_with_data('weights', np.array(w, dtype=np.int8)), - **regular_op_with_shaped_data( - 'cast', [len(w)], {'type': 'Convert', 'op': 'Cast', 'infer': Cast.infer, 'dst_type': np.float32}), - **valued_const_with_data('zp', np.array(zp, dtype=np.float32)), - **regular_op_with_shaped_data( - 'sub', [len(w)], - {'type': 'Subtract', 'op': 'Sub', 'infer': lambda node: eltwise_infer(node, Sub.operation)}), - **result() - } - edges = [ - *connect("weights:0", "0:cast"), - *connect("cast:0", "0:sub"), - *connect("zp:0", "1:sub"), - *connect("sub:0", "0:output"), - ] - graph = build_graph(nodes(weights, zero_point), edges, nodes_with_edges_only=True) - ZeroPointOptimizer().find_and_replace_pattern(graph) - graph.clean_up() - - graph_ref = build_graph(nodes(adj_weights, adj_zero_point), [ - *connect("weights:0", "0:cast"), - *connect("cast:0", "0:output"), - ], nodes_with_edges_only=True) - graph_ref.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - @pytest.mark.parametrize("weights, zero_point, adj_weights, adj_zero_point" ,[ - ([-128, 7], [1], [-128, 7], [1]), - ([127, 7], [-1], [127, 7], [-1]), - ]) - def test_negative_zero_point_optimization(self, weights, zero_point, adj_weights, adj_zero_point): - nodes = lambda w, zp: { - **valued_const_with_data('weights', np.array(w, dtype=np.int8)), - **regular_op_with_shaped_data( - 'cast', [len(w)], {'type': 'Convert', 'op': 'Cast', 'infer': Cast.infer, 'dst_type': np.float32}), - **valued_const_with_data('zp', np.array(zp, dtype=np.float32)), - **regular_op_with_shaped_data( - 'sub', [len(w)], - {'type': 'Subtract', 'op': 'Sub', 'infer': lambda node: eltwise_infer(node, Sub.operation)}), - **result() - } - edges = [ - *connect("weights:0", "0:cast"), - *connect("cast:0", "0:sub"), - *connect("zp:0", "1:sub"), - *connect("sub:0", "0:output"), - ] - graph = build_graph(nodes(weights, zero_point), edges, 
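The parametrized cases in this class reduce to one rule: fold the zero point into the int8 constant only when the adjusted weights still fit the int8 range, otherwise leave the Subtract in place. A rough numpy restatement (illustrative helper, not the removed pass's API):

import numpy as np

def try_fold_zero_point(int8_weights, zero_point):
    # e.g. [-10, 7] with zp = -1 becomes [-9, 8] with zp = 0,
    # but [-128, 7] with zp = 1 would overflow int8 and is left unchanged
    adjusted = int8_weights.astype(np.int32) - np.round(zero_point).astype(np.int32)
    if adjusted.min() >= -128 and adjusted.max() <= 127:
        return adjusted.astype(np.int8), np.zeros_like(zero_point)
    return int8_weights, zero_point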
nodes_with_edges_only=True) - ZeroPointOptimizer().find_and_replace_pattern(graph) - graph.clean_up() - - graph_ref = build_graph(nodes(adj_weights, adj_zero_point), edges, nodes_with_edges_only=True) - graph_ref.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/back/ie_ir_ver_2/__init__.py b/tools/mo/unit_tests/mo/back/ie_ir_ver_2/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/back/ie_ir_ver_2/emitter_test.py b/tools/mo/unit_tests/mo/back/ie_ir_ver_2/emitter_test.py deleted file mode 100644 index 7f781f0536efa6..00000000000000 --- a/tools/mo/unit_tests/mo/back/ie_ir_ver_2/emitter_test.py +++ /dev/null @@ -1,268 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import MagicMock - -import defusedxml.ElementTree as ET -import numpy as np -from defusedxml import defuse_stdlib - -from openvino.tools.mo.back.ie_ir_ver_2.emitter import soft_get, xml_shape, serialize_runtime_info, serialize_network, \ - port_renumber -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.infer import partial_infer, type_infer -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.ops.pooling import Pooling -from openvino.tools.mo.ops.result import Result -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.runtime_info import RTInfo, OldAPIMapOrder, OldAPIMapElementType -from openvino.tools.mo.utils.unsupported_ops import UnsupportedOps -from unit_tests.utils.graph import valued_const_with_data, result, regular_op_with_empty_data, connect, \ - shaped_parameter, build_graph, regular_op - -# defuse_stdlib provide patched version of xml.etree.ElementTree which allows to use objects from xml.etree.ElementTree -# in a safe manner without including unsafe xml.etree.ElementTree -ET_defused = defuse_stdlib()[ET] -Element = ET_defused.Element -tostring = ET_defused.tostring - -expected_result = b'2105050' - - -class TestEmitter(unittest.TestCase): - def test_xml_shape(self): - net = Element('net') - xml_shape(np.array([2, 10, 50, 50], dtype=np.int64), net) - self.assertEqual(tostring(net), expected_result) - - def test_xml_shape_float_values(self): - net = Element('net') - xml_shape(np.array([2.0, 10.0, 50.0, 50.0], dtype=np.float32), net) - self.assertEqual(tostring(net), expected_result) - - def test_xml_shape_non_integer_values(self): - net = Element('net') - with self.assertRaises(Error): - xml_shape(np.array([2.0, 10.0, 50.0, 50.5], dtype=np.float32), net) - - def test_xml_shape_negative_values(self): - net = Element('net') - with self.assertRaises(Error): - xml_shape(np.array([2, 10, 50, -50], dtype=np.int64), net) - - -class TestSoftGet(unittest.TestCase): - - def test_node(self): - node = MagicMock() - node.soft_get = lambda attr: attr - self.assertEqual(soft_get(node, 'string'), 'string') - - def test_not_callable(self): - node = MagicMock() - node.soft_get = 'foo' - self.assertEqual(soft_get(node, 'string'), '') - - def test_not_node_1(self): - node = {'soft_get': lambda attr: attr} - self.assertEqual(soft_get(node, 'string'), '') - - def test_not_node_2(self): - node = 'something-else' - self.assertEqual(soft_get(node, 'string'), '') - - -class 
TestSerializeRTInfo(unittest.TestCase): - def test_serialize_old_api_map_parameter(self): - graph = build_graph({**regular_op('placeholder', {'type': 'Parameter', 'rt_info': RTInfo()}), - **result('result')}, - [('placeholder', 'result')], {}, nodes_with_edges_only=True) - param_node = Node(graph, 'placeholder') - param_node.rt_info.info[('old_api_map_order', 0)] = OldAPIMapOrder() - param_node.rt_info.info[('old_api_map_order', 0)].old_api_transpose_parameter([0, 2, 3, 1]) - param_node.rt_info.info[('old_api_map_element_type', 0)] = OldAPIMapElementType() - param_node.rt_info.info[('old_api_map_element_type', 0)].set_legacy_type(np.float32) - - net = Element('net') - serialize_runtime_info(param_node, net) - serialize_res = str(tostring(net)) - self.assertTrue("name=\"old_api_map_order\"" in serialize_res) - self.assertTrue("name=\"old_api_map_element_type\"" in serialize_res) - self.assertTrue("version=\"0\"" in serialize_res) - self.assertTrue("value=\"0,2,3,1\"" in serialize_res) - self.assertTrue("value=\"f32\"" in serialize_res) - self.assertTrue(serialize_res.startswith("b'")) - self.assertTrue(serialize_res.endswith("'")) - - del param_node.rt_info.info[('old_api_map_order', 0)] - param_node.rt_info.info[('old_api_map_element_type', 0)] = OldAPIMapElementType() - param_node.rt_info.info[('old_api_map_element_type', 0)].set_legacy_type(np.float16) - - net = Element('net') - serialize_runtime_info(param_node, net) - serialize_res = str(tostring(net)) - self.assertTrue("name=\"old_api_map_element_type\"" in serialize_res) - self.assertTrue("version=\"0\"" in serialize_res) - self.assertTrue("value=\"f16\"" in serialize_res) - self.assertTrue(serialize_res.startswith("b'")) - self.assertTrue(serialize_res.endswith("'")) - - def test_serialize_old_api_map_result(self): - graph = build_graph({**regular_op('placeholder', {'type': 'Parameter', 'rt_info': RTInfo()}), - **regular_op('result', {'type': 'Result', 'rt_info': RTInfo()})}, - [('placeholder', 'result')], {}, nodes_with_edges_only=True) - result_node = Node(graph, 'result') - result_node.rt_info.info[('old_api_map_order', 0)] = OldAPIMapOrder() - result_node.rt_info.info[('old_api_map_order', 0)].old_api_transpose_result([0, 3, 1, 2]) - - net = Element('net') - serialize_runtime_info(result_node, net) - serialize_res = str(tostring(net)) - self.assertTrue("name=\"old_api_map_order\"" in serialize_res) - self.assertTrue("version=\"0\"" in serialize_res) - self.assertTrue("value=\"0,3,1,2\"" in serialize_res) - self.assertTrue(serialize_res.startswith("b'")) - self.assertTrue(serialize_res.endswith("'")) - - -class TestSerialize(unittest.TestCase): - @staticmethod - def build_graph_with_gather(): - nodes = { - **shaped_parameter('data', int64_array([3, 3]), {'data_type': np.float32, 'type': Parameter.op}), - **shaped_parameter('indices', int64_array([1, 2]), {'data_type': np.float32, 'type': Parameter.op}), - **valued_const_with_data('axis', int64_array(1)), - **regular_op_with_empty_data('gather', {'op': 'Gather', 'batch_dims': 0, 'infer': Gather.infer, - 'type': Gather.op}), - **result('res'), - } - - edges = [ - *connect('data', '0:gather'), - *connect('indices', '1:gather'), - *connect('axis', '2:gather'), - *connect('gather', 'res'), - ] - - graph = build_graph(nodes, edges) - - data_node = Node(graph, 'data') - Parameter.update_node_stat(data_node, {}) - indices_node = Node(graph, 'indices') - Parameter.update_node_stat(indices_node, {}) - - gather_node = Node(graph, 'gather') - Gather.update_node_stat(gather_node, {}) - - 
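The emitter checks above (test_xml_shape and its negative variants) pin down a small contract: each shape entry becomes a dim child of the given element, while non-integral or negative entries are rejected. A simplified stand-in using plain ElementTree, assuming that contract (not the removed emitter itself):

import xml.etree.ElementTree as ET
import numpy as np

def xml_shape_sketch(shape: np.ndarray, parent: ET.Element) -> None:
    for d in shape:
        if d < 0 or float(d) != int(d):
            raise ValueError("shape entries must be non-negative integers, got {}".format(d))
        ET.SubElement(parent, "dim").text = str(int(d))

net = ET.Element("net")
xml_shape_sketch(np.array([2, 10, 50, 50], dtype=np.int64), net)
assert ET.tostring(net) == b"<net><dim>2</dim><dim>10</dim><dim>50</dim><dim>50</dim></net>"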
res_node = Node(graph, 'res') - Result.update_node_stat(res_node, {}) - - partial_infer(graph) - type_infer(graph) - - return graph - - @staticmethod - def build_graph_with_maxpool(): - graph = build_graph( - nodes_attrs={ - 'input': {'kind': 'op', 'op': 'Parameter', 'name': 'node', 'infer': Parameter.infer, - 'shape': [1, 3, 10, 10]}, - 'input_data': {'kind': 'data', 'value': None, 'shape': None}, - - 'pool': {'kind': 'op', 'type': 'MaxPool', 'infer': Pooling.infer, - 'window': np.array([1, 1, 2, 2]), 'stride': np.array([1, 1, 2, 2]), - 'pad': np.array([[0, 0], [0, 0], [0, 0], [1, 1]]), - 'pad_spatial_shape': np.array([[0, 0], [1, 1]]), - 'pool_method': 'max', 'exclude_pad': False, 'global_pool': False, - 'output_spatial_shape': None, 'output_shape': None, - 'kernel_spatial': np.array([2, 2]), 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'batch_dims': np.array([0]), - 'pooling_convention': 'full', 'dilation': np.array([1, 1, 2, 2]), - 'auto_pad': 'valid'}, - - 'pool_data': {'kind': 'data', 'value': None, 'shape': None}, - 'pool_data_added': {'kind': 'data', 'value': None, 'shape': None}, - 'result': {'kind': 'op', 'op': 'Result'}, - 'result_added': {'kind': 'op', 'op': 'Result'} - }, - edges=[ - ('input', 'input_data'), - ('input_data', 'pool'), - ('pool', 'pool_data', {'out': 0}), - ('pool_data', 'result'), - ('pool', 'pool_data_added', {'out': 1}), - ('pool_data_added', 'result_added') - ] - ) - - input_node = Node(graph, 'input') - Parameter.update_node_stat(input_node, {}) - - pool_node = Node(graph, 'pool') - Pooling.update_node_stat(pool_node, {'pool_method': 'max'}) - - result_node = Node(graph, 'result') - Result.update_node_stat(result_node, {}) - result_added_node = Node(graph, 'result_added') - Result.update_node_stat(result_added_node, {}) - - partial_infer(graph) - type_infer(graph) - return graph - - def test_gather(self): - graph = self.build_graph_with_gather() - - net = Element('net') - graph.outputs_order = ['gather'] - unsupported = UnsupportedOps(graph) - port_renumber(graph) - - serialize_network(graph, net, unsupported) - xml_string = str(tostring(net)) - self.assertTrue("type=\"Parameter\"" in xml_string) - self.assertTrue("type=\"Result\"" in xml_string) - self.assertTrue("type=\"Gather\"" in xml_string) - - def test_maxpool(self): - graph = self.build_graph_with_maxpool() - - net = Element('net') - graph.outputs_order = ['pool'] - unsupported = UnsupportedOps(graph) - port_renumber(graph) - serialize_network(graph, net, unsupported) - xml_string = str(tostring(net)) - self.assertTrue("type=\"Parameter\"" in xml_string) - self.assertTrue("type=\"Result\"" in xml_string) - self.assertTrue("type=\"Pooling\"" in xml_string) - - def test_maxpool_raises(self): - graph = self.build_graph_with_maxpool() - - pool_node = Node(graph, 'pool') - result_node = Node(graph, 'result') - result_added_node = Node(graph, 'result_added') - pool_out_1 = Node(graph, 'pool_data') - pool_out_2 = Node(graph, 'pool_data_added') - - # when operation does not have output data nodes Exception should be raised - graph.remove_edge(pool_node.id, pool_out_1.id) - graph.remove_edge(pool_node.id, pool_out_2.id) - graph.remove_edge(pool_out_1.id, result_node.id) - graph.remove_edge(pool_out_2.id, result_added_node.id) - - graph.remove_node(result_node.id) - graph.remove_node(result_added_node.id) - - net = Element('net') - graph.outputs_order = ['pool'] - unsupported = UnsupportedOps(graph) - port_renumber(graph) - - with self.assertRaisesRegex(AssertionError, "Incorrect graph. 
Non-Result node.*"): - serialize_network(graph, net, unsupported) diff --git a/tools/mo/unit_tests/mo/back/insert_compatibility_l2normalization_test.py b/tools/mo/unit_tests/mo/back/insert_compatibility_l2normalization_test.py deleted file mode 100644 index 19bbeeba8bad7c..00000000000000 --- a/tools/mo/unit_tests/mo/back/insert_compatibility_l2normalization_test.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.back.insert_compatibility_l2normalization import CompatibilityL2NormalizationPattern -from unit_tests.utils.graph import build_graph - - -class CompatibilityL2NormalizationPatternTest(unittest.TestCase): - nodes = { - 'input_node': { - 'kind': 'data' - }, - 'l2norm_node': { - 'op': 'Normalize', - 'kind': 'op', - 'type': 'Normalize', - }, - 'output_node': { - 'kind': 'data' - } - } - - def test_insert_data(self): - graph = build_graph(self.nodes, [('input_node', 'l2norm_node'), ('l2norm_node', 'output_node')], - {'input_node': {'shape': np.array([1, 10])}, - }) - CompatibilityL2NormalizationPattern().find_and_replace_pattern(graph) - self.assertEqual(len(graph.nodes()), 5) - self.assertEqual(graph.node['l2norm_node_weights']['name'], 'l2norm_node_weights') - self.assertEqual(len(graph.node['l2norm_node_weights']['value']), 10) - - expect_value = np.full([10], 1.0, np.float32) - - for i, val in enumerate(expect_value): - self.assertEqual(graph.node['l2norm_node_weights']['value'][i], val) diff --git a/tools/mo/unit_tests/mo/back/kaldi_remove_memory_output_test.py b/tools/mo/unit_tests/mo/back/kaldi_remove_memory_output_test.py deleted file mode 100644 index c57d0e5d0886f5..00000000000000 --- a/tools/mo/unit_tests/mo/back/kaldi_remove_memory_output_test.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.back.kaldi_remove_memory_output import KaldiRemoveMemoryOutputBackReplacementPattern -from unit_tests.utils.graph import build_graph - - -class KaldiRemoveMemoryOutputTest(unittest.TestCase): - nodes = { - 'input_node': { - 'kind': 'data' - }, - 'memory_node': { - 'op': 'Assign', - 'kind': 'op' - }, - 'output_node': { - 'kind': 'data' - }, - 'op_output': { - 'kind': 'data', - 'op': 'Result', - } - } - - def test_remove_out_data_for_memory(self): - graph = build_graph(self.nodes, - [ - ('input_node', 'memory_node'), - ('memory_node', 'output_node'), - ('output_node', 'op_output') - ]) - KaldiRemoveMemoryOutputBackReplacementPattern().find_and_replace_pattern(graph) - self.assertNotIn('output_node', graph.node) - - def test_do_not_remove_out_data_for_memory(self): - graph = build_graph(self.nodes, - [ - ('input_node', 'memory_node'), - ('memory_node', 'output_node'), - ]) - KaldiRemoveMemoryOutputBackReplacementPattern().find_and_replace_pattern(graph) - self.assertIn('output_node', graph.node) diff --git a/tools/mo/unit_tests/mo/back/moc_preprocessing_test_actual.py b/tools/mo/unit_tests/mo/back/moc_preprocessing_test_actual.py deleted file mode 100644 index 39d4913a26a103..00000000000000 --- a/tools/mo/unit_tests/mo/back/moc_preprocessing_test_actual.py +++ /dev/null @@ -1,731 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from argparse import Namespace - -import numpy as np -from openvino.tools.mo.utils.error import Error -from unit_tests.mo.unit_test_with_mocked_telemetry import 
UnitTestWithMockedTelemetry - -try: - # pylint: disable=no-name-in-module,import-error - from openvino.tools.mo.back.preprocessing import apply_preprocessing - - # pylint: disable=no-name-in-module,import-error - import openvino.runtime.opset8 as ops - from openvino.runtime import Model, Layout, PartialShape - -except Exception: - print("No OpenVINO API available," - "ensure to set correct PYTHONPATH when running these tests") - raise - - -def create_function3(shape1=[2, 2]): - input1 = ops.parameter(shape1, dtype=np.float32, name="input1") - input1.get_output_tensor(0).set_names({'a_input', 'b_input', 'c_input'}) - relu1 = ops.relu(input1) - res1 = ops.result(relu1, "res") - res1.get_output_tensor(0).set_names({'res'}) - function = Model(results=[res1], parameters=[input1], name="TestFunction") - return function - - -def create_function2(shape1=[2, 2], shape2=[2, 2], dtype1=np.float32, dtype2=np.float32): - input1 = ops.parameter(shape1, dtype=dtype1, name="input1") - input1.get_output_tensor(0).set_names({'input1', 'input1a'}) - relu1 = ops.relu(input1) - res1 = ops.result(relu1, "res1") - res1.get_output_tensor(0).set_names({'res1', 'res1a'}) - input2 = ops.parameter(shape2, dtype=dtype2, name="input2") - input2.get_output_tensor(0).set_names({'input2', 'input2a'}) - relu2 = ops.relu(input2) - res2 = ops.result(relu2, "res2") - res2.get_output_tensor(0).set_names({'res2', 'res2a'}) - function = Model(results=[res1, res2], parameters=[input1, input2], name="TestFunction") - return function - - -def create_function1(shape1=[2, 2]): - input1 = ops.parameter(shape1, dtype=np.float32, name="input1") - input1.get_output_tensor(0).set_names({'input1a', 'input1b'}) - relu1 = ops.relu(input1) - res1 = ops.result(relu1, "res1") - res1.get_output_tensor(0).set_names({'res1', 'res1a'}) - function = Model(results=[res1], parameters=[input1], name="TestFunction") - return function - - -def process_function(ov_function: Model, argv: Namespace): - apply_preprocessing(ov_function=ov_function, argv=argv) - - -class TestPreprocessingMOC(UnitTestWithMockedTelemetry): - def setUp(self): - super(TestPreprocessingMOC, self).setUp() - pass - - def check_constant(self, const_node, expected, shape=None): - self.assertEqual(const_node.get_type_name(), 'Constant') - self.assertTrue(np.allclose(const_node.get_vector(), expected)) - if shape is not None: - assert const_node.shape == PartialShape(shape) - - def check_scale_constant(self, node, expected, shape=None): - const_node = node.input(1).get_source_output().get_node() - if node.get_type_name() != 'Divide': - expected = 1. 
/ expected - self.check_constant(const_node, expected, shape) - - def check_mean_constant(self, node, expected, shape=None): - const_node = node.input(1).get_source_output().get_node() - if node.get_type_name() != 'Subtract': - expected = -expected.toList() - self.check_constant(const_node, expected, shape) - - def test_scale_single_value(self): - argv = Namespace(mean_scale_values=None, scale=2.0) - function = create_function2() - process_function(ov_function=function, argv=argv) - - for param in function.get_parameters(): - op_node = list(param.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, [2.0]) - - def test_scale_single_value_fp64(self): - argv = Namespace(mean_scale_values=None, scale=2.0) - function = create_function2(dtype1=np.float64) - process_function(ov_function=function, argv=argv) - - for ov_input in function.inputs: - op_node = list(ov_input.get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, [2.0]) - - def test_scale_single_value_fp16(self): - argv = Namespace(mean_scale_values=None, scale=2.0) - function = create_function2(dtype1=np.float16) - process_function(ov_function=function, argv=argv) - - for ov_input in function.inputs: - op_node = list(ov_input.get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - - def test_scale_vector(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([4.]), 'mean': None}}, scale=None) - function = create_function2() - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, [4.0], shape=None) - # Verify that input2 is not affected - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - def test_scale_vector3(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([2., 4., 8.]), 'mean': None}}, scale=None) - function = create_function2(shape1=[1, 3, 224, 224]) - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1]) - - # Verify that input2 is not affected - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - # Verify that guessed layout (?C??) 
is not appeared in input1 - self.assertEqual(function.get_parameters()[0].layout, Layout()) - - def test_scale_vector4_layout(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([2., 4., 8., 9.]), 'mean': None}}, - layout_values={'input1': {'source_layout': 'nhwc'}}, - scale=None) - function = create_function2(shape1=[1, 3, 3, 4]) # Use layout to determine channels dim - - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=[2., 4., 8., 9.], shape=[1, 1, 1, 4]) - - # Verify that input2 is not affected - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - # Verify that layout (NHWC) is appeared in input1 - self.assertEqual(function.get_parameters()[0].layout, Layout('nhwc')) - - def test_mean_single(self): - argv = Namespace(mean_scale_values={'input1': {'mean': np.array([4.]), 'scale': None}}, scale=None) - function = create_function2() - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, [4.0], shape=None) - # Verify that input2 is not affected - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - def test_mean_single_fp64(self): - argv = Namespace(mean_scale_values={'input1': {'mean': np.array([4.]), 'scale': None}}, scale=None) - function = create_function2(dtype1=np.float64) - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, [4.0], shape=None) - # Verify that input2 is not affected - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - def test_mean_single_fp16(self): - argv = Namespace(mean_scale_values={'input1': {'mean': np.array([4.]), 'scale': None}}, scale=None) - function = create_function2(dtype1=np.float16) - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - # Verify that input2 is not affected - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - def test_mean_vector3(self): - argv = Namespace(mean_scale_values={'input2': {'mean': np.array([2., 4., 8.]), 'scale': None}}, scale=None) - function = create_function2(shape2=[1, 3, 224, 224]) - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1]) - - # Verify that input1 is not affected - op_node = 
list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - # Verify that guessed layout (?C??) is not appeared in input2 - self.assertEqual(function.get_parameters()[1].layout, Layout()) - - def test_mean_scale(self): - argv = Namespace(mean_scale_values={'input2a': {'mean': np.array([1., 2., 3.]), - 'scale': np.array([2., 4., 8.])}}, - scale=None) - function = create_function2(shape2=[1, 3, 224, 224]) - process_function(ov_function=function, argv=argv) - # Verify that first is 'subtract mean', then 'scale' - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[1., 2., 3.], shape=[1, 3, 1, 1]) - - op_node = list(op_node.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1]) - - # Verify that input1 is not affected - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - # Verify that guessed layout (?C??) is not appeared in input2 - self.assertEqual(function.get_parameters()[1].layout, Layout()) - - def test_mean_scale_with_layout(self): - argv = Namespace(mean_scale_values={'input2a': {'mean': np.array([1., 2., 3., 4.]), - 'scale': np.array([2., 4., 8., 9.])}}, - scale=None) - function = create_function2(shape2=[1, 3, 3, 4]) - function.get_parameters()[1].layout = Layout("NHWC") - process_function(ov_function=function, argv=argv) - # Verify that first is 'subtract mean', then 'scale' - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[1., 2., 3., 4.], shape=[1, 1, 1, 4]) - - op_node = list(op_node.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=[2., 4., 8., 9.], shape=[1, 1, 1, 4]) - - # Verify that input1 is not affected - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - # Verify that layout presents in function after preprocessing - self.assertEqual(function.get_parameters()[1].layout, Layout("NHWC")) - - def test_mean_scale_with_layout_dynamic(self): - argv = Namespace(mean_scale_values={'input2a': {'mean': np.array([1., 2., 3., 4.]), - 'scale': np.array([2., 4., 8., 9.])}}, - scale=None) - function = create_function2(shape2=[-1, -1, -1, -1]) - function.get_parameters()[1].layout = Layout("NHWC") - process_function(ov_function=function, argv=argv) - # Verify that first is 'subtract mean', then 'scale' - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[1., 2., 3., 4.], shape=[1, 1, 1, 4]) - - op_node = list(op_node.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=[2., 4., 
8., 9.], shape=[1, 1, 1, 4]) - - # Verify that input1 is not affected - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - # Verify that layout presents in function after preprocessing - self.assertEqual(function.get_parameters()[1].layout, Layout("NHWC")) - - def test_no_param_name(self): - argv = Namespace(mean_scale_values=list(np.array([(np.array([1., 2., 3.]), np.array([2., 4., 6.])), - (np.array([7., 8., 9.]), None)], - dtype='object')), scale=None) - function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 224, 224, 3]) - process_function(ov_function=function, argv=argv) - - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[1., 2., 3.], shape=[1, 3, 1, 1]) - - op_node = list(op_node.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=[2., 4., 6.], shape=[1, 3, 1, 1]) - - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[7., 8., 9.], shape=[1, 1, 1, 3]) - - # Verify that guessed layouts are not appeared in inputs - self.assertEqual(function.get_parameters()[0].layout, Layout()) - self.assertEqual(function.get_parameters()[1].layout, Layout()) - - def test_no_param_name_single_value(self): - argv = Namespace(mean_scale_values=list(np.array([(np.array([1.]), None), - (np.array([2., 3., 4.]), np.array([5.]))], - dtype='object')), scale=None) - function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 224, 224, 3]) - process_function(ov_function=function, argv=argv) - - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[1.], shape=None) - - op_node = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[2., 3., 4.], shape=[1, 1, 1, 3]) - - op_node = list(op_node.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=[5.], shape=None) - - # Two inputs, but 'mean_scale_value' has only one array - def test_error_no_param_name_number_not_match(self): - argv = Namespace(mean_scale_values=[(np.array([2., 3.]), np.array([4.]))], scale=None) - function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 2, 224, 224]) - with self.assertRaisesRegex(Error, '.*question.*61.*'): - process_function(ov_function=function, argv=argv) - - def test_mean_scale_error_no_node_name_found(self): - argv = Namespace(mean_scale_values={'not_found': {'scale': np.array([1.]), 'mean': np.array([1.])}}, - scale=None) - function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 2, 224, 224]) - with self.assertRaisesRegex(Error, '.*question.*83.*'): - process_function(ov_function=function, argv=argv) - - def test_layout_error_no_node_name_found(self): - argv = 
Namespace(layout_values={'not_found': {'source_layout': 'nhwc'}}, - scale=None) - function = create_function2(shape1=[1, 3, 224, 224], shape2=[1, 2, 224, 224]) - with self.assertRaisesRegex(Error, '.*question.*83.*'): - process_function(ov_function=function, argv=argv) - - def test_error_dimension_mismatch(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3., 4.]), 'mean': None}}, - scale=None) - function = create_function2(shape1=[1, 3, 224, 224]) - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_error_dimension_not_clear(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]), 'mean': None}}, - scale=None) - function = create_function2(shape1=[1, 3, 3, 3]) # Not clear to which 3 should scale be applied - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_error_dimension_mismatch_with_scale(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3., 4.]), - 'mean': np.array([1., 2., 3.])}}, - scale=None) - function = create_function2(shape1=[1, 3, 4, 224]) - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_error_guess_c_wrong_position_3d(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]), - 'mean': np.array([1., 2., 3.])}}, - scale=None) - function = create_function2(shape1=[2, 3, 4]) - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_error_guess_c_wrong_position_4d(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]), - 'mean': np.array([1., 2., 3.])}}, - scale=None) - function = create_function2(shape1=[1, 2, 3, 4]) - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_error_guess_c_wrong_position_5d(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]), - 'mean': np.array([1., 2., 3.])}}, - scale=None) - function = create_function2(shape1=[1, 2, 3, 4, 5]) - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_error_guess_c_wrong_position_6d(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.]), - 'mean': np.array([1., 2., 3.])}}, - scale=None) - function = create_function2(shape1=[1, 2, 4, 5, 6, 3]) - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_error_2_names_to_same_input(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([1., 2., 3.])}, - 'input1a': {'scale': np.array([1., 2., 3.])}}, - scale=None) - function = create_function2(shape1=[1, 3, 224, 224]) - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_error_2_names_to_same_input_single_value(self): - argv = Namespace(mean_scale_values={'input1': {'scale': np.array([2.])}, - 'input1a': {'scale': np.array([3.])}}, - scale=None) - function = create_function2(shape1=[1, 3, 224, 224]) - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_reverse_input_channels(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None) - function = create_function2(shape1=[1, 224, 224, 3], shape2=[1, 3, 224, 224]) - process_function(ov_function=function, - argv=argv) - # Verify that some operations are inserted. 
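The mean/scale tests earlier in this file all assert the same insertion order: a Subtract (or Add) for the mean first, then a Divide (or Multiply) for the scale, with per-channel constants broadcast to [1, C, 1, 1] for NCHW-like inputs. A hand-built equivalent using the same opset8 factories this file already imports (a sketch under those assumptions, not apply_preprocessing itself):

import numpy as np
import openvino.runtime.opset8 as ops
from openvino.runtime import Model

param = ops.parameter([1, 3, 224, 224], dtype=np.float32, name="input1")
mean = ops.constant(np.array([1., 2., 3.], dtype=np.float32).reshape(1, 3, 1, 1))
scale = ops.constant(np.array([2., 4., 8.], dtype=np.float32).reshape(1, 3, 1, 1))
normalized = ops.divide(ops.subtract(param, mean), scale)  # subtract mean, then apply scale
model = Model(results=[ops.result(ops.relu(normalized))], parameters=[param], name="preprocessing_sketch")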
- # In future, consider using mock PrePostProcessor to verify that 'reverse_channels' was called - op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node0.get_type_name() != 'Relu') - op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node1.get_type_name() != 'Relu') - - # Verify that guessed layouts are not appeared in input1,input2 - self.assertEqual(function.get_parameters()[0].layout, Layout()) - self.assertEqual(function.get_parameters()[1].layout, Layout()) - - def test_reverse_input_channels_func_layout(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None) - function = create_function2(shape1=[1, 3, 3, 3], shape2=[1, 3, 3, 3]) - function.get_parameters()[0].layout = Layout("NCHW") - function.get_parameters()[1].layout = Layout("NHWC") - process_function(ov_function=function, - argv=argv) - # Verify that some operations are inserted. - # In future, consider using mock PrePostProcessor to verify that 'reverse_channels' was called - op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node0.get_type_name() != 'Relu') - op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node1.get_type_name() != 'Relu') - - # Verify that guessed layouts are not appeared in input1,input2 - self.assertEqual(function.get_parameters()[0].layout, Layout("NCHW")) - self.assertEqual(function.get_parameters()[1].layout, Layout("NHWC")) - - def test_reverse_input_channels_layout(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None, - layout_values={'input1a': { 'source_layout': 'nhwc' }, - 'input2a': { 'source_layout': 'nchw' } - }) - function = create_function2(shape1=[1, 224, 224, 4], shape2=[1, 4, 224, 224]) - # no suitable inputs - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_reverse_input_channels_3d(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None, - layout_values=None) - function = create_function2(shape1=[224, 224, 3], shape2=[3, 224, 224]) - process_function(ov_function=function, argv=argv) - # Verify that reverse_channels are applied. 
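These reverse_input_channels tests only assert that something other than the original Relu now consumes the parameter; the inserted pattern, checked explicitly by test_reverse_channels_and_mean_scale further down, is a Gather along the channel axis with reversed indices. A minimal sketch of that pattern, writing the indices as a literal constant instead of the Range(2, -1, -1) the pass itself builds:

import numpy as np
import openvino.runtime.opset8 as ops

data = ops.parameter([1, 3, 224, 224], dtype=np.float32, name="data")
reversed_idx = ops.constant(np.array([2, 1, 0], dtype=np.int64))  # RGB <-> BGR
axis = ops.constant(np.array([1], dtype=np.int64))                # channel axis for NCHW
reversed_channels = ops.gather(data, reversed_idx, axis)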
- op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node0.get_type_name() != 'Relu') - op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node1.get_type_name() != 'Relu') - - def test_reverse_input_channels_6d(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None, - layout_values=None) - function = create_function2(shape1=[4, 4, 4, 4, 4, 3], shape2=[4, 3, 4, 4, 4, 4]) - # no suitable inputs - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_reverse_input_channels_dynamic(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None, - layout_values=None) - function = create_function2(shape1=[1, -1, 5, 5], shape2=[-1, -1, -1, -1]) - # no suitable inputs - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_reverse_input_channels_dynamic_layout(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None, - layout_values={'input1a': {'source_layout': 'nchw'}, - 'input2a': {'source_layout': 'nhwc'} - }) - function = create_function2(shape1=[1, -1, 5, 5], shape2=[-1, -1, -1, -1]) - process_function(ov_function=function, argv=argv) - # Verify that reverse_channels are applied. - op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node0.get_type_name() != 'Relu') - op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node1.get_type_name() != 'Relu') - - def test_reverse_input_channels_layout_change(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None, - layout_values={'input1a': {'source_layout': 'nchw', 'target_layout': 'nhwc'}, - 'input2a': {'source_layout': 'nhwc', 'target_layout': 'nchw'} - }) - function = create_function2(shape1=[1, 3, 5, 5], shape2=[1, 5, 5, 3]) - process_function(ov_function=function, argv=argv) - # Verify that reverse_channels are applied. - op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node0.get_type_name() != 'Relu') - op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node1.get_type_name() != 'Relu') - - def test_reverse_input_channels_2_channels(self): - argv = Namespace(reverse_input_channels=True, - mean_scale_values=None, - scale=None) - function = create_function2(shape1=[1, 224, 224, 2], shape2=[1, 3, 224, 224]) - process_function(ov_function=function, argv=argv) - # Verify that some operations are inserted to input2. 
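Taken together, the 2-channel, 6D and dynamic-shape cases around here outline an applicability rule: reversal is only attempted when a channel dimension can be located (from an explicit layout, or guessed for 4D/5D shapes) and that dimension is statically 3. A compact, purely illustrative restatement of that rule:

def should_reverse_channels(shape, layout=None):
    # shape: list of ints with -1 for dynamic dims; layout: e.g. 'NCHW', 'NHWC' or None
    if layout is not None and 'C' in layout:
        channel_dim = layout.index('C')
    elif len(shape) in (4, 5):
        # guess: channels either right after batch (NC...) or last (N...C)
        channel_dim = 1 if shape[1] == 3 else len(shape) - 1
    else:
        return False
    return shape[channel_dim] == 3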
- op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node0.get_type_name() == 'Relu') - op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node1.get_type_name() != 'Relu') - - # Verify that guessed layouts are not appeared in input1,input2 - self.assertEqual(function.get_parameters()[0].layout, Layout()) - self.assertEqual(function.get_parameters()[1].layout, Layout()) - - # When input name for layout is empty for model with one input - it is applied to this input - def test_scale_vector3_layout_empty_input_name(self): - argv = Namespace(mean_scale_values=list(np.array([(None, np.array([2., 4., 8.]))], - dtype='object')), - layout_values={'': {'source_layout': 'nchw'}}, - scale=None) - function = create_function1(shape1=[1, 3, 3, 3]) # Use layout to determine channels dim - - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1]) - - # Verify that layout (nchw) is appeared in input1 - self.assertEqual(function.get_parameters()[0].layout, Layout('nchw')) - - def test_layout_output(self): - argv = Namespace(mean_scale_values=None, - layout_values={ - 'res1': { - 'source_layout': 'nchw', - 'target_layout': 'nhwc' - }, - 'res2a': { - 'source_layout': 'ncdhw' - } - }, - scale=None) - function = create_function2(shape1=[1, 3, 3, 3], shape2=[1, 3, 3, 3, 3]) - - process_function(ov_function=function, argv=argv) - op_node = function.get_results()[0].input(0).get_source_output().get_node() - self.assertEqual(op_node.get_type_name(), 'Transpose') - - self.assertEqual(function.get_results()[0].layout, Layout('nhwc')) - self.assertEqual(function.get_results()[1].layout, Layout('ncdhw')) - - def test_error_layout_empty_input_name_2_inputs(self): - argv = Namespace(mean_scale_values=None, - layout_values={'': {'source_layout': 'nchw'}}, - scale=None) - function = create_function2(shape1=[1, 3, 3, 3]) - - # Verify user friendly error message contains number of inputs and their names - with self.assertRaisesRegex(Error, '.*2.*inputs.*input1.*input2.*'): - process_function(ov_function=function, argv=argv) - - def test_incompatible_layout(self): - function = create_function2(shape1=[1, 224, 224, 3], shape2=[1, 4, 224, 224]) - with self.assertRaisesRegex(Exception, '.*input1.*'): - function.get_parameters()[0].layout = Layout("NDHWC") - - def test_guess_layout_reverse_channels_dont_apply_to_4(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None) - function = create_function2(shape1=[1, 224, 224, 3], shape2=[1, 4, 224, 224]) - process_function(ov_function=function, argv=argv) - - op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node0.get_type_name() != 'Relu') - op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node1.get_type_name() == 'Relu') - - def test_error_guess_layout_reverse_channels_multi_3(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None) - function = create_function2(shape1=[1, 224, 224, 3], shape2=[1, 3, 3, 224]) - process_function(ov_function=function, argv=argv) - # Applied to only input1 - op_node0 = 
list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node0.get_type_name() != 'Relu') - op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node1.get_type_name() == 'Relu') - - - def test_no_guess_layout_reverse_channels_has_layout_no_c(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None) - function = create_function2(shape1=[1, 224, 224, 3], shape2=[1, 3, 224, 224]) - function.get_parameters()[0].layout = Layout("NHW?") - function.get_parameters()[1].layout = Layout("N?HW") - # no suitable inputs - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_guess_layout_reverse_channels_incorrect_pos(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None) - function = create_function2(shape1=[1, 4, 224, 224], shape2=[1, 224, 224, 2]) - function.get_parameters()[0].layout = Layout("NCHW") - function.get_parameters()[1].layout = Layout("NHWC") - # no suitable inputs - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_no_reverse_channels_even_with_layout(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None) - function = create_function2(shape1=[3, 4, 224, 224], shape2=[1, 224, 3, 224]) - # no suitable inputs - with self.assertRaises(Exception): - process_function(ov_function=function, argv=argv) - - def test_reverse_channels_and_mean_scale(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values={ - 'input2a': { - 'mean': np.array([1., 2., 3.]), - 'scale': np.array([2., 4., 8.])}}, - scale=None) - function = create_function2(shape2=[1, 3, 224, 224]) - process_function(ov_function=function, argv=argv) - - # Verify that first is gather, then subtract 'mean', then 'scale' - gather = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(gather.get_type_name() == 'Gather') - range_node = gather.input(1).get_source_output().get_node() - self.assertTrue(range_node.get_type_name() == 'Range') - start = range_node.input(0).get_source_output().get_node() - end = range_node.input(1).get_source_output().get_node() - step = range_node.input(2).get_source_output().get_node() - self.check_constant(start, expected=[2], shape=[]) - self.check_constant(end, expected=[-1], shape=[]) - self.check_constant(step, expected=[-1], shape=[]) - axes = gather.input(2).get_source_output().get_node() - self.check_constant(axes, expected=[1], shape=[1]) - - op_node = list(gather.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[1., 2., 3.], shape=[1, 3, 1, 1]) - - op_node = list(op_node.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1]) - - # Verify that input1 is not affected - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertEqual(op_node.get_type_name(), 'Relu') - - # Verify that guessed layout (?C??) 
is not appeared in input2 - self.assertEqual(function.get_parameters()[1].layout, Layout()) - - def test_friendly_name(self): - argv = Namespace(mean_scale_values={'input1': {'mean': np.array([2., 4., 8.]), 'scale': None}}, - layout_values={'input1': {'source_layout': 'nchw'}}, - scale=None) - function = create_function1(shape1=[1, 3, 224, 224]) - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1]) - - # Verify that layout (nchw) is appeared in input1 - self.assertEqual(function.get_parameters()[0].layout, Layout('nchw')) - - def test_sorting_tensor_names(self): - argv = Namespace(mean_scale_values={'c_input': {'mean': np.array([2., 4., 8.]), 'scale': None}}, - layout_values={'c_input': {'source_layout': 'nchw'}}, - scale=127.5) - function = create_function3(shape1=[1, 3, 224, 224]) - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1]) - - op_node = list(op_node.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=127.5, shape=[1]) - - # Verify that layout (nchw) is appeared in input1 - self.assertEqual(function.get_parameters()[0].layout, Layout('nchw')) - - def test_sorting_tensor_names_friendly_name_case(self): - argv = Namespace(mean_scale_values={'input1': {'mean': np.array([2., 4., 8.]), 'scale': None}}, - layout_values={'input1': {'source_layout': 'nchw'}}, - scale=127.5) - function = create_function3(shape1=[1, 3, 224, 224]) - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1]) - - op_node = list(op_node.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=127.5, shape=[1]) - - # Verify that layout (nchw) is appeared in input1 - self.assertEqual(function.get_parameters()[0].layout, Layout('nchw')) - - def test_sorting_tensor_names_unnamed_layout(self): - argv = Namespace(mean_scale_values={'input1': {'mean': np.array([2., 4., 8.]), 'scale': None}}, - layout_values={'': {'source_layout': 'nchw'}}, - scale=127.5) - function = create_function3(shape1=[1, 3, 224, 224]) - process_function(ov_function=function, argv=argv) - op_node = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Subtract' or op_node.get_type_name() == 'Add') - self.check_mean_constant(op_node, expected=[2., 4., 8.], shape=[1, 3, 1, 1]) - - op_node = list(op_node.output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node.get_type_name() == 'Divide' or op_node.get_type_name() == 'Multiply') - self.check_scale_constant(op_node, expected=127.5, shape=[1]) - - # Verify that layout (nchw) is appeared in 
input1 - self.assertEqual(function.get_parameters()[0].layout, Layout('nchw')) - - def test_sorting_tensor_names_unnamed_layout_list(self): - argv = Namespace(reverse_input_channels=True, mean_scale_values=None, scale=None, - layout_values=[{'source_layout': 'nchw', 'target_layout': 'nhwc'}, - {'source_layout': 'nhwc', 'target_layout': 'nchw'}]) - - function = create_function2(shape1=[1, 3, 5, 5], shape2=[1, 5, 5, 3]) - process_function(ov_function=function, argv=argv) - # Verify that reverse_channels are applied. - op_node0 = list(function.get_parameters()[0].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node0.get_type_name() != 'Relu') - op_node1 = list(function.get_parameters()[1].output(0).get_target_inputs())[0].get_node() - self.assertTrue(op_node1.get_type_name() != 'Relu') diff --git a/tools/mo/unit_tests/mo/back/names_uniqueness_check_test.py b/tools/mo/unit_tests/mo/back/names_uniqueness_check_test.py deleted file mode 100644 index 7623c19d39a67f..00000000000000 --- a/tools/mo/unit_tests/mo/back/names_uniqueness_check_test.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.back.names_uniqueness_check import NamesUniquenessCheck -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - - -class TestNamesUniquenessCheck(unittest.TestCase): - - def test_1(self): - graph = build_graph( - nodes_attrs={ - 'input': {'kind': 'op', 'op': 'Parameter', 'name': 'node'}, - 'cast': {'kind': 'op', 'op': 'Cast', 'name': 'node'}, - 'result': {'kind': 'op', 'op': 'Result', 'name': 'node'} - }, - edges=[ - ('input', 'cast'), - ('cast', 'result') - ] - ) - - NamesUniquenessCheck().find_and_replace_pattern(graph) - names = [node.name for node in graph.get_op_nodes()] - result_name = Node(graph, 'result').name - - self.assertTrue(len(set(names)) == 3) - self.assertTrue(result_name == 'node') - - def test_2(self): - graph = build_graph( - nodes_attrs={ - 'input': {'kind': 'op', 'op': 'Parameter', 'name': 'node'}, - 'cast': {'kind': 'op', 'op': 'Cast', 'name': 'node_0'}, - 'result': {'kind': 'op', 'op': 'Result', 'name': 'node'} - }, - edges=[ - ('input', 'cast'), - ('cast', 'result') - ] - ) - - NamesUniquenessCheck().find_and_replace_pattern(graph) - names = [node.name for node in graph.get_op_nodes()] - result_name = Node(graph, 'result').name - - self.assertTrue(len(set(names)) == 3) - self.assertTrue(result_name == 'node') - - def test_3(self): - graph = build_graph( - nodes_attrs={ - 'input': {'kind': 'op', 'op': 'Parameter', 'name': 'node_0'}, - 'cast': {'kind': 'op', 'op': 'Cast', 'name': 'node_1'}, - 'result_1': {'kind': 'op', 'op': 'Result', 'name': 'node'}, - 'result_2': {'kind': 'op', 'op': 'Result', 'name': 'node'} - }, - edges=[ - ('input', 'cast'), - ('cast', 'result_1'), - ('cast', 'result_2'), - ] - ) - NamesUniquenessCheck().find_and_replace_pattern(graph) - names = [node.name for node in graph.get_op_nodes()] - - self.assertTrue('node' in names) - self.assertTrue(len(set(names)) == 4) diff --git a/tools/mo/unit_tests/mo/back/remove_last_softmax_test.py b/tools/mo/unit_tests/mo/back/remove_last_softmax_test.py deleted file mode 100644 index d9b278f51c88bd..00000000000000 --- a/tools/mo/unit_tests/mo/back/remove_last_softmax_test.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from 
openvino.tools.mo.back.remove_last_softmax_pattern import RemoveLastSoftMaxPattern, RemoveLastLogSoftMaxPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class KaldiRemoveLastSoftMaxTest(unittest.TestCase): - nodes = { - 'input_node': { - 'kind': 'data' - }, - 'softmax_node': { - 'op': 'SoftMax', - 'kind': 'op' - }, - 'output_node': { - 'kind': 'data' - }, - 'op_output': { - 'kind': 'op', - 'op': 'Result' - }, - 'log_node': { - 'op': 'Log', - 'kind': 'op' - }, - 'log_data': { - 'kind': 'data' - }, - } - - nodes_for_logsoftmax = { - 'input': {'kind': 'op', 'op': 'Parameter'}, - 'input_data': {'kind': 'data'}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'reduce_max_node': {'kind': 'op', 'op': 'ReduceMax'}, - 'reduce_max_node_data': {'kind': 'data'}, - 'reduce_max_axis': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': int64_array([1]), - 'shape': int64_array([1]), - }, - 'reduce_max_axis_data': { - 'kind': 'data', - 'value': int64_array([1]), - 'shape': int64_array([1]), - }, - 'sub_data': {'kind': 'data'}, - 'exp': {'kind': 'op', 'op': 'Exp'}, - 'exp_data': {'kind': 'data'}, - 'reduce_sum_node': {'kind': 'op', 'op': 'ReduceSum'}, - 'reduce_sum_node_data': {'kind': 'data'}, - 'reduce_sum_axis': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': int64_array([1]), - 'shape': int64_array([1]), - }, - 'reduce_sum_axis_data': { - 'kind': 'data', - 'value': int64_array([1]), - 'shape': int64_array([1]), - }, - 'log': {'kind': 'op', 'op': 'Log'}, - 'log_data': {'kind': 'data'}, - 'last_sub': {'kind': 'op', 'op': 'Sub'}, - 'last_sub_data': {'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - } - - edges_for_logsoftmax = [ - ('input', 'input_data'), - ('input_data', 'sub', {'in': 0}), - ('input_data', 'reduce_max_node', {'in': 0}), - ('reduce_max_node', 'reduce_max_node_data'), - ('reduce_max_node_data', 'sub', {'in': 1}), - ('reduce_max_axis', 'reduce_max_axis_data'), - ('reduce_max_axis_data', 'reduce_max_node', {'in': 1}), - ('sub', 'sub_data'), - ('sub_data', 'exp', {'out': 0, 'in': 0}), - ('exp', 'exp_data'), - ('exp_data', 'reduce_sum_node', {'in': 0}), - ('reduce_sum_node', 'reduce_sum_node_data'), - ('reduce_sum_axis', 'reduce_sum_axis_data'), - ('reduce_sum_axis_data', 'reduce_sum_node', {'in': 1}), - ('reduce_sum_node_data', 'log'), - ('log', 'log_data'), - ('log_data', 'last_sub', {'in': 1}), - ('last_sub', 'last_sub_data'), - ('sub_data', 'last_sub', {'out': 0, 'in': 0}), - ('last_sub_data', 'op_output'), - ] - - def test_remove_last_SoftMax(self): - graph = build_graph(self.nodes, [ - ('input_node', 'softmax_node'), - ('softmax_node', 'output_node'), - ('output_node', 'op_output') - ], nodes_with_edges_only=True) - RemoveLastSoftMaxPattern().find_and_replace_pattern(graph) - self.assertNotIn('softmax_node', graph.node) - - def test_remove_last_LogSoftMax(self): - graph = build_graph(nodes_attrs=self.nodes_for_logsoftmax, edges=self.edges_for_logsoftmax) - RemoveLastLogSoftMaxPattern().find_and_replace_pattern(graph) - graph.clean_up() - - ref_graph_nodes_attributes = { - 'input': {'kind': 'op', 'op': 'Parameter'}, - 'input_data': {'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - } - - ref_graph_edges = [('input', 'input_data'), ('input_data', 'op_output')] - ref_graph = build_graph(ref_graph_nodes_attributes, ref_graph_edges) - (flag, resp) = compare_graphs(graph, 
ref_graph, 'op_output') - self.assertTrue(flag, resp) - - def test_do_not_remove_not_last_SoftMax(self): - graph = build_graph(self.nodes, [ - ('input_node', 'softmax_node'), - ('softmax_node', 'output_node') - ]) - RemoveLastSoftMaxPattern().find_and_replace_pattern(graph) - self.assertIn('softmax_node', graph.node) diff --git a/tools/mo/unit_tests/mo/bom_test.py b/tools/mo/unit_tests/mo/bom_test.py deleted file mode 100644 index b836eefc888f2b..00000000000000 --- a/tools/mo/unit_tests/mo/bom_test.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import platform -import re -import unittest -from itertools import islice - -from openvino.tools.mo.utils.utils import get_mo_root_dir - -dir_patterns_to_skip = ['.*__pycache__.*'] -file_patterns_to_skip = ['.*\\.DS_Store$', - '.*\\.swp', - '.*\\.pyc$', - 'requirements.*\.txt', - 'version.txt'] -full_name_patterns_to_skip = ['^openvino/tools/mo/utils/convert.py$', - '^openvino/tools/mo/front/caffe/CustomLayersMapping.xml$', - ] -if platform.system() == 'Windows': - full_name_patterns_to_skip = [i.replace('/', '\\\\') for i in full_name_patterns_to_skip] - - -def is_match(name: str, patterns: ()): - return any((re.match(pattern, name) for pattern in patterns)) - - -class TestBOMFile(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.existing_files = [] - cur_path = os.path.join(os.path.realpath(__file__), os.pardir) - mo_path = os.path.abspath(os.path.join(cur_path, os.pardir, os.pardir)) - with open(os.path.join(mo_path, 'automation', 'package_BOM.txt'), 'r') as bom_file: - if platform.system() == 'Windows': - cls.existing_files = [name.rstrip().replace('/', '\\') for name in bom_file.readlines()] - else: - cls.existing_files = [name.rstrip() for name in bom_file.readlines()] - - # dirs_to_search is the root directory where MO is located, 'openvino_project_root/tools/mo/openvino/tools' - cls.dirs_to_search = os.path.normpath(get_mo_root_dir() + '/mo/') - cls.prefix = os.path.normpath(get_mo_root_dir() + '../../../') # prefix which is used in BOM file - cls.expected_header = [re.compile(pattern) for pattern in [ - r'^# Copyright \([cC]\) [0-9\-]+ Intel Corporation$', - r'^# SPDX-License-Identifier: Apache-2.0$', - ]] - - def test_bom_file(self): - missing_files = list() - for src_dir in [self.dirs_to_search]: - if not os.path.isdir(src_dir): - continue - for root, dirs, files in os.walk(src_dir): - if is_match(root, dir_patterns_to_skip): - continue - for f in files: - full_name = os.path.join(root, f) - full_name = full_name[len(self.prefix) + 1:] - if is_match(f, file_patterns_to_skip): - continue - if is_match(full_name, full_name_patterns_to_skip): - continue - if full_name not in self.existing_files: - missing_files.append(full_name) - - if len(missing_files) != 0: - print("Missing files:") - for f in missing_files: - print(f.replace('\\', '/')) - self.assertTrue(not len(missing_files), '{} files missed in BOM'.format(len(missing_files))) - - def test_bom_does_not_contain_unittest_files(self): - for file_name in self.existing_files: - self.assertFalse(file_name.endswith('_test.py'), 'BOM file contains test file {}'.format(file_name)) - - def test_deleted_files_still_stored_in_bom(self): - deleted = list() - for file in self.existing_files: - if not os.path.isfile(os.path.join(self.prefix, file)): - deleted.append(file) - if len(deleted) != 0: - print("Deleted files still stored in BOM file:") - for f in deleted: - print(f) - self.assertTrue(not 
len(deleted), '{} files deleted but still stored in BOM'.format(len(deleted))) - - def test_alphabetical_order_and_duplicates(self): - sorted_bom = sorted([x for x in self.existing_files if self.existing_files.count(x) == 1], key=str.lower) - if self.existing_files != sorted_bom: - print("Wrong order. Alphabetical order of BOM is:") - print(*sorted_bom, sep='\n') - self.assertTrue(False) - - def test_missed_intel_header(self): - missing_files = list() - for src_dir in [self.dirs_to_search]: - if not os.path.isdir(src_dir): - continue - for root, dirs, files in os.walk(src_dir): - if is_match(root, dir_patterns_to_skip): - continue - for f in files: - ignores = [ - '^__init__.py$', - '^caffe_pb2.py$', - '^.*.pyc$', - '^generate_caffe_pb2.py$' - ] - if not is_match(f, ['.*.py$']) or is_match(f, ignores): - continue - full_name = os.path.join(root, f) - with open(full_name, 'r') as source_f: - # read two more lines from the file because it can contain shebang and empty lines - s = [x.strip() for x in islice(source_f, len(self.expected_header) + 2)] - # skip shebang and empty lines in the beginning of the file - try: - while s[0] in ('', '#!/usr/bin/env python3'): - s = s[1:] - for str_ind in range(0, len(self.expected_header)): - if not re.match(self.expected_header[str_ind], s[str_ind]): - missing_files.append(full_name) - break - except: - pass - self.assertTrue(not len(missing_files), - '{} files with missed header: \n{}'.format(len(missing_files), '\n'.join(missing_files))) diff --git a/tools/mo/unit_tests/mo/convert/import_from_mo_test.py b/tools/mo/unit_tests/mo/convert/import_from_mo_test.py deleted file mode 100644 index 03f523fe57f372..00000000000000 --- a/tools/mo/unit_tests/mo/convert/import_from_mo_test.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import tempfile -from pathlib import Path - -import pytest -from openvino.runtime import serialize - -from openvino.tools.mo import InputCutInfo, LayoutMap -from openvino.tools.mo.utils.ir_engine.ir_engine import IREngine -from unit_tests.utils.graph import build_graph -from utils import create_onnx_model, save_to_onnx - - -class TestConvertImportMOTest(): - test_directory = os.path.dirname(os.path.realpath(__file__)) - - @staticmethod - def create_onnx_model(): - # - # Create ONNX model - # - - import onnx - from onnx import helper - from onnx import TensorProto - - shape = [1, 2, 3] - - input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape) - output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape) - - node_def = onnx.helper.make_node( - 'Relu', - inputs=['input'], - outputs=['Relu_out'], - ) - node_def2 = onnx.helper.make_node( - 'Sigmoid', - inputs=['Relu_out'], - outputs=['output'], - ) - - # Create the graph (GraphProto) - graph_def = helper.make_graph( - [node_def, node_def2], - 'test_model', - [input], - [output], - ) - - # Create the model (ModelProto) - onnx_net = helper.make_model(graph_def, producer_name='test_model') - return onnx_net - - @staticmethod - def create_model_ref(): - nodes_attributes = { - 'input': {'kind': 'op', 'type': 'Parameter'}, - 'input_data': {'shape': [1, 2, 3], 'kind': 'data'}, - 'relu': {'kind': 'op', 'type': 'ReLU'}, - 'relu_data': {'shape': [1, 2, 3], 'kind': 'data'}, - 'sigmoid': {'kind': 'op', 'type': 'Sigmoid'}, - 'sigmoid_data': {'shape': [1, 2, 3], 'kind': 'data'}, - 'result': {'kind': 'op', 'type': 'Result'} - } - - ref_graph = build_graph(nodes_attributes, - [('input', 
'input_data'), - ('input_data', 'relu'), - ('relu', 'relu_data'), - ('relu_data', 'sigmoid'), - ('sigmoid', 'sigmoid_data'), - ('sigmoid_data', 'result'), - ]) - return ref_graph - - @pytest.mark.parametrize("params",[ - ({}), - ({'input': InputCutInfo(name='LeakyRelu_out', shape=None, type=None, value=None)}), - ({'layout': {'input': LayoutMap(source_layout='NCHW', target_layout='NHWC')}}), - ]) - # Checks convert import from openvino.tools.mo - def test_import(self, params): - from openvino.tools.mo import convert_model - - with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir: - model = create_onnx_model() - model_path = save_to_onnx(model, tmpdir) - out_xml = os.path.join(tmpdir, "model.xml") - - ov_model = convert_model(input_model=model_path, **params) - serialize(ov_model, out_xml.encode('utf-8'), out_xml.replace('.xml', '.bin').encode('utf-8')) - assert os.path.exists(out_xml) - - def test_input_model_path(self): - from openvino.tools.mo import convert_model - - with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir: - model = self.create_onnx_model() - model_path = save_to_onnx(model, tmpdir) - out_xml = os.path.join(tmpdir, "model.xml") - - ov_model = convert_model(Path(model_path)) - serialize(ov_model, out_xml.encode('utf-8'), out_xml.replace('.xml', '.bin').encode('utf-8')) - - ir = IREngine(out_xml, out_xml.replace('.xml', '.bin')) - ref_graph = self.create_model_ref() - flag, resp = ir.compare(ref_graph) - assert flag, '\n'.join(resp) - - def test_unnamed_input_model(self): - from openvino.tools.mo import convert_model - with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir: - model = self.create_onnx_model() - model_path = save_to_onnx(model, tmpdir) - out_xml = os.path.join(tmpdir, "model.xml") - - ov_model = convert_model(model_path) - serialize(ov_model, out_xml.encode('utf-8'), out_xml.replace('.xml', '.bin').encode('utf-8')) - - ir = IREngine(out_xml, out_xml.replace('.xml', '.bin')) - ref_graph = self.create_model_ref() - flag, resp = ir.compare(ref_graph) - assert flag, '\n'.join(resp) diff --git a/tools/mo/unit_tests/mo/convert/logger_test_actual.py b/tools/mo/unit_tests/mo/convert/logger_test_actual.py deleted file mode 100644 index 6e384a71bb6596..00000000000000 --- a/tools/mo/unit_tests/mo/convert/logger_test_actual.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import os -import sys -import tempfile - - -def create_tf_model(out_dir): - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input') - inp2 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input') - relu = tf.nn.relu(inp1 + inp2, name='Relu') - - output = tf.nn.sigmoid(relu, name='Sigmoid') - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - tf.io.write_graph(tf_net, out_dir + os.sep, 'model_bool.pb', as_text=False) - return out_dir + os.sep + 'model_bool.pb' - - -def run_main(): - from openvino.tools.mo import convert_model - - log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout) - test_directory = os.path.dirname(os.path.realpath(__file__)) - - with tempfile.TemporaryDirectory(dir=test_directory) as tmpdir: - tf_model = create_tf_model(test_directory) - _ = convert_model(tf_model) - - log.info("test message 1") - - logger = log.getLogger() - assert logger.level == 20 - assert 
len(logger.handlers) == 1 - assert len(logger.filters) == 0 - - _ = convert_model(tf_model, log_level="DEBUG", silent=False) - - log.info("test message 2") - - logger = log.getLogger() - assert logger.level == 20 - assert len(logger.handlers) == 1 - assert len(logger.filters) == 0 - - _ = convert_model(tf_model, log_level="CRITICAL", silent=False) - - log.info("test message 3") - - logger = log.getLogger() - assert logger.level == 20 - assert len(logger.handlers) == 1 - assert len(logger.filters) == 0 - - -if __name__ == "__main__": - run_main() diff --git a/tools/mo/unit_tests/mo/convert/meta_data_test.py b/tools/mo/unit_tests/mo/convert/meta_data_test.py deleted file mode 100644 index 6493737533b60a..00000000000000 --- a/tools/mo/unit_tests/mo/convert/meta_data_test.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import tempfile -from pathlib import Path - -from openvino.runtime import get_version as get_rt_version -from openvino.runtime import serialize - -from openvino.tools.mo import convert_model -from openvino.tools.mo.utils import import_extensions -from openvino.tools.mo.utils.version import get_version -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from utils import save_to_onnx - -from openvino.tools.mo.utils.ir_reader.restore_graph import restore_graph_from_ir, save_restored_graph - - -class MetaDataTest(UnitTestWithMockedTelemetry): - test_directory = os.path.dirname(os.path.realpath(__file__)) - - def test_meta_data(self): - def create_onnx_model(): - # - # Create ONNX model - # - - import onnx - from onnx import helper - from onnx import TensorProto - - shape = [1, 2, 3] - - input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape) - output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape) - - node_def = onnx.helper.make_node( - 'Relu', - inputs=['input'], - outputs=['Relu_out'], - ) - node_def2 = onnx.helper.make_node( - 'Sigmoid', - inputs=['Relu_out'], - outputs=['output'], - ) - - # Create the graph (GraphProto) - graph_def = helper.make_graph( - [node_def, node_def2], - 'test_model', - [input], - [output], - ) - - # Create the model (ModelProto) - onnx_net = helper.make_model(graph_def, producer_name='test_model') - return onnx_net - - def ref_meta_data(): - return { - 'MO_version': get_version(), - 'Runtime_version': get_rt_version(), - 'legacy_frontend': "False", - 'conversion_parameters': { - 'input_model': Path.joinpath(Path("DIR"), Path("model.onnx")), - } - - } - - def check_meta_data(ov_model): - ref_meta = ref_meta_data() - for key, value in ref_meta.items(): - if key == 'conversion_parameters': - for param_name, param_value in value.items(): - val = ov_model.get_rt_info([key, param_name]).astype(str) - if param_name in ['extensions', 'caffe_parser_path', 'input_model', 'k', 'output_dir']: - val = Path(val) - assert val == param_value, \ - "Runtime info attribute with name {} does not match. Expected: {}, " \ - "got {}".format(param_name, param_value, val) - continue - assert ov_model.get_rt_info(key).astype(str) == value, \ - "Runtime info attribute with name {} does not match. 
Expected: {}, " \ - "got {}".format(key, value, ov_model.get_rt_info(key).astype(str)) - - with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir: - - model = create_onnx_model() - model_path = save_to_onnx(model, tmpdir) - out_xml = os.path.join(tmpdir, "model.xml") - - ov_model = convert_model(model_path) - check_meta_data(ov_model) - - serialize(ov_model, out_xml.encode('utf-8'), out_xml.replace('.xml', '.bin').encode('utf-8')) - - from openvino.runtime import Core - core = Core() - serialized_model = core.read_model(out_xml) - check_meta_data(serialized_model) - - restored_graph, meta_data = restore_graph_from_ir(out_xml, out_xml.replace('.xml', '.bin')) - save_restored_graph(restored_graph, tmpdir, meta_data, "mo_ir_reader_test_model") - - mo_ir_reader_test_model = core.read_model(os.path.join(tmpdir, "mo_ir_reader_test_model.xml")) - check_meta_data(mo_ir_reader_test_model) diff --git a/tools/mo/unit_tests/mo/convert/meta_data_test_actual.py b/tools/mo/unit_tests/mo/convert/meta_data_test_actual.py deleted file mode 100644 index 9d012bc43dc616..00000000000000 --- a/tools/mo/unit_tests/mo/convert/meta_data_test_actual.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import tempfile -import unittest -from pathlib import Path - -from openvino.runtime import get_version as get_rt_version -from openvino.runtime import serialize -from openvino.tools.mo import convert_model -from openvino.tools.mo.utils.ir_reader.restore_graph import restore_graph_from_ir, save_restored_graph -from openvino.tools.mo.utils.version import get_version - - -class MetaDataTestTF(unittest.TestCase): - test_directory = os.path.dirname(os.path.realpath(__file__)) - - @staticmethod - def check_meta_data(ov_model, ref_meta): - ignore_attrs = ['version', 'optimization'] - for key, value in ref_meta.items(): - if key == 'conversion_parameters': - for param_name, param_value in value.items(): - val = ov_model.get_rt_info([key, param_name]).astype(str) - if param_name in ['extensions', 'caffe_parser_path', 'input_model', 'k', 'output_dir']: - val = Path(val) - assert val == param_value, \ - "Runtime info attribute with name {} does not match. Expected: {}, " \ - "got {}".format(param_name, param_value, val) - continue - assert ov_model.get_rt_info(key).astype(str) == value, \ - "Runtime info attribute with name {} does not match. 
Expected: {}, " \ - "got {}".format(key, value, ov_model.get_rt_info(key).astype(str)) - - for key, value in ov_model.get_rt_info().items(): - if key in ignore_attrs: - continue - assert key in ref_meta, "Unexpected runtime info attribute: {}".format(key) - - def test_meta_data_tf(self): - def create_tf_model(out_dir): - import tensorflow as tf - - tf.compat.v1.reset_default_graph() - - with tf.compat.v1.Session() as sess: - inp1 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input') - inp2 = tf.compat.v1.placeholder(tf.float32, [1, 2, 3], 'Input') - relu = tf.nn.relu(inp1 + inp2, name='Relu') - - output = tf.nn.sigmoid(relu, name='Sigmoid') - - tf.compat.v1.global_variables_initializer() - tf_net = sess.graph_def - tf.io.write_graph(tf_net, out_dir + os.sep, 'model_bool.pb', as_text=False) - return out_dir + os.sep + 'model_bool.pb' - - def ref_meta_data(): - return { - 'MO_version': get_version(), - 'Runtime_version': get_rt_version(), - 'legacy_frontend': "False", - 'conversion_parameters': { - 'scale': "1.5", - 'batch': "1" - } - } - - with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir: - model = create_tf_model(tmpdir) - out_xml = os.path.join(tmpdir, "model.xml") - ref_meta = ref_meta_data() - - ov_model = convert_model(model, scale=1.5, batch=1) - self.check_meta_data(ov_model, ref_meta) - - serialize(ov_model, out_xml.encode('utf-8'), out_xml.replace('.xml', '.bin').encode('utf-8')) - - from openvino.runtime import Core - core = Core() - deserialized_model = core.read_model(out_xml) - self.check_meta_data(deserialized_model, ref_meta) - - restored_graph, meta_data = restore_graph_from_ir(out_xml, out_xml.replace('.xml', '.bin')) - save_restored_graph(restored_graph, tmpdir, meta_data, "mo_ir_reader_test_model") - - mo_ir_reader_test_model = core.read_model(os.path.join(tmpdir, "mo_ir_reader_test_model.xml")) - self.check_meta_data(mo_ir_reader_test_model, ref_meta) diff --git a/tools/mo/unit_tests/mo/convert/utils.py b/tools/mo/unit_tests/mo/convert/utils.py deleted file mode 100644 index 66970cab8e597c..00000000000000 --- a/tools/mo/unit_tests/mo/convert/utils.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os - - -def create_onnx_model(): - # - # Create ONNX model - # - - import onnx - from onnx import helper - from onnx import TensorProto - - shape = [1, 3, 2, 2] - - input = helper.make_tensor_value_info('input', TensorProto.FLOAT, shape) - output = helper.make_tensor_value_info('output', TensorProto.FLOAT, shape) - - node_def = onnx.helper.make_node( - 'LeakyRelu', - inputs=['input'], - outputs=['LeakyRelu_out'], - alpha=0.1 - ) - node_def2 = onnx.helper.make_node( - 'Elu', - inputs=['LeakyRelu_out'], - outputs=['output'], - alpha=0.1 - ) - - # Create the graph (GraphProto) - graph_def = helper.make_graph( - [node_def, node_def2], - 'test_model', - [input], - [output], - ) - - # Create the model (ModelProto) - onnx_net = helper.make_model(graph_def, producer_name='test_model') - return onnx_net - - -def save_to_onnx(onnx_model, path_to_saved_onnx_model): - import onnx - path = os.path.join(path_to_saved_onnx_model, 'model.onnx') - onnx.save(onnx_model, path) - assert os.path.isfile(path), "model.onnx haven't been saved here: {}".format(path_to_saved_onnx_model) - return path diff --git a/tools/mo/unit_tests/mo/convert/version_checker_test_actual.py b/tools/mo/unit_tests/mo/convert/version_checker_test_actual.py deleted file mode 100644 index 663f1f207a2bde..00000000000000 --- 
a/tools/mo/unit_tests/mo/convert/version_checker_test_actual.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.runtime import get_version as get_ie_version - -from openvino.tools.mo.utils.version import get_version, get_simplified_ie_version, \ - get_simplified_mo_version, VersionChecker - - -class VersionCheckerTest(unittest.TestCase): - def test_version_checker(self): - import datetime - import os - ref_mo_version = get_version() - ref_ie_version = get_ie_version() - ref_mo_simplified_version = get_simplified_mo_version() - ref_ie_simplified_version = get_simplified_ie_version(env=os.environ) - - # first init of VersionChecker - start_time = datetime.datetime.now() - VersionChecker().check_runtime_dependencies() - VersionChecker().get_mo_version() - VersionChecker().get_ie_version() - VersionChecker().get_mo_simplified_version() - VersionChecker().get_ie_simplified_version() - first_init_time = (datetime.datetime.now() - start_time).total_seconds() - - # Loop with multiple usages of VersionChecker - start_time = datetime.datetime.now() - for _ in range(100): - VersionChecker().check_runtime_dependencies() - assert VersionChecker().get_mo_version() == ref_mo_version - assert VersionChecker().get_ie_version() == ref_ie_version - assert VersionChecker().get_mo_simplified_version() == ref_mo_simplified_version - assert VersionChecker().get_ie_simplified_version() == ref_ie_simplified_version - loop_time = (datetime.datetime.now() - start_time).total_seconds() - - # Check that time of loop is less than first init, so no actual initialization happens - assert loop_time < first_init_time diff --git a/tools/mo/unit_tests/mo/extensions_test_actual.py b/tools/mo/unit_tests/mo/extensions_test_actual.py deleted file mode 100644 index df9dc8c7688e9f..00000000000000 --- a/tools/mo/unit_tests/mo/extensions_test_actual.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest -import unittest -from unittest.mock import Mock -import onnx -from onnx.helper import make_graph, make_model, make_tensor_value_info -import os -from os import path -import json -import argparse -from pathlib import Path -from itertools import chain -from openvino.tools.mo.convert_impl import prepare_ir -from openvino.frontend import ( - FrontEndManager, -) # pylint: disable=no-name-in-module,import-error - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm - - -def base_args_config(): - args = argparse.Namespace() - args.feManager = FrontEndManager() - args.extensions = None - args.use_legacy_frontend = False - args.use_new_frontend = True - args.framework = "onnx" - args.model_name = None - args.input_model = None - args.silent = True - args.transform = [] - args.legacy_ir_generation = False - args.scale = None - args.output = None - args.input = None - args.input_shape = None - args.batch = None - args.mean_values = None - args.scale_values = None - args.output_dir = os.getcwd() - args.freeze_placeholder_with_value = None - args.transformations_config = None - args.disable_gfusing = None - args.static_shape = None - args.reverse_input_channels = None - args.data_type = None - args.layout = None - args.source_layout = None - args.target_layout = None - return args - - -def get_builtin_extensions_path(): - win_folder_path = 
Path(__file__).parent.parent.parent.parent - linux_folder_path = win_folder_path.joinpath("lib") - for lib_path in chain( - win_folder_path.glob("*.dll"), linux_folder_path.glob("*.so") - ): - if "libtest_builtin_extensions" in lib_path.name: - return str(lib_path) - return "" - - -class TestMoFallback(unittest.TestCase): - def setUp(self): - tm.Telemetry.__init__ = Mock(return_value=None) - tm.Telemetry.send_event = Mock() - - self.models = {} - relu = onnx.helper.make_node("Relu", inputs=["in"], outputs=["out"]) - input_tensors = [ - make_tensor_value_info("in", onnx.TensorProto.FLOAT, (1, 2)), - ] - output_tensors = [ - make_tensor_value_info("out", onnx.TensorProto.FLOAT, (1, 2)), - ] - graph = make_graph([relu], "test_graph", input_tensors, output_tensors) - model = make_model( - graph, - producer_name="MO tests", - opset_imports=[onnx.helper.make_opsetid("", 13)], - ) - self.models["test_model.onnx"] = model - for name, model in self.models.items(): - onnx.save(model, name) - - def tearDown(self): - for name in self.models.keys(): - os.remove(name) - - @pytest.mark.skipif( - len(get_builtin_extensions_path()) == 0, - reason="The extension library path was not found", - ) - def test_conersion_if_extensions_is_used(self): - args = base_args_config() - args.input_model = "test_model.onnx" - args.extensions = [get_builtin_extensions_path()] - - graph, model = prepare_ir(args) - - assert any(op.get_type_name() == "Swish" for op in model.get_ops()) - assert all(op.get_type_name() != "Relu" for op in model.get_ops()) diff --git a/tools/mo/unit_tests/mo/front/ATenToEmbeddingBag_test.py b/tools/mo/unit_tests/mo/front/ATenToEmbeddingBag_test.py deleted file mode 100644 index 2dba32302a4dc5..00000000000000 --- a/tools/mo/unit_tests/mo/front/ATenToEmbeddingBag_test.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.ATenToEmbeddingBag import AtenToEmbeddingBag -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op, const - - -class AtenToEmbeddingBagTest(unittest.TestCase): - def test(self): - nodes = { - **const('weights_inp', np.random.randn(100, 2)), - **regular_op('indices_inp', {'type': 'Parameter'}), - **regular_op('offsets_inp', {'type': 'Parameter'}), - **regular_op('aten', {'type': None, 'kind': 'op', 'op': 'ATen', 'operator': 'embedding_bag', 'mode': 0, - 'name': 'my_aten'}), - - **regular_op('emb_bag', {'type': 'EmbeddingBagOffsetsSum', 'kind': 'op', 'op': 'EmbeddingBagOffsetsSum'}), - **result('result'), - } - edges = [('weights_inp', 'aten'), - ('indices_inp', 'aten'), - ('offsets_inp', 'aten'), - ('aten', 'result'), - ] - graph = build_graph(nodes, edges) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - edges_ref = [('weights_inp', 'emb_bag'), - ('indices_inp', 'emb_bag'), - ('offsets_inp', 'emb_bag'), - ('emb_bag', 'result'), - ] - - graph_ref = build_graph(nodes, edges_ref) - - AtenToEmbeddingBag().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - def test_packed(self): - nodes = { - **const('weights_inp', np.random.randn(100, 4)), - **regular_op('indices_inp', {'type': 'Parameter'}), - **regular_op('aten', {'type': None, 'kind': 'op', 'op': 'ATen', 'operator': 'embedding_bag', 'mode': 0, 
- 'name': 'my_aten'}), - - **regular_op('emb_bag', {'type': 'EmbeddingBagPackedSum', 'kind': 'op', - 'op': 'EmbeddingBagPackedSum'}), - **result('result'), - } - edges = [('weights_inp', 'aten'), - ('indices_inp', 'aten'), - ('aten', 'result'), - ] - graph = build_graph(nodes, edges) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - edges_ref = [('weights_inp', 'emb_bag'), - ('indices_inp', 'emb_bag'), - ('emb_bag', 'result'), - ] - - graph_ref = build_graph(nodes, edges_ref) - - AtenToEmbeddingBag().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - def test_per_sample_weights(self): - nodes = { - **const('weights_inp', np.random.randn(100, 2)), - **regular_op('indices_inp', {'type': 'Parameter'}), - **regular_op('offsets_inp', {'type': 'Parameter'}), - **regular_op('per_sample_weights', {'type': 'Parameter'}), - **regular_op('aten', {'type': None, 'kind': 'op', 'op': 'ATen', 'operator': 'embedding_bag', 'mode': 0, - 'name': 'my_aten'}), - - **regular_op('emb_bag', {'type': 'EmbeddingBagOffsetsSum', 'kind': 'op', - 'op': 'EmbeddingBagOffsetsSum'}), - **regular_op('WeightsRank', {'type': None, 'kind': 'op', 'op': 'Rank'}), - **regular_op('WeightsRank/axis', {'type': 'Add', 'kind': 'op', 'op': 'Add'}), - **regular_op('gather1', {'type': 'Gather', 'kind': 'op', 'op': 'Gather'}), - **regular_op('gather2', {'type': 'Gather', 'kind': 'op', 'op': 'Gather'}), - **regular_op('WeightsShape', {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}), - **regular_op('Broadcast', {'type': 'Broadcast', 'kind': 'op', 'op': 'Broadcast'}), - **regular_op('Unsqueeze', {'type': 'Unsqueeze', 'kind': 'op', 'op': 'Unsqueeze'}), - **const('WeightsShape/Axis', int64_array(0)), - **const('zero1', int64_array(0)), - **const('zero2', int64_array(0)), - **const('Unsqueeze/value', int64_array(0)), - **const('Broadcast/value', int64_array(0)), - **const('neg', int64_array(-1)), - **regular_op('Concat', {'type': 'Concat', 'kind': 'op', 'op': 'Concat'}), - **result('result'), - } - edges = [('weights_inp', 'aten'), - ('indices_inp', 'aten'), - ('offsets_inp', 'aten'), - ('per_sample_weights', 'aten'), - ('aten', 'result'), - ] - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - edges_ref = [('weights_inp', 'Concat', {'in': 0, 'out': 0}), - ('weights_inp', 'WeightsShape', {'in': 0, 'out': 0}), - ('weights_inp', 'WeightsRank', {'in': 0, 'out': 0}), - ('WeightsRank', 'WeightsRank/axis'), - ('neg', 'WeightsRank/axis'), - ('WeightsShape', 'gather1', {'in': 0, 'out': 0}), - ('WeightsRank/axis', 'gather1'), - ('WeightsShape/Axis', 'gather1'), - ('WeightsShape', 'gather2', {'in': 0, 'out': 0}), - ('zero1', 'gather2'), - ('zero2', 'gather2'), - ('Broadcast/value', 'Broadcast'), - ('gather1', 'Broadcast'), - ('Broadcast', 'Unsqueeze'), - ('Unsqueeze/value', 'Unsqueeze'), - ('Unsqueeze', 'Concat'), - ('Concat', 'emb_bag'), - ('indices_inp', 'emb_bag'), - ('offsets_inp', 'emb_bag'), - ('gather2', 'emb_bag'), - ('per_sample_weights', 'emb_bag'), - ('emb_bag', 'result'), - ] - - graph_ref = build_graph(nodes, edges_ref, nodes_with_edges_only=True) - - AtenToEmbeddingBag().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/AttributedClampNormalizer_test.py b/tools/mo/unit_tests/mo/front/AttributedClampNormalizer_test.py deleted file mode 100644 index 
d447ed72849620..00000000000000 --- a/tools/mo/unit_tests/mo/front/AttributedClampNormalizer_test.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.AttributedClampNormalizer import AttributedClampNormalizer -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - -nodes_attributes = { - 'placeholder': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'attr_clamp': {'type': 'Clamp', 'kind': 'op', 'op': 'AttributedClamp', 'name': 'attr_clamp', - 'min': np.array(-3.5, dtype=np.float32), 'max': np.array(3.5, dtype=np.float32)}, - 'result': {'type': 'Result', 'value': None, 'kind': 'op', 'op': 'Result'}, - - # new Clamp layer and inputs - 'clamp': {'type': None, 'kind': 'op', 'op': 'Clamp'}, - **const('min', np.array(-3.5, dtype=np.float32)), - **const('max', np.array(3.5, dtype=np.float32)), -} - - -class AttributedClampNormalizerTest(unittest.TestCase): - def test_1(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'attr_clamp', {'in': 0, 'out': 0}), - ('attr_clamp', 'result', {'in': 0, 'out': 0}), - ], - {}, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder', 'clamp', {'in': 0, 'out': 0}), - ('min', 'clamp', {'in': 1, 'out': 0}), - ('max', 'clamp', {'in': 2, 'out': 0}), - ('clamp', 'result') - ], - {}, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - replacer = AttributedClampNormalizer() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Clamp')[0]]['name'] == 'attr_clamp') diff --git a/tools/mo/unit_tests/mo/front/AttributedPadToPad_test.py b/tools/mo/unit_tests/mo/front/AttributedPadToPad_test.py deleted file mode 100644 index 212967a92a64b8..00000000000000 --- a/tools/mo/unit_tests/mo/front/AttributedPadToPad_test.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.AttributedPadToPad import AttributedPadToPad -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - -nodes_attributes = { - 'placeholder': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'attr_pad': {'type': None, 'kind': 'op', 'op': 'AttributedPad', 'mode': 'constant', 'name': 'attr_pad', - 'pads': int64_array([1, 2, 3, 4, 5, 6]).reshape([3, 2]), 'fill_value': 0.75}, - 'result': {'type': 'Result', 'value': None, 'kind': 'op', 'op': 'Result'}, - - # new Pad layer and inputs - 'pad': {'type': 'Pad', 'kind': 'op', 'op': 'Pad', 'mode': 'constant'}, - 'convert_like': {'type': 'ConvertLike', 'kind': 'op', 'op': 'ConvertLike'}, - **const('pad_begin', int64_array([1, 3, 5])), - **const('pad_end', int64_array([2, 4, 6])), - **const('pad_fill', np.array(0.75)), -} - - -class AttributedPadToPadTest(unittest.TestCase): - def test_mode_constant(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'attr_pad', {'in': 0, 'out': 0}), - ('attr_pad', 'result', {'in': 0, 'out': 0}), - ], - {}, nodes_with_edges_only=True) 
- - graph_ref = build_graph(nodes_attributes, - [('placeholder', 'pad', {'in': 0, 'out': 0}), - ('pad_begin', 'pad', {'in': 1, 'out': 0}), - ('pad_end', 'pad', {'in': 2, 'out': 0}), - ('pad_fill', 'convert_like', {'in': 0, 'out': 0}), - ('placeholder', 'convert_like', {'in': 1, 'out': 0}), - ('convert_like', 'pad', {'in': 3, 'out': 0}), - ('pad', 'result') - ], - {}, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - replacer = AttributedPadToPad() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Pad')[0]]['name'] == 'attr_pad') - - def test_mode_non_constant(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'attr_pad', {'in': 0, 'out': 0}), - ('attr_pad', 'result', {'in': 0, 'out': 0}), - ], - {'attr_pad': {'mode': 'reflect'}}, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder', 'pad', {'in': 0, 'out': 0}), - ('pad_begin', 'pad', {'in': 1, 'out': 0}), - ('pad_end', 'pad', {'in': 2, 'out': 0}), - ('pad', 'result') - ], - {'pad': {'mode': 'reflect'}}, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - replacer = AttributedPadToPad() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Pad')[0]]['name'] == 'attr_pad') diff --git a/tools/mo/unit_tests/mo/front/AttributedRandomUniformToRandomUniform_test.py b/tools/mo/unit_tests/mo/front/AttributedRandomUniformToRandomUniform_test.py deleted file mode 100644 index 00beab5a624fa7..00000000000000 --- a/tools/mo/unit_tests/mo/front/AttributedRandomUniformToRandomUniform_test.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.AttributedRandomUniformToRandomUniform import AttributedRandomUniformToRandomUniform -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const, result, regular_op - -nodes = { - **regular_op('placeholder', {'type': 'Parameter'}), - **regular_op('attr_random_uniform', {'type': 'AttributedRandomUniform', 'op': 'AttributedRandomUniform', - 'output_type': np.float32, - 'min_val': float32_array([-1.5]), 'max_val': float32_array([10.7]), - 'shape': int64_array([5, 4, 3])}), - **result('result'), - - # new RandomUniform node and inputs - **regular_op('random_uniform', {'type': 'RandomUniform'}), - **const('min_val', float32_array([-1.5])), - **const('max_val', float32_array([10.7])), - **const('shape', int64_array([5, 4, 3])), -} - - -class AttributedRandomUniformToRandomUniformTest(unittest.TestCase): - def test_min_max(self): - graph = build_graph(nodes, - [('placeholder', 'attr_random_uniform', {'in': 0, 'out': 0}), - ('attr_random_uniform', 'result', {'in': 0, 'out': 0})], {}, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes, - [('placeholder', 'random_uniform', {'in': 0, 'out': 0}), - ('min_val', 'random_uniform', {'in': 1, 'out': 0}), - ('max_val', 'random_uniform', {'in': 2, 'out': 0}), - ('random_uniform', 'result')], {}, 
nodes_with_edges_only=True) - graph.stage = 'front' - - AttributedRandomUniformToRandomUniform().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue( - graph.node[graph.get_nodes_with_attributes(op='RandomUniform')[0]]['name'] == 'attr_random_uniform') - - def test_min_max_shape(self): - graph = build_graph(nodes, - [('attr_random_uniform', 'result', {'in': 0, 'out': 0})], {}, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes, - [('shape', 'random_uniform', {'in': 0, 'out': 0}), - ('min_val', 'random_uniform', {'in': 1, 'out': 0}), - ('max_val', 'random_uniform', {'in': 2, 'out': 0}), - ('random_uniform', 'result')], {}, nodes_with_edges_only=True) - graph.stage = 'front' - - AttributedRandomUniformToRandomUniform().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue( - graph.node[graph.get_nodes_with_attributes(op='RandomUniform')[0]]['name'] == 'attr_random_uniform') diff --git a/tools/mo/unit_tests/mo/front/AttributedRollToRoll_test.py b/tools/mo/unit_tests/mo/front/AttributedRollToRoll_test.py deleted file mode 100644 index b6518cc00b1315..00000000000000 --- a/tools/mo/unit_tests/mo/front/AttributedRollToRoll_test.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.AttributedRollToRoll import AttributedRollToRoll -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const, result, regular_op - -nodes_attributes = { - **regular_op('placeholder', {'type': 'Parameter'}), - **regular_op('attr_roll', {'type': 'AttributedRoll', 'op': 'AttributedRoll', 'axes': int64_array([-1, 2, 3]), - 'shift': int64_array([5, -2, 3])}), - **result('result'), - - # new Roll node and inputs - **regular_op('roll', {'type': 'Roll'}), - **const('roll_axes', int64_array([-1, 2, 3])), - **const('roll_shift', int64_array([5, -2, 3])) -} - - -class AttributedRollToRollTest(unittest.TestCase): - def test_axes_shift(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'attr_roll', {'in': 0, 'out': 0}), - ('attr_roll', 'result', {'in': 0, 'out': 0})], {}, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder', 'roll', {'in': 0, 'out': 0}), - ('roll_shift', 'roll', {'in': 1, 'out': 0}), - ('roll_axes', 'roll', {'in': 2, 'out': 0}), - ('roll', 'result')], {}, nodes_with_edges_only=True) - graph.stage = 'front' - - AttributedRollToRoll().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Roll')[0]]['name'] == 'attr_roll') - - def test_axes(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'attr_roll', {'in': 0, 'out': 0}), - ('attr_roll', 'result', {'in': 0, 'out': 0})], {}, nodes_with_edges_only=True) - Node(graph, 'attr_roll')['axes'] = None - - graph_ref = build_graph(nodes_attributes, - [('placeholder', 'roll', {'in': 0, 'out': 0}), - ('roll_shift', 'roll', {'in': 1, 'out': 0}), - ('roll', 'result')], {}, nodes_with_edges_only=True) - graph.stage = 'front' - - 
AttributedRollToRoll().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Roll')[0]]['name'] == 'attr_roll') diff --git a/tools/mo/unit_tests/mo/front/GeLUMerger_Erf_test.py b/tools/mo/unit_tests/mo/front/GeLUMerger_Erf_test.py deleted file mode 100644 index 43dca0a506b28f..00000000000000 --- a/tools/mo/unit_tests/mo/front/GeLUMerger_Erf_test.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from math import sqrt - -from openvino.tools.mo.front.GeLUMerger_Erf import GeLUMergerErf -from openvino.tools.mo.front.common.partial_infer.utils import float_array, int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import const, regular_op, result, build_graph - -ref_nodes = {**regular_op('input', {'type': 'Parameter'}), - **regular_op('gelu', {'type': 'Gelu', 'approximation_mode': 'erf', 'name': 'final_mul'}), - **result('result') - } -ref_edges = [('input', 'gelu'), ('gelu', 'result')] - - -class GeLUMergerErfTest(unittest.TestCase): - nodes = { - **regular_op('input', {'op': 'Parameter', 'type': 'Parameter'}), - **regular_op('mul', {'op': 'Mul'}), - **regular_op('mul0', {'op': 'Mul', 'name': 'final_mul'}), - **regular_op('div', {'op': 'Div'}), - **regular_op('erf', {'op': 'Erf'}), - **regular_op('add', {'op': 'Add'}), - **const('mul_param', float_array([0.5])), - **const('div_param', float_array([sqrt(2.)])), - **const('add_param', int64_array([1])), - **result('result'), - } - - def test_gelu_p1(self): - edges = [('input', 'mul'), - ('mul', 'mul0'), - ('input', 'div'), - ('div', 'erf'), - ('erf', 'add'), - ('add', 'mul0'), - ('mul_param', 'mul'), - ('div_param', 'div'), - ('add_param', 'add'), - ('mul0', 'result')] - - graph = build_graph(self.nodes, edges) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - GeLUMergerErf().find_and_replace_pattern(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(graph.get_op_nodes(op='Gelu')[0].approximation_mode == 'erf') - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'Gelu') - - def test_gelu_p2(self): - edges = [('input', 'mul'), - ('div', 'erf'), - ('erf', 'add'), - ('add', 'mul'), - ('mul', 'mul0'), - ('mul_param', 'mul0'), - ('div_param', 'div'), - ('add_param', 'add'), - ('mul0', 'result')] - - graph = build_graph(self.nodes, edges) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - GeLUMergerErf().find_and_replace_pattern(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(graph.get_op_nodes(op='Gelu')[0].approximation_mode == 'erf') - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'Gelu') - - def test_gelu_p3(self): - edges = [('input', 'mul'), - ('div', 'erf'), - ('erf', 'add'), - ('add', 'mul'), - ('mul', 'mul0'), - ('mul_param', 'mul'), - ('div_param', 'div'), - ('add_param', 'add'), - ('mul0', 'result')] - - graph = build_graph(self.nodes, edges) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - GeLUMergerErf().find_and_replace_pattern(graph) - 
graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(graph.get_op_nodes(op='Gelu')[0].approximation_mode == 'erf') - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'Gelu') diff --git a/tools/mo/unit_tests/mo/front/GeLUMerger_Tanh_test.py b/tools/mo/unit_tests/mo/front/GeLUMerger_Tanh_test.py deleted file mode 100644 index 7b8570e91a72b1..00000000000000 --- a/tools/mo/unit_tests/mo/front/GeLUMerger_Tanh_test.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from math import sqrt - -import numpy as np - -from openvino.tools.mo.front.GeLUMerger_Tanh import GeLUMergerTanh -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes_erf = { - 'inp': {'kind': 'op', 'op': 'AnyOp'}, - 'mul': {'kind': 'op', 'op': 'Mul'}, - 'mul0': {'kind': 'op', 'op': 'Mul'}, - 'div': {'kind': 'op', 'op': 'Div'}, - 'erf': {'kind': 'op', 'op': 'Erf'}, - 'add': {'kind': 'op', 'op': 'Add'}, - 'mul_param': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'div_param': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'add_param': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, -} - -nodes_attributes_tanh = { - 'inp': {'kind': 'op', 'op': 'AnyOp'}, - 'pow': {'kind': 'op', 'op': 'Pow'}, - 'mul': {'kind': 'op', 'op': 'Mul'}, - 'mul0': {'kind': 'op', 'op': 'Mul'}, - 'mul1': {'kind': 'op', 'op': 'Mul'}, - 'mul2': {'kind': 'op', 'op': 'Mul'}, - 'tanh': {'kind': 'op', 'op': 'Tanh'}, - 'add': {'kind': 'op', 'op': 'Add'}, - 'add0': {'kind': 'op', 'op': 'Add'}, - 'mul_param': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'mul0_param': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'mul1_param': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, -} - -nodes_attributes_ref = { - 'inp': {'kind': 'op', 'op': 'AnyOp'}, - 'gelu': {'kind': 'op', 'op': 'Gelu', 'approximation_mode': 'tanh'}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, -} - - -class TestGeLUMergerReplacement(unittest.TestCase): - def test_GeLUMergerTanh(self): - graph = build_graph(nodes_attributes_tanh, - [('inp', 'mul2', {'out': 0}), - ('inp', 'add', {'out': 0}), - ('inp', 'pow', {'out': 0}), - ('pow', 'mul'), - ('mul', 'add'), - ('add', 'mul0'), - ('mul0', 'tanh'), - ('tanh', 'add0'), - ('add0', 'mul1'), - ('mul1', 'mul2'), - ('mul_param', 'mul'), - ('mul0_param', 'mul0'), - ('mul1_param', 'mul1'), - ('mul2', 'out'), - ], - {'mul0_param': {'shape': np.array([1]), 'value': np.array(sqrt(2.0/3.1415926))}, - 'mul1_param': {'shape': np.array([1]), 'value': np.array(0.5)}, - 'mul_param': {'shape': np.array([1]), 'value': np.array(0.044715)} - }, - nodes_with_edges_only=True) - graph_ref = build_graph(nodes_attributes_ref, - [('inp', 'gelu'), - ('gelu', 'out')], - {}, nodes_with_edges_only=True) - graph.stage = 'front' - - replacer = GeLUMergerTanh() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/GlobalPoolingToReduce_test.py b/tools/mo/unit_tests/mo/front/GlobalPoolingToReduce_test.py deleted file mode 100644 index c12e1e14920a8a..00000000000000 --- a/tools/mo/unit_tests/mo/front/GlobalPoolingToReduce_test.py +++ /dev/null @@ -1,55 +0,0 @@ -# 
Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.global_pooling_to_reduce import GlobalPoolingToReduce -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op, result, build_graph_with_edge_attrs, const - -nodes = {**regular_op('input', {'type': 'Parameter'}), - - **regular_op('relu', {'type': 'Relu'}), - **regular_op('pooling', {'type': 'Pooling', 'global_pool': True, 'pool_method': 'avg'}), - - **result('result'), - - **regular_op('rank', {'type': 'Rank'}), - **regular_op('reduce_mean', {'type': 'ReduceMean'}), - **regular_op('range', {'type': 'Range'}), - **const('const_1', int64_array(2)), - **const('const_2', int64_array(1)), - - } -edges = [('input', 'relu', {'in': 0, 'out': 0}), ('relu', 'pooling', {'in': 0, 'out': 0}), - ('pooling', 'result', {'in': 0, 'out': 0})] -ref_edges = [('input', 'relu', {'in': 0, 'out': 0}), ('relu', 'rank', {'in': 0, 'out': 0}), - ('rank', 'range', {'in': 1, 'out': 0}), - ('relu', 'reduce_mean', {'in': 0, 'out': 0}), - ('const_1', 'range', {'in': 0, 'out': 0}), ('const_2', 'range', {'in': 2, 'out': 0}), - ('range', 'reduce_mean', {'in': 1, 'out': 0}), - ('reduce_mean', 'result', {'in': 0, 'out': 0})] - - -class GlobalPoolingToReduceTest(unittest.TestCase): - def test_global_pooling_to_reduce(self): - graph = build_graph_with_edge_attrs(nodes, edges) - - graph_ref = build_graph(nodes, ref_edges) - graph.stage = 'front' - graph.graph['layout'] = 'NCHW' - node = Node(graph, 'relu') - node.out_edge(0)['fw_tensor_debug_info'] = [('Relu_0', 'Relu_tensor')] - - GlobalPoolingToReduce().find_and_replace_pattern(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - node = Node(graph, 'relu') - edge_attrs = node.out_port(0).get_destinations()[0].get_in_edge_attrs() - self.assertTrue('fw_tensor_debug_info' in edge_attrs) - self.assertTrue(edge_attrs['fw_tensor_debug_info'] == [('Relu_0', 'Relu_tensor')]) diff --git a/tools/mo/unit_tests/mo/front/HSigmoid_fusion_test.py b/tools/mo/unit_tests/mo/front/HSigmoid_fusion_test.py deleted file mode 100644 index 02e39f1b720912..00000000000000 --- a/tools/mo/unit_tests/mo/front/HSigmoid_fusion_test.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.HSigmoid_fusion import HSigmoidWithClamp, HSigmoidWithMinMax, HSigmoidWithReluDiv, \ - HSigmoidWithReluMul -from openvino.tools.mo.front.common.partial_infer.utils import float_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const, regular_op, result, build_graph_with_edge_attrs - -ref_nodes = {**regular_op('input', {'type': 'Parameter'}), - **regular_op('hsigmoid', {'type': 'HSigmoid', 'name': 'final_mul'}), - **result('result') - } -ref_edges = [('input', 'hsigmoid'), ('hsigmoid', 'result')] - - -class HSigmoidWithClampTest(unittest.TestCase): - nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('relu6', {'op': 'Clamp'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **const('const_0', float_array([0.0])), - **const('const_3', float_array([3.0])), - 
**const('const_6', float_array([6.0])), - **const('const_1_6', float_array([1.0 / 6.0])), - **result('result'), - } - - edges = [('input', 'add', {'in': 0, 'out': 0}), - ('const_3', 'add', {'in': 1, 'out': 0}), - ('add', 'relu6', {'in': 0, 'out': 0}), - ('const_0', 'relu6', {'in': 1, 'out': 0}), - ('const_6', 'relu6', {'in': 2, 'out': 0}), - ('relu6', 'mul_2', {'in': 1, 'out': 0}), - ('const_1_6', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})] - - def test_hsigmoid_with_clamp(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {}) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - HSigmoidWithClamp().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'HSigmoid') - - def test_hsigmoid_with_clamp_wrong_constant(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {'const_0': {'value': float_array([0.00001])}}) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSigmoidWithClamp().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - def test_hsigmoid_with_clamp_different_tensors(self): - graph = build_graph_with_edge_attrs({ - **regular_op('input', {'type': 'Parameter'}), - **regular_op('input_2', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('relu6', {'op': 'Clamp'}), - **regular_op('mul', {'op': 'Mul'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **const('const_0', float_array([0.0])), - **const('const_3', float_array([3.0])), - **const('const_6', float_array([6.0])), - **const('const_1_6', float_array([1.0 / 6.0])), - **result('result'), - }, [('input', 'mul', {'in': 0, 'out': 0}), - ('input_2', 'add', {'in': 0, 'out': 0}), - ('const_3', 'add', {'in': 1, 'out': 0}), - ('add', 'relu6', {'in': 0, 'out': 0}), - ('const_0', 'relu6', {'in': 1, 'out': 0}), - ('const_6', 'relu6', {'in': 2, 'out': 0}), - ('relu6', 'mul', {'in': 1, 'out': 0}), - ('mul', 'mul_2', {'in': 0, 'out': 0}), - ('const_1_6', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})]) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSigmoidWithClamp().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - -class HSigmoidWithMinMaxTest(unittest.TestCase): - nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('max', {'op': 'Maximum'}), - **regular_op('min', {'op': 'Minimum'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **const('const_0', float_array([0.0])), - **const('const_3', float_array([3.0])), - **const('const_6', float_array([6.0])), - **const('const_1_6', float_array([1.0 / 6.0])), - **result('result'), - } - - edges = [('input', 'add', {'in': 0, 'out': 0}), - ('const_3', 'add', {'in': 1, 'out': 0}), - ('add', 'max', {'in': 0, 'out': 0}), - ('const_0', 'max', {'in': 1, 'out': 0}), - ('max', 'min', {'in': 0, 'out': 0}), - ('const_6', 'min', {'in': 1, 'out': 0}), - ('min', 'mul_2', {'in': 0, 'out': 0}), - ('const_1_6', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})] - - def test_hsigmoid_with_min_max(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {}) - - graph_ref = build_graph(ref_nodes, 
ref_edges) - graph.stage = 'front' - - HSigmoidWithMinMax().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'HSigmoid') - - def test_hsigmoid_with_min_max_wrong_constant(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {'const_0': {'value': float_array([0.00001])}}) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSigmoidWithMinMax().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - def test_hsigmoid_with_min_max_different_tensors(self): - graph = build_graph_with_edge_attrs({ - **regular_op('input', {'type': 'Parameter'}), - **regular_op('input_2', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('max', {'op': 'Maximum'}), - **regular_op('min', {'op': 'Minimum'}), - **regular_op('mul', {'op': 'Mul'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **const('const_0', float_array([0.0])), - **const('const_3', float_array([3.0])), - **const('const_6', float_array([6.0])), - **const('const_1_6', float_array([1.0 / 6.0])), - **result('result'), - }, [('input_2', 'mul', {'in': 1, 'out': 0}), - ('input', 'add', {'in': 0, 'out': 0}), - ('const_3', 'add', {'in': 1, 'out': 0}), - ('add', 'max', {'in': 0, 'out': 0}), - ('const_0', 'max', {'in': 1, 'out': 0}), - ('max', 'min', {'in': 0, 'out': 0}), - ('const_6', 'min', {'in': 1, 'out': 0}), - ('min', 'mul', {'in': 0, 'out': 0}), - ('mul', 'mul_2', {'in': 0, 'out': 0}), - ('const_1_6', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})]) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSigmoidWithMinMax().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - -class HSigmoidWithReluDivTest(unittest.TestCase): - nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('relu', {'op': 'ReLU'}), - **regular_op('min', {'op': 'Minimum'}), - **regular_op('div', {'op': 'Div', 'name': 'final_div'}), - **const('add_const', float_array([3.0])), - **const('min_const', float_array([6.0])), - **const('div_const', float_array([6.0])), - **result('result'), - } - - edges = [('input', 'add', {'in': 0, 'out': 0}), - ('add_const', 'add', {'in': 1, 'out': 0}), - ('add', 'relu', {'in': 0, 'out': 0}), - ('relu', 'min', {'in': 0, 'out': 0}), - ('min_const', 'min', {'in': 1, 'out': 0}), - ('min', 'div', {'in': 0, 'out': 0}), - ('div_const', 'div', {'in': 1, 'out': 0}), - ('div', 'result', {'in': 0, 'out': 0})] - - def test_hsigmoid_with_relu_div(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {}) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - HSigmoidWithReluDiv().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_div')) == 1 and - graph.get_op_nodes(name='final_div')[0].op == 'HSigmoid') - self.assertTrue(graph.get_op_nodes(name='final_div')[0].out_nodes()[0].node == 'result') - - def test_hsigmoid_with_relu_div_wrong_constant(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {'add_const': {'value': float_array([0.00001])}}) - - graph_ref = graph.copy() - graph.stage = 'front' - - 
HSigmoidWithReluDiv().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - def test_hsigmoid_with_relu_div_different_tensors(self): - graph = build_graph_with_edge_attrs({ - **regular_op('input', {'type': 'Parameter'}), - **regular_op('input_2', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('max', {'op': 'Maximum'}), - **regular_op('min', {'op': 'Minimum'}), - **regular_op('mul', {'op': 'Mul'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **const('const_0', float_array([0.0])), - **const('const_3', float_array([3.0])), - **const('const_6', float_array([6.0])), - **const('const_1_6', float_array([1.0 / 6.0])), - **result('result'), - }, [('input_2', 'mul', {'in': 1, 'out': 0}), - ('input', 'add', {'in': 0, 'out': 0}), - ('const_3', 'add', {'in': 1, 'out': 0}), - ('add', 'max', {'in': 0, 'out': 0}), - ('const_0', 'max', {'in': 1, 'out': 0}), - ('max', 'min', {'in': 0, 'out': 0}), - ('const_6', 'min', {'in': 1, 'out': 0}), - ('min', 'mul', {'in': 0, 'out': 0}), - ('mul', 'mul_2', {'in': 0, 'out': 0}), - ('const_1_6', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})]) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSigmoidWithReluDiv().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - -class HSigmoidWithReluMulTest(unittest.TestCase): - nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('relu', {'op': 'ReLU'}), - **regular_op('min', {'op': 'Minimum'}), - **regular_op('mul', {'op': 'Mul', 'name': 'final_mul'}), - **const('add_const', float_array([3.0])), - **const('min_const', float_array([6.0])), - **const('mul_const', float_array([1.0/6.0])), - **result('result'), - } - - edges = [('input', 'add', {'in': 0, 'out': 0}), - ('add_const', 'add', {'in': 1, 'out': 0}), - ('add', 'relu', {'in': 0, 'out': 0}), - ('relu', 'min', {'in': 0, 'out': 0}), - ('min_const', 'min', {'in': 1, 'out': 0}), - ('min', 'mul', {'in': 0, 'out': 0}), - ('mul_const', 'mul', {'in': 1, 'out': 0}), - ('mul', 'result', {'in': 0, 'out': 0})] - - def test_hsigmoid_with_relu_mul(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {}) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - HSigmoidWithReluMul().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'HSigmoid') - self.assertTrue(graph.get_op_nodes(name='final_mul')[0].out_nodes()[0].node == 'result') - - def test_hsigmoid_with_relu_mul_wrong_constant(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {'add_const': {'value': float_array([0.00001])}}) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSigmoidWithReluMul().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - def test_hsigmoid_with_relu_mul_different_tensors(self): - graph = build_graph_with_edge_attrs({ - **regular_op('input', {'type': 'Parameter'}), - **regular_op('input_2', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('max', {'op': 'Maximum'}), - **regular_op('min', {'op': 'Minimum'}), - **regular_op('mul', {'op': 'Mul'}), - **regular_op('mul_2', {'op': 
'Mul', 'name': 'final_mul'}), - **const('const_0', float_array([0.0])), - **const('const_3', float_array([3.0])), - **const('const_6', float_array([6.0])), - **const('const_1_6', float_array([1.0 / 6.0])), - **result('result'), - }, [('input_2', 'mul', {'in': 1, 'out': 0}), - ('input', 'add', {'in': 0, 'out': 0}), - ('const_3', 'add', {'in': 1, 'out': 0}), - ('add', 'max', {'in': 0, 'out': 0}), - ('const_0', 'max', {'in': 1, 'out': 0}), - ('max', 'min', {'in': 0, 'out': 0}), - ('const_6', 'min', {'in': 1, 'out': 0}), - ('min', 'mul', {'in': 0, 'out': 0}), - ('mul', 'mul_2', {'in': 0, 'out': 0}), - ('const_1_6', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})]) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSigmoidWithReluMul().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/HSwish_fusing_test.py b/tools/mo/unit_tests/mo/front/HSwish_fusing_test.py deleted file mode 100644 index 4415d438904256..00000000000000 --- a/tools/mo/unit_tests/mo/front/HSwish_fusing_test.py +++ /dev/null @@ -1,183 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.HSwish_fusion import HSwishWithClamp, HSwishWithMinMax -from openvino.tools.mo.front.common.partial_infer.utils import float_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const, regular_op, result, build_graph_with_edge_attrs - -ref_nodes = {**regular_op('input', {'type': 'Parameter'}), - **regular_op('hswish', {'type': 'HSwish', 'name': 'final_mul'}), - **result('result') - } -ref_edges = [('input', 'hswish'), ('hswish', 'result')] - - -class HSwishWithClampTest(unittest.TestCase): - nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('relu6', {'op': 'Clamp'}), - **regular_op('mul', {'op': 'Mul'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **const('const_0', float_array([0.0])), - **const('const_3', float_array([3.0])), - **const('const_6', float_array([6.0])), - **const('const_1_6', float_array([1.0 / 6.0])), - **result('result'), - } - - edges = [('input', 'mul', {'in': 0, 'out': 0}), - ('input', 'add', {'in': 0, 'out': 0}), - ('const_3', 'add', {'in': 1, 'out': 0}), - ('add', 'relu6', {'in': 0, 'out': 0}), - ('const_0', 'relu6', {'in': 1, 'out': 0}), - ('const_6', 'relu6', {'in': 2, 'out': 0}), - ('relu6', 'mul', {'in': 1, 'out': 0}), - ('mul', 'mul_2', {'in': 0, 'out': 0}), - ('const_1_6', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})] - - def test_hswish_with_clamp(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {}) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - HSwishWithClamp().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'HSwish') - - def test_hswish_with_clamp_wrong_constant(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {'const_0': {'value': float_array([0.00001])}}) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSwishWithClamp().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - 
self.assertTrue(flag, resp) - - def test_hswish_with_clamp_different_tensors(self): - graph = build_graph_with_edge_attrs({ - **regular_op('input', {'type': 'Parameter'}), - **regular_op('input_2', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('relu6', {'op': 'Clamp'}), - **regular_op('mul', {'op': 'Mul'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **const('const_0', float_array([0.0])), - **const('const_3', float_array([3.0])), - **const('const_6', float_array([6.0])), - **const('const_1_6', float_array([1.0 / 6.0])), - **result('result'), - }, [('input', 'mul', {'in': 0, 'out': 0}), - ('input_2', 'add', {'in': 0, 'out': 0}), - ('const_3', 'add', {'in': 1, 'out': 0}), - ('add', 'relu6', {'in': 0, 'out': 0}), - ('const_0', 'relu6', {'in': 1, 'out': 0}), - ('const_6', 'relu6', {'in': 2, 'out': 0}), - ('relu6', 'mul', {'in': 1, 'out': 0}), - ('mul', 'mul_2', {'in': 0, 'out': 0}), - ('const_1_6', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})]) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSwishWithClamp().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - -class HSwishWithMinMaxTest(unittest.TestCase): - nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('max', {'op': 'Maximum'}), - **regular_op('min', {'op': 'Minimum'}), - **regular_op('mul', {'op': 'Mul'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **const('const_0', float_array([0.0])), - **const('const_3', float_array([3.0])), - **const('const_6', float_array([6.0])), - **const('const_1_6', float_array([1.0 / 6.0])), - **result('result'), - } - - edges = [('input', 'mul', {'in': 1, 'out': 0}), - ('input', 'add', {'in': 0, 'out': 0}), - ('const_3', 'add', {'in': 1, 'out': 0}), - ('add', 'max', {'in': 0, 'out': 0}), - ('const_0', 'max', {'in': 1, 'out': 0}), - ('max', 'min', {'in': 0, 'out': 0}), - ('const_6', 'min', {'in': 1, 'out': 0}), - ('min', 'mul', {'in': 0, 'out': 0}), - ('mul', 'mul_2', {'in': 0, 'out': 0}), - ('const_1_6', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})] - - def test_hswish_with_min_max(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {}) - - graph_ref = build_graph(ref_nodes, ref_edges) - graph.stage = 'front' - - HSwishWithMinMax().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_mul')) == 1 and - graph.get_op_nodes(name='final_mul')[0].op == 'HSwish') - - def test_hswish_with_min_max_wrong_constant(self): - graph = build_graph_with_edge_attrs(self.nodes, self.edges, {'const_0': {'value': float_array([0.00001])}}) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSwishWithMinMax().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - def test_hswish_with_min_max_different_tensors(self): - graph = build_graph_with_edge_attrs({ - **regular_op('input', {'type': 'Parameter'}), - **regular_op('input_2', {'type': 'Parameter'}), - **regular_op('add', {'op': 'Add'}), - **regular_op('max', {'op': 'Maximum'}), - **regular_op('min', {'op': 'Minimum'}), - **regular_op('mul', {'op': 'Mul'}), - **regular_op('mul_2', {'op': 'Mul', 'name': 'final_mul'}), - **const('const_0', float_array([0.0])), - **const('const_3', 
float_array([3.0])), - **const('const_6', float_array([6.0])), - **const('const_1_6', float_array([1.0 / 6.0])), - **result('result'), - }, [('input_2', 'mul', {'in': 1, 'out': 0}), - ('input', 'add', {'in': 0, 'out': 0}), - ('const_3', 'add', {'in': 1, 'out': 0}), - ('add', 'max', {'in': 0, 'out': 0}), - ('const_0', 'max', {'in': 1, 'out': 0}), - ('max', 'min', {'in': 0, 'out': 0}), - ('const_6', 'min', {'in': 1, 'out': 0}), - ('min', 'mul', {'in': 0, 'out': 0}), - ('mul', 'mul_2', {'in': 0, 'out': 0}), - ('const_1_6', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2', 'result', {'in': 0, 'out': 0})]) - - graph_ref = graph.copy() - graph.stage = 'front' - - HSwishWithMinMax().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/LayerNorm_test.py b/tools/mo/unit_tests/mo/front/LayerNorm_test.py deleted file mode 100644 index 2bb15c95aea616..00000000000000 --- a/tools/mo/unit_tests/mo/front/LayerNorm_test.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.LayerNorm import LayerNorm -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class TestMVNPatternReplacement(unittest.TestCase): - nodes_attributes_mvn = { - 'inp': {'kind': 'op', 'op': 'AnyOp'}, - 'pool0': {'kind': 'op', 'op': 'ReduceMean'}, - 'pool1': {'kind': 'op', 'op': 'ReduceMean'}, - 'cast': {'kind': 'op', 'op': 'Cast'}, - 'pow': {'kind': 'op', 'op': 'Pow'}, - 'div': {'kind': 'op', 'op': 'Div'}, - 'sqrt': {'kind': 'op', 'op': 'Pow'}, - 'add': {'kind': 'op', 'op': 'Add'}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'add_param': {'kind': 'op', 'op': 'Const', 'shape': np.array([1]), 'value': np.array(1e-06, dtype=np.float32)}, - 'pow_param': {'kind': 'op', 'op': 'Const', 'shape': np.array([1]), 'value': np.array(0.5, dtype=np.float32)}, - 'pool0_param': {'kind': 'op', 'op': 'Const', 'shape': np.array([1]), 'value': np.array(-1, dtype=np.int32)}, - 'pool1_param': {'kind': 'op', 'op': 'Const', 'shape': np.array([1]), 'value': np.array(-1, dtype=np.int32)}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, - } - - nodes_attributes_ref = { - 'inp': {'kind': 'op', 'op': 'AnyOp'}, - 'mvn': {'kind': 'op', 'op': 'MVN', 'eps': 1e-6, 'normalize_variance': 1, 'eps_mode': 'inside_sqrt'}, - 'mvn_param': {'kind': 'op', 'op': 'Const', 'shape': np.array([]), 'value': np.array(-1, dtype=np.int32)}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, - } - - def test_MVNPatternReplacement_test_1(self): - graph = build_graph(self.nodes_attributes_mvn, - [('inp', 'pool0', {'out': 0}), - ('inp', 'sub', {'out': 0}), - ('pool0', 'sub'), - ('sub', 'pow'), - ('pow', 'pool1'), - ('pool1', 'add'), - ('add', 'sqrt'), - ('sqrt', 'div'), - ('sub', 'div'), - ('div', 'out'), - ('pow_param', 'sqrt'), - ('add_param', 'add'), - ('pool0_param', 'pool0'), - ('pool1_param', 'pool1'), - ], - nodes_with_edges_only=True) - graph_ref = build_graph(self.nodes_attributes_ref, - [('inp', 'mvn'), - ('mvn_param', 'mvn'), - ('mvn', 'out')], - nodes_with_edges_only=True) - graph.stage = 'front' - - replacer = LayerNorm() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_MVNPatternReplacement_test_2(self): - graph = build_graph(self.nodes_attributes_mvn, - [('inp', 'pool0', 
{'out': 0}), - ('inp', 'sub', {'out': 0}), - ('pool0', 'sub'), - ('sub', 'cast'), - ('cast', 'pow'), - ('pow', 'pool1'), - ('pool1', 'add'), - ('add', 'sqrt'), - ('sqrt', 'div'), - ('sub', 'div'), - ('div', 'out'), - ('pow_param', 'sqrt'), - ('add_param', 'add'), - ('pool0_param', 'pool0'), - ('pool1_param', 'pool1'), - ], - nodes_with_edges_only=True) - graph_ref = build_graph(self.nodes_attributes_ref, - [('inp', 'mvn'), - ('mvn_param', 'mvn'), - ('mvn', 'out')], - nodes_with_edges_only=True) - graph.stage = 'front' - - replacer = LayerNorm() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/Log1p_test.py b/tools/mo/unit_tests/mo/front/Log1p_test.py deleted file mode 100644 index da114f9620dcf9..00000000000000 --- a/tools/mo/unit_tests/mo/front/Log1p_test.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.Log1p import Log1p -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder': {'shape': np.array([4, 5, 6]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - # Log1p operation - 'Log1p': {'kind': 'op', 'op': 'Log1p'}, - # Test operation - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': None}, - # Add and Log operations - 'const': {'kind': 'op', 'op': 'Const', 'value': np.ones([1], dtype=np.float32)}, - 'add': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - 'log': {'type': 'Log', 'kind': 'op', 'op': 'Log'}, -} - - -class TestLog1p(unittest.TestCase): - def test_log1p_test(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'Log1p'), - ('Log1p', 'last') - ], nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('const', 'add'), - ('placeholder', 'add'), - ('add', 'log'), - ('log', 'last'), - ], nodes_with_edges_only=True) - - graph.stage = 'front' - - tested_class = Log1p() - tested_class.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/OneHotDepthNormalizer_test.py b/tools/mo/unit_tests/mo/front/OneHotDepthNormalizer_test.py deleted file mode 100644 index 71708303e3597c..00000000000000 --- a/tools/mo/unit_tests/mo/front/OneHotDepthNormalizer_test.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.OneHotDepthNormalizer import OneHotDepthNormalizer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, \ - regular_op, const - - -class OneHotDepthNormalizerTest(unittest.TestCase): - def test(self): - nodes = { - **regular_op('input', {'type': 'Parameter'}), - **const('depth', int64_array([2])), - **regular_op('onehot', {'type': 'OneHot', 'kind': 'op', 'op': 'OneHot'}), - - **regular_op('reshape', {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'}), - **const('reshape_dims', int64_array([])), - **result('result'), - } - edges = [('input', 'onehot'), - ('depth', 'onehot'), - ('onehot', 'result'), - ] - graph = build_graph(nodes, edges) - - 
graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - edges_ref = [('input', 'onehot'), - ('depth', 'reshape'), - ('reshape_dims', 'reshape'), - ('reshape', 'onehot'), - ('onehot', 'result'), - ] - - graph_ref = build_graph(nodes, edges_ref) - - OneHotDepthNormalizer().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/Pack_test.py b/tools/mo/unit_tests/mo/front/Pack_test.py deleted file mode 100644 index b4c0dcf42535c7..00000000000000 --- a/tools/mo/unit_tests/mo/front/Pack_test.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -import numpy as np -import pytest - -from openvino.tools.mo.front.Pack import Pack -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_0': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_3': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - # Pack operation - 'pack': {'axis': None, 'type': None, 'kind': 'op', 'op': 'Pack'}, - # Test operation - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': None}, - # Unsqueeze, Concat and Const operations - 'const_1': {'value': None, 'type': None, 'kind': 'op', 'op': 'Const'}, - 'Unsqueeze_0': {'type': 'Unsqueeze', 'kind': 'op', 'op': 'Unsqueeze'}, - 'Unsqueeze_1': {'type': 'Unsqueeze', 'kind': 'op', 'op': 'Unsqueeze'}, - 'Unsqueeze_2': {'type': 'Unsqueeze', 'kind': 'op', 'op': 'Unsqueeze'}, - 'Unsqueeze_3': {'type': 'Unsqueeze', 'kind': 'op', 'op': 'Unsqueeze'}, - 'Unsqueeze_0_axis': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': None, 'value': None}, - 'Unsqueeze_1_axis': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': None, 'value': None}, - 'Unsqueeze_2_axis': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': None, 'value': None}, - 'Unsqueeze_3_axis': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': None, 'value': None}, - 'concat_1': {'axis': None, 'type': 'Concat', 'kind': 'op', 'op': 'Concat'}, -} - - -class TestPackTest(): - - @pytest.mark.parametrize("num_inputs, num_placeholders, axis", [(2, 2, 0), (3, 3, 0), (4, 4, 0), - (4, 4, 1), (4, 1, 0), (4, 1, 1)]) - def test_pack_test_all(self, num_inputs: int, num_placeholders: int, axis: list): - - graph_edges = [] - for i in range(num_inputs - num_placeholders + 1): - for j in range(num_placeholders): - graph_edges.append(('placeholder_{}'.format(j), 'pack')) - graph_edges.append(('pack', 'last')) - - update_graph_attributes = {} - for i in range(num_placeholders): - update_graph_attributes['placeholder_{}'.format(i)] = {'shape': np.array([1, 227, 227, 3])} - update_graph_attributes['pack'] = {'axis': axis} - - graph = build_graph(nodes_attributes, graph_edges, update_graph_attributes, - nodes_with_edges_only=True) - - graph_ref_edges = [] - for i in range(num_inputs - num_placeholders + 1): - for j in range(num_placeholders): - graph_ref_edges.append(('placeholder_{}'.format(j), 'Unsqueeze_{}'.format(i + j))) - graph_ref_edges.append(('Unsqueeze_{}'.format(i + j), 'concat_1')) - graph_ref_edges.append(('concat_1', 'last')) - 
- update_graph_ref_attributes = {} - for i in range(num_placeholders): - update_graph_ref_attributes['placeholder_{}'.format(i)] = {'shape': np.array([1, 227, 227, 3])} - for i in range(num_inputs): - graph_ref_edges.append(('Unsqueeze_{}_axis'.format(i), 'Unsqueeze_{}'.format(i))) - update_graph_ref_attributes['Unsqueeze_{}_axis'.format(i)] = {'shape': int64_array([1]), - 'value': int64_array([axis])} - update_graph_ref_attributes['concat_1'] = {'axis': axis} - - graph_ref = build_graph(nodes_attributes, graph_ref_edges, update_graph_ref_attributes, - nodes_with_edges_only=True) - - graph.stage = 'front' - - replacer = Pack() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/RollWithEmptyAxesReplacer_test.py b/tools/mo/unit_tests/mo/front/RollWithEmptyAxesReplacer_test.py deleted file mode 100644 index 636718983f2b3c..00000000000000 --- a/tools/mo/unit_tests/mo/front/RollWithEmptyAxesReplacer_test.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.RollWithEmptyAxesReplacer import RollWithEmptyAxesReplacer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const, result, regular_op - -nodes_attributes = { - **regular_op('placeholder', {'type': 'Parameter'}), - **regular_op('roll', {'type': 'Roll', 'op': 'Roll', 'axes': int64_array([-1, 2, 3]), 'shift': int64_array([5, -2, 3])}), - **const('roll_shift', int64_array([5, -2, 3])), - **result('result'), - - **regular_op('shape_of', {'type': 'ShapeOf'}), - **regular_op('reshape1', {'type': 'Reshape'}), - **regular_op('new_roll', {'type': 'Roll'}), - **regular_op('reshape2', {'type': 'Reshape'}), - - **const('min_one_const', int64_array([-1])), - **const('zero_const', int64_array([0])) -} - - -class RollWithEmptyAxesReplacerTest(unittest.TestCase): - def test_transform(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'roll', {'in': 0, 'out': 0}), - ('roll_shift', 'roll', {'in': 1, 'out': 0}), - ('roll', 'result', {'in': 0, 'out': 0})], {}, nodes_with_edges_only=True) - Node(graph, 'roll').add_input_port(2) - - graph_ref = build_graph(nodes_attributes, - [('placeholder', 'reshape1', {'in': 0, 'out': 0}), - ('min_one_const', 'reshape1', {'in': 1, 'out': 0}), - ('reshape1', 'new_roll', {'in': 0, 'out': 0}), - ('roll_shift', 'new_roll', {'in': 1, 'out': 0}), - ('zero_const', 'new_roll', {'in': 2, 'out': 0}), - ('new_roll', 'reshape2', {'in': 0, 'out': 0}), - ('placeholder', 'shape_of', {'in': 0, 'out': 0}), - ('shape_of', 'reshape2', {'in': 1, 'out': 0}), - ('reshape2', 'result', {'in': 0, 'out': 0})], {}, nodes_with_edges_only=True) - - graph.stage = 'front' - - RollWithEmptyAxesReplacer().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - shape_of_nodes = graph.get_op_nodes(type='ShapeOf') - self.assertTrue(len(shape_of_nodes) == 1) - shape_of = shape_of_nodes[0] - self.assertTrue(shape_of.in_node().soft_get('name') == 'placeholder') diff --git a/tools/mo/unit_tests/mo/front/ThresholdedReluDecomposition_test.py b/tools/mo/unit_tests/mo/front/ThresholdedReluDecomposition_test.py deleted file 
mode 100644 index 8c506f7ead613d..00000000000000 --- a/tools/mo/unit_tests/mo/front/ThresholdedReluDecomposition_test.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.ThresholdedReluDecomposition import ThresholdedReluDecomposition -from openvino.tools.mo.front.common.partial_infer.utils import float_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - -nodes_attributes = { - 'parameter': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'trelu': {'type': None, 'kind': 'op', 'op': 'ThresholdedRelu', 'alpha': 0.75, 'name': 'my_trelu'}, - 'result': {'type': 'Result', 'value': None, 'kind': 'op', 'op': 'Result'}, - - 'cast': {'type': 'Convert', 'kind': 'op', 'op': 'Cast'}, - 'greater': {'type': 'Greater', 'kind': 'op', 'op': 'Greater'}, - 'mul': {'type': 'Multiply', 'kind': 'op', 'op': 'Mul', 'name': 'my_trelu'}, - 'squeeze2': {'type': 'Squeeze', 'kind': 'op', 'op': 'Squeeze'}, - **const('alpha', float_array([0.75])), -} - - -class ThresholdedReluDecompositionTest(unittest.TestCase): - def test_trelu(self): - graph = build_graph(nodes_attributes, - [('parameter', 'trelu', {'in': 0, 'out': 0}), - ('trelu', 'result', {'in': 0, 'out': 0}), - ], nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('parameter', 'greater', {'in': 0, 'out': 0}), - ('alpha', 'greater', {'in': 1, 'out': 0}), - ('greater', 'cast', {'in': 0, 'out': 0}), - ('parameter', 'mul', {'in': 0, 'out': 0}), - ('cast', 'mul', {'in': 1, 'out': 0}), - ('mul', 'result', {'in': 0, 'out': 0}), - ], nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - ThresholdedReluDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='my_trelu')) == 1 and - graph.get_op_nodes(name='my_trelu')[0].op == 'Mul') diff --git a/tools/mo/unit_tests/mo/front/__init__.py b/tools/mo/unit_tests/mo/front/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/binary_quantize_normalization_test.py b/tools/mo/unit_tests/mo/front/binary_quantize_normalization_test.py deleted file mode 100644 index d60fac14b60f04..00000000000000 --- a/tools/mo/unit_tests/mo/front/binary_quantize_normalization_test.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.binary_quantize_normalization import BinaryFakeQuantizeNormalization -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -graph_nodes = { - '0': {'name': 'input', 'kind': 'op', 'op': 'Parameter'}, - '1': {'name': 'mi_i', 'kind': 'op', 'op': 'Const'}, - '2': {'name': 'ma_i', 'kind': 'op', 'op': 'Const'}, - '3': {'name': 'mi_o', 'kind': 'op', 'op': 'Const'}, - '4': {'name': 'mi_o', 'kind': 'op', 'op': 'Const'}, - - 'add': {'kind': 'op', 'op': 'Add'}, - 'const': {'kind': 'op', 'op': 'Const', 'value': np.array(0.5)}, - 'mul': {'kind': 'op', 'op': 'Mul'}, - - 'quantize': {'name': 'quantize', 'levels': 2, 'kind': 'op', 'op': 'FakeQuantize'}, - - 'output': {'name': 'output1', 'kind': 'op', 'op': 'Result', 'type': 'Result'}, 
-} - -graph_edges = [ - ('0', 'quantize', {'in': 0}), - ('1', 'quantize', {'in': 1}), - ('2', 'quantize', {'in': 2}), - ('3', 'quantize', {'in': 3}), - ('4', 'quantize', {'in': 4}), - ('quantize', 'output'), -] - -graph_ref_edges = [ - ('0', 'quantize', {'in': 0}), - ('1', 'add'), - ('2', 'add'), - ('add', 'mul'), - ('const', 'mul'), - ('mul', 'quantize', {'in': 1, 'out': 0}), - ('mul', 'quantize', {'in': 2, 'out': 0}), - ('3', 'quantize', {'in': 3}), - ('4', 'quantize', {'in': 4}), - ('quantize', 'output'), -] - - -class TestBinaryQuantizeNormalization(unittest.TestCase): - def test_binary_quantize_normalizer(self): - graph = build_graph(graph_nodes, graph_edges, nodes_with_edges_only=True) - graph.stage = 'front' - BinaryFakeQuantizeNormalization().find_and_replace_pattern(graph) - graph.clean_up() - - graph_ref = build_graph(graph_nodes, graph_ref_edges) - graph_ref.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'output') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/broadcast_with_range_test.py b/tools/mo/unit_tests/mo/front/broadcast_with_range_test.py deleted file mode 100644 index 3c5046724d919a..00000000000000 --- a/tools/mo/unit_tests/mo/front/broadcast_with_range_test.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.broadcast_with_range import ExpandRangeConstant -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \ - regular_op_with_empty_data - - -class TestRangeBroadcast(unittest.TestCase): - def test_broadcast_with_range_positive_test(self): - graph = build_graph({ - **regular_op_with_shaped_data('shape', [2], {'type': 'Parameter'}), - **valued_const_with_data('value', np.arange(0, 384).reshape((1, 384))), - **regular_op_with_empty_data('bc', {'type': 'Broadcast'}), - **result(), - }, [ - *connect('value', '0:bc'), - *connect('shape', '1:bc'), - *connect('bc', 'output'), - ], nodes_with_edges_only=True) - ExpandRangeConstant().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attrs={ - **regular_op_with_shaped_data('shape', [2], {'type': 'Parameter'}), - **valued_const_with_data('value', np.arange(0, 384).reshape((1, 384))), - **regular_op_with_empty_data('bc', {'type': 'Broadcast'}), - **regular_op_with_empty_data('shapeof', {'type': 'ShapeOf'}), - **regular_op_with_empty_data('select', {'type': 'Select'}), - **regular_op_with_empty_data('gather', {'type': 'Gather'}), - 'gather_const': {'type': 'Gather', 'kind': 'op', 'op': 'Gather'}, - 'equal': {'type': 'Equal', 'kind': 'op', 'op': 'Equal'}, - - # start - **valued_const_with_data('start', np.array(0)), - # limit - **valued_const_with_data('minus_one_0', np.array(-1)), - **valued_const_with_data('zero_0', np.array(0)), - **valued_const_with_data('minus_one_1', np.array(-1)), - **valued_const_with_data('zero_1', np.array(0)), - # delta - **valued_const_with_data('delta', np.array(1)), - **regular_op_with_shaped_data('range', [1, 384], {'type': 'Range'}), - - # keep dims - **valued_const_with_data('axes', np.array([0])), - **regular_op_with_shaped_data('keep_shape', [1, 384], {'type': 'Unsqueeze'}), - - **valued_const_with_data('one', np.array(1)), - - **result(), - }, - edges=[ - *connect('value', 'shapeof'), - *connect('gather', '0:equal'), - ('gather', 'select', {'in': 2, 'out': 0}), - 
('gather_const', 'select', {'in': 1}), - ('equal', 'select', {'in': 0}), - *connect('minus_one_0', '1:gather'), - *connect('zero_0', '2:gather'), - *connect('shapeof', '0:gather_const'), - *connect('minus_one_1', '1:gather_const'), - *connect('zero_1', '2:gather_const'), - *connect('start', '0:range'), - *connect('select', '1:range'), - *connect('delta', '2:range'), - *connect('range', '0:keep_shape'), - *connect('axes', '1:keep_shape'), - *connect('keep_shape', '0:bc'), - *connect('one', '1:equal'), - *connect('shape', '1:bc'), - ('shape_d', 'gather', {'out': 0, 'in': 0}), - *connect('bc', 'output'), - ], - update_attributes={ - 'range_d': {'value': np.arange(0, 384).reshape((1, 384))}, - 'keep_shape_d': {'value': np.arange(0, 384).reshape((1, 384))}, - }) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/caffe/MVNCaffeToMVN_test.py b/tools/mo/unit_tests/mo/front/caffe/MVNCaffeToMVN_test.py deleted file mode 100644 index 5f03e3d087f4ad..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/MVNCaffeToMVN_test.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.caffe.MVNCaffeToMVN import MVNCaffeToMVN -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, const, connect_front - -nodes = { - **regular_op_with_empty_data('input', {'type': 'Parameter'}), - **regular_op_with_empty_data('mvn_caffe', {'op': 'MVNCaffe'}), - **result(), - - # nodes after replacement - **const('start_1', np.array(1)), - **const('start_2', np.array(2)), - **const('step', np.array(1)), - **regular_op_with_empty_data('rank', {'op': 'Rank', 'type': None}), - **regular_op_with_empty_data('range', {'op': 'Range', 'type': None}), - **regular_op_with_empty_data('mvn', {'op': 'MVN', 'type': None}), -} - - -class MVNCaffeToMVNTest(unittest.TestCase): - def test_mvn_normalizer(self): - graph = build_graph(nodes, [('input', 'mvn_caffe'), - ('mvn_caffe', 'output')], - {'mvn_caffe': {'across_channels': 0}}, - nodes_with_edges_only=True) - graph.stage = 'front' - - MVNCaffeToMVN().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [('input', 'mvn', {'out': 0}), - ('input', 'rank', {'out': 0}), - *connect_front('start_2', '0:range'), - *connect_front('rank', '1:range'), - *connect_front('step', '2:range'), - *connect_front('range', '1:mvn'), - ('mvn', 'output')], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_mvn_normalizer_across_channels(self): - graph = build_graph(nodes, [('input', 'mvn_caffe'), - ('mvn_caffe', 'output')], - {'mvn_caffe': {'across_channels': 1}}, - nodes_with_edges_only=True) - graph.stage = 'front' - - MVNCaffeToMVN().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [('input', 'mvn', {'out': 0}), - ('input', 'rank', {'out': 0}), - *connect_front('start_1', '0:range'), - *connect_front('rank', '1:range'), - *connect_front('step', '2:range'), - *connect_front('range', '1:mvn'), - ('mvn', 'output')], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/caffe/__init__.py 
b/tools/mo/unit_tests/mo/front/caffe/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/caffe/argmax_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/argmax_ext_test.py deleted file mode 100644 index 6346098eae577b..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/argmax_ext_test.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.argmax_ext import ArgMaxFrontExtractor -from openvino.tools.mo.ops.argmax import ArgMaxOp, arg_ops_infer -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeArgMaxProtoLayer: - def __init__(self, val): - self.argmax_param = val - - -class TestArgMaxExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['ArgMax'] = ArgMaxOp - - def test_argmax_no_pb_no_ml(self): - self.assertRaises(AttributeError, ArgMaxFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.argmax_ext.merge_attrs') - def test_argmax_ext_ideal_numbers(self, merge_attrs_mock): - params = { - 'out_max_val': True, - 'top_k': 100, - 'axis': 2 - } - merge_attrs_mock.return_value = { - **params - } - - fake_pl = FakeArgMaxProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - ArgMaxFrontExtractor.extract(fake_node) - - exp_res = { - 'out_max_val': True, - 'top_k': 100, - 'axis': 2, - 'infer': arg_ops_infer, - 'remove_values_output': True, - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/axpy_test.py b/tools/mo/unit_tests/mo/front/caffe/axpy_test.py deleted file mode 100644 index 3eec6d41f2705a..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/axpy_test.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.caffe.axpy import AxpyToSSandAdd -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph_with_edge_attrs - - -class TestAxpyReplacer(unittest.TestCase): - def test_axpy(self): - nodes = { - 'node_1': {'kind': 'op', 'type': 'Identity', 'op': 'Parameter'}, - 'node_2': {'kind': 'op', 'type': 'Identity', 'op': 'Parameter'}, - 'node_3': {'kind': 'op', 'type': 'Identity', 'op': 'Parameter'}, - 'axpy': {'type': 'Axpy', 'kind': 'op', 'op': 'Axpy'}, - 'node_4': {'kind': 'op', 'type': 'Identity', 'op': 'Parameter'}} - edges = [ - ('node_1', 'axpy', {'in': 0, 'out': 0}), - ('node_2', 'axpy', {'in': 1, 'out': 0}), - ('node_3', 'axpy', {'in': 2, 'out': 0}), - ('axpy', 'node_4', {'in': 0, 'out': 0})] - graph = build_graph_with_edge_attrs(nodes, edges) - node = Node(graph, 'axpy') - replacer = AxpyToSSandAdd() - replacer.replace_op(graph, node) - - scale_node = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'ScaleShift'] - self.assertEqual(len(scale_node), 1) - add_node = [node for node, attrs in list(graph.nodes(data=True)) if attrs['type'] == 'Add'] - self.assertEqual(len(add_node), 1) diff --git a/tools/mo/unit_tests/mo/front/caffe/bn_test.py b/tools/mo/unit_tests/mo/front/caffe/bn_test.py deleted file mode 100644 index f9b2c52e04b8df..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/bn_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 
2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import unittest - -from openvino.tools.mo.front.caffe.bn import BNToScaleShift -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.extractors import FakeParam -from unit_tests.utils.graph import build_graph_with_edge_attrs, build_graph_with_attrs - - -class FakeBNProtoLayer: - def __init__(self, val): - self.bn_param = val - - -class FakeBNBinLayer: - def __init__(self, val): - self.blobs = val - - -class TestBNReplacer(unittest.TestCase): - def test_bn(self): - bn_pb = FakeBNProtoLayer(FakeParam('eps', 0.0001)) - mean = [1, 2.5, 3] - var = [0.5, 0.1, 1.2] - scale = [2.3, 3.4, 4.5] - shift = [0.8, 0.6, 0.4] - bn_bin = FakeBNBinLayer([FakeParam('data', mean), - FakeParam('data', var), - FakeParam('data', scale), - FakeParam('data', shift)]) - nodes = [ - ('input', {'kind': 'op', 'type': 'Identity', 'op': 'Identity'}), - ('bn', {'type': None, 'kind': 'op', 'op': 'BN', 'pb': bn_pb, 'model_pb': bn_bin}), - ('output', {'kind': 'op', 'type': 'Identity', 'op': 'Identity'}), - ] - edges = [ - ('input', 'bn', {'in': 0, 'out': 0}), - ('bn', 'output', {'in': 0, 'out': 0}), - ] - graph = build_graph_with_attrs(nodes, edges) - node = Node(graph, 'bn') - graph.stage = 'front' - - BNToScaleShift().find_and_replace_pattern(graph) - - ref_nodes = { - 'input': {'kind': 'op', 'type': 'Identity', 'op': 'Identity'}, - 'scale': {'kind': 'op', 'type': 'Const', 'op': 'Const', - 'value': np.array([1.11796412, 3.2272172, 4.74282367])}, - 'shift': {'kind': 'op', 'type': 'Const', 'op': 'Const', - 'value': np.array([-2.07131747, -10.87253847, -20.14270653])}, - 'ss': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'}, - 'output': {'kind': 'op', 'type': 'Identity', 'op': 'Identity'}, - } - ref_edges = [ - ('input', 'ss', {'in': 0, 'out': 0}), - ('scale', 'ss', {'in': 1, 'out': 0}), - ('shift', 'ss', {'in': 2, 'out': 0}), - ('ss', 'output', {'in': 0, 'out': 0}), - ] - ref_graph = build_graph_with_edge_attrs(ref_nodes, ref_edges) - (flag, resp) = compare_graphs(graph, ref_graph, 'input', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/caffe/conv_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/conv_ext_test.py deleted file mode 100644 index eb430a67c1fdeb..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/conv_ext_test.py +++ /dev/null @@ -1,336 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -import numpy as np - -from openvino.tools.mo.front.caffe.conv_ext import ConvFrontExtractor, DeconvFrontExtractor, conv_create_attrs, conv_set_params -from openvino.tools.mo.front.caffe.extractors.utils import get_list_from_container -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.extractors import PB, FakeParam, FakeMultiParam - - -class FakeConvProtoLayer: - def __init__(self, val): - self.convolution_param = val - self.bottom = [0] - - -class TestConvShapesParsing(unittest.TestCase): - def test_conv_no_pb_no_ml(self): - node = PB({'pb': None}) - self.assertRaises(Error, ConvFrontExtractor.extract, node) - - @patch('openvino.tools.mo.front.caffe.conv_ext.weights_biases') - @patch('openvino.tools.mo.front.caffe.conv_ext.layout_attrs') - def test_conv_ext_ideal_numbers(self, weights_biases_mock, layout_attrs_mock): - weights_biases_mock.return_value = {} - 
layout_attrs_mock.return_value = {} - params = { - 'pad': 10, - 'kernel_size': 11, - 'stride': 12, - 'dilation': 13, - 'group': 14, - 'num_output': 15, - 'bias_term': True - } - node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))}) - ConvFrontExtractor.extract(node) - res = node - exp_res = { - 'op': 'Conv2D', - 'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]), - 'pad_spatial_shape': np.array([[10, 10], [10, 10]]), - 'stride': np.array([1, 1, 12, 12]), - 'kernel_spatial': np.array([11, 11]), - 'dilation': np.array([1, 1, 13, 13]), - 'group': 14, - 'bias_addable': True, - 'bias_term': True, - } - self.assertTrue(weights_biases_mock.called) - self.assertTrue(layout_attrs_mock.called) - for key in exp_res.keys(): - if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'): - np.testing.assert_equal(res[key], exp_res[key]) - else: - self.assertEqual(res[key], exp_res[key]) - - @patch('openvino.tools.mo.front.caffe.conv_ext.weights_biases') - @patch('openvino.tools.mo.front.caffe.conv_ext.layout_attrs') - def test_conv_ext_empty_numbers(self, weights_biases_mock, layout_attrs_mock): - weights_biases_mock.return_value = {} - layout_attrs_mock.return_value = {} - params = { - 'pad': None, - 'kernel_size': None, - 'stride': None, - 'dilation': None, - 'group': 14, - 'num_output': 15, - 'bias_term': True, - 'pad_w': 3, - 'pad_h': 4, - 'kernel_w': 5, - 'kernel_h': 6, - 'stride_h': 3, - 'stride_w': 2, - } - node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))}) - ConvFrontExtractor.extract(node) - res = node - exp_res = { - 'op': 'Conv2D', - 'pad': np.array([[0, 0], [0, 0], [4, 4], [3, 3]]), - 'pad_spatial_shape': np.array([[4, 4], [3, 3]]), - 'stride': np.array([1, 1, 3, 2]), - 'kernel_spatial': np.array([6, 5]), - 'dilation': np.array([1, 1, 1, 1]), - 'group': 14, - 'bias_addable': True, - 'bias_term': True, - } - self.assertTrue(weights_biases_mock.called) - self.assertTrue(layout_attrs_mock.called) - for key in exp_res.keys(): - if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'): - np.testing.assert_equal(res[key], exp_res[key]) - else: - self.assertEqual(res[key], exp_res[key]) - - def test_attrs(self): - params = { - 'type_str': 'Conv2D', - 'padding': [10, 10], - 'stride': [12, 12], - 'kernel': [11, 11], - 'dilate': [13, 13], - 'group': 14, - 'output': 13, - 'bias_term': True - } - - res = conv_create_attrs(params) - - exp_res = { - 'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]), - 'pad_spatial_shape': np.array([[10, 10], [10, 10]]), - 'stride': np.array([1, 1, 12, 12]), - 'kernel_spatial': np.array([11, 11]), - 'dilation': np.array([1, 1, 13, 13]), - 'group': 14, - 'bias_addable': True, - 'bias_term': True, - 'output_spatial_shape': None, - 'output_shape': None, - 'output': 13, - } - for key in exp_res.keys(): - if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'): - np.testing.assert_equal(res[key], exp_res[key]) - else: - self.assertEqual(res[key], exp_res[key]) - - def test_get_list_from_container_no_existing_param(self): - res = get_list_from_container(FakeParam("p", "1"), 'prop', int) - self.assertEqual(res, []) - - def test_get_list_from_container_no_param(self): - res = get_list_from_container(None, 'prop', int) - self.assertEqual(res, []) - - def test_get_list_from_container_simple_type_match(self): - res = get_list_from_container(FakeParam('prop', 10), 'prop', int) - self.assertEqual(res, [10]) - - def test_get_list_from_container_list_match(self): - res = 
get_list_from_container(FakeParam('prop', [10, 11]), 'prop', int) - self.assertEqual(res, [10, 11]) - - def test_get_list_from_container_list_match_empty(self): - res = get_list_from_container(FakeParam('prop', []), 'prop', int) - self.assertEqual(res, []) - - def test_params_creation(self): - params = { - 'pad': None, - 'kernel_size': None, - 'stride': None, - 'dilation': None, - 'group': 14, - 'num_output': 15, - 'bias_term': True, - 'pad_w': 3, - 'pad_h': 4, - 'kernel_w': 5, - 'kernel_h': 6, - 'stride_h': 3, - 'stride_w': 2, - } - exp_res = { - 'padding': [3, 4], - 'stride': [2, 3], - 'kernel': [5, 6], - 'dilate': [1, 1], - 'group': 14, - 'output': 15 - } - res = conv_set_params(FakeConvProtoLayer(FakeMultiParam(params)).convolution_param, 'Conv2D') - - for key in exp_res.keys(): - if key in ('padding', 'stride', 'stride', 'kernel', 'dilate'): - np.testing.assert_equal(res[key], exp_res[key]) - else: - self.assertEqual(res[key], exp_res[key]) - - -class TestDeconvShapesParsing(unittest.TestCase): - def test_deconv_no_pb_no_ml(self): - node = PB({'pb': None}) - self.assertRaises(Error, DeconvFrontExtractor.extract, node) - - @patch('openvino.tools.mo.front.caffe.conv_ext.weights_biases') - @patch('openvino.tools.mo.front.caffe.conv_ext.layout_attrs') - def test_conv_ext_ideal_numbers(self, weights_biases_mock, layout_attrs_mock): - weights_biases_mock.return_value = {} - layout_attrs_mock.return_value = {} - params = { - 'pad': 10, - 'kernel_size': 11, - 'stride': 12, - 'dilation': 13, - 'group': 14, - 'num_output': 15, - 'bias_term': True - } - node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))}) - res = DeconvFrontExtractor.extract(node) - res = node - exp_res = { - 'op': 'Deconv2D', - 'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]), - 'pad_spatial_shape': np.array([[10, 10], [10, 10]]), - 'stride': np.array([1, 1, 12, 12]), - 'kernel_spatial': np.array([11, 11]), - 'dilation': np.array([1, 1, 13, 13]), - 'group': 14, - 'bias_addable': True, - } - self.assertTrue(weights_biases_mock.called) - self.assertTrue(layout_attrs_mock.called) - for key in exp_res.keys(): - if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'): - np.testing.assert_equal(res[key], exp_res[key]) - else: - self.assertEqual(res[key], exp_res[key]) - - @patch('openvino.tools.mo.front.caffe.conv_ext.weights_biases') - @patch('openvino.tools.mo.front.caffe.conv_ext.layout_attrs') - def test_conv_ext_false_bias_term(self, weights_biases_mock, layout_attrs_mock): - weights_biases_mock.return_value = {} - layout_attrs_mock.return_value = {} - params = { - 'pad': 10, - 'kernel_size': 11, - 'stride': 12, - 'dilation': 13, - 'group': 14, - 'num_output': 15, - 'bias_term': False - } - node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))}) - res = DeconvFrontExtractor.extract(node) - res = node - exp_res = { - 'op': 'Deconv2D', - 'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]), - 'pad_spatial_shape': np.array([[10, 10], [10, 10]]), - 'stride': np.array([1, 1, 12, 12]), - 'kernel_spatial': np.array([11, 11]), - 'dilation': np.array([1, 1, 13, 13]), - 'group': 14, - 'bias_addable': True, - 'bias_term': False, - } - self.assertTrue(weights_biases_mock.called) - self.assertTrue(layout_attrs_mock.called) - for key in exp_res.keys(): - if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation', 'bias_term'): - np.testing.assert_equal(res[key], exp_res[key]) - else: - self.assertEqual(res[key], exp_res[key]) - - 
@patch('openvino.tools.mo.front.caffe.conv_ext.weights_biases') - @patch('openvino.tools.mo.front.caffe.conv_ext.layout_attrs') - def test_conv_ext_empty_numbers(self, weights_biases_mock, layout_attrs_mock): - weights_biases_mock.return_value = {} - layout_attrs_mock.return_value = {} - params = { - 'pad': None, - 'kernel_size': None, - 'stride': None, - 'dilation': None, - 'group': 14, - 'num_output': 15, - 'bias_term': True, - 'pad_w': 3, - 'pad_h': 4, - 'kernel_w': 5, - 'kernel_h': 6, - 'stride_h': 3, - 'stride_w': 2, - } - node = PB({'pb': FakeConvProtoLayer(FakeMultiParam(params))}) - res = DeconvFrontExtractor.extract(node) - res = node - exp_res = { - 'op': 'Deconv2D', - 'pad': np.array([[0, 0], [0, 0], [4, 4], [3, 3]]), - 'pad_spatial_shape': np.array([[4, 4], [3, 3]]), - 'stride': np.array([1, 1, 3, 2]), - 'kernel_spatial': np.array([6, 5]), - 'dilation': np.array([1, 1, 1, 1]), - 'group': 14, - 'bias_addable': True, - } - self.assertTrue(weights_biases_mock.called) - self.assertTrue(layout_attrs_mock.called) - for key in exp_res.keys(): - if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'): - np.testing.assert_equal(res[key], exp_res[key]) - else: - self.assertEqual(res[key], exp_res[key]) - - def test_attrs(self): - params = { - 'type_str': 'Deconv2D', - 'padding': [10, 10], - 'stride': [12, 12], - 'kernel': [11, 11], - 'dilate': [13, 13], - 'group': 14, - 'output': 13, - 'bias_term': True - } - res = conv_create_attrs(params) - - exp_res = { - 'pad': np.array([[0, 0], [0, 0], [10, 10], [10, 10]]), - 'pad_spatial_shape': np.array([[10, 10], [10, 10]]), - 'stride': np.array([1, 1, 12, 12]), - 'kernel_spatial': np.array([11, 11]), - 'dilation': np.array([1, 1, 13, 13]), - 'group': 14, - 'bias_addable': True, - 'output_spatial_shape': None, - 'output_shape': None, - 'output': 13, - } - for key in exp_res.keys(): - if key in ('pad', 'pad_spatial_shape', 'stride', 'kernel_spatial', 'dilation'): - np.testing.assert_equal(res[key], exp_res[key]) - else: - self.assertEqual(res[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/crop_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/crop_ext_test.py deleted file mode 100644 index 9f3610d19a77b2..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/crop_ext_test.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.crop_ext import CropFrontExtractor -from openvino.tools.mo.front.common.partial_infer.crop import crop_infer -from openvino.tools.mo.ops.crop import Crop -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeCropProtoLayer: - def __init__(self, val): - self.crop_param = val - - -class TestCropExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['Crop'] = Crop - - def test_da_no_pb_no_ml(self): - self.assertRaises(AttributeError, CropFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.collect_attributes') - def test_crop_ext(self, collect_attributes_mock): - params = { - 'axis': 0, - 'offset': 0, - } - collect_attributes_mock.return_value = { - **params, - 'test': 54, - 'test2': 'test3' - } - fake_pl = FakeCropProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - CropFrontExtractor.extract(fake_node) - - exp_res = { - 'op': 'Crop', - 'axis': 0, - 'offset': 0, - 
'dim': None, # set in infer - 'infer': crop_infer - } - - for key in exp_res.keys(): - self.assertEqual(exp_res[key], fake_node[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/ctcgreedydecoder_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/ctcgreedydecoder_ext_test.py deleted file mode 100644 index 103a0c0f50fda0..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/ctcgreedydecoder_ext_test.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.ctcgreedydecoder_ext import CTCGreedyDecoderFrontExtractor -from openvino.tools.mo.ops.ctc_greedy_decoder import CTCGreedyDecoderOp -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeCTCGreedyDecoderProtoLayer: - def __init__(self, val): - self.ctc_decoder_param = val - - -class TestCTCGreedyDecoderExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['CTCGreedyDecoder'] = CTCGreedyDecoderOp - - def test_ctcgreedydecoder_no_pb_no_ml(self): - self.assertRaises(AttributeError, CTCGreedyDecoderFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.ctcgreedydecoder_ext.merge_attrs') - def test_ctcgreedydecoder_ext_ideal_numbers(self, merge_attrs_mock): - params = { - 'ctc_merge_repeated': True - } - merge_attrs_mock.return_value = { - **params - } - - fake_pl = FakeCTCGreedyDecoderProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - CTCGreedyDecoderFrontExtractor.extract(fake_node) - - exp_res = { - 'type': "CTCGreedyDecoder", - 'ctc_merge_repeated': 1, - 'infer': CTCGreedyDecoderOp.infer - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/custom_layers_mapping_test.py b/tools/mo/unit_tests/mo/front/caffe/custom_layers_mapping_test.py deleted file mode 100644 index ba9a2390ff864c..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/custom_layers_mapping_test.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from google.protobuf import text_format - -from openvino.tools.mo.front.caffe.custom_layers_mapping import proto_extractor -from openvino.tools.mo.front.caffe.proto import caffe_pb2 - - -class TestCustomLayerMapping(unittest.TestCase): - def test_extractor_custom_layer(self): - expected_conv_params = { - 'num_output': 64, - 'pad': 1, - 'kernel_size': 3, - 'stride': 1, - 'bias_term': True, - 'axis': 1, - 'engine': 'caffe.ConvolutionParameter.DEFAULT', - 'group': 1, - 'force_nd_im2col': False, - 'pad_h': 0, - 'pad_w': 0 - } - layer = """ - name: "conv" - type: "Convolution" - bottom: "input" - top: "conv" - convolution_param { - num_output: 64 - pad: 1 - kernel_size: 3 - stride: 1 - } - """ - mapping = { - 'NativeType': 'Convolution', - 'hasParam': 'true', - 'protoParamName': 'convolution_param' - } - proto = caffe_pb2.LayerParameter() - text_format.Merge(layer, proto) - attrs = proto_extractor(proto, None, mapping, False, False) - for key, val in expected_conv_params.items(): - if key == 'bias_term' or key == 'force_nd_im2col': - self.assertTrue(str(int(val)) == attrs[key]) - else: - self.assertTrue(str(val) == attrs[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/elementwise_ext_test.py 
b/tools/mo/unit_tests/mo/front/caffe/elementwise_ext_test.py deleted file mode 100644 index ff51a84eaba430..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/elementwise_ext_test.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.elementwise_ext import BiasToAdd -from unit_tests.utils.extractors import FakeModelLayer, FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeBiasProtoLayer: - def __init__(self, val): - self.bias_param = val - - -class TestBias(unittest.TestCase): - - @patch('openvino.tools.mo.front.caffe.elementwise_ext.embed_input') - def test_bias(self, embed_input_mock): - embed_input_mock.return_value = {} - params = {'axis': 1} - add_node = FakeNode(FakeBiasProtoLayer(FakeMultiParam(params)), - FakeModelLayer([1, 2, 3, 4, 5])) - BiasToAdd.extract(add_node) - - exp_res = { - 'type': "Add", - 'axis': 1 - } - - for key in exp_res.keys(): - self.assertEqual(add_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/eltwise_add_normalize_test.py b/tools/mo/unit_tests/mo/front/caffe/eltwise_add_normalize_test.py deleted file mode 100644 index 8f15aef9441461..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/eltwise_add_normalize_test.py +++ /dev/null @@ -1,202 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.caffe.eltwise_add_normalize import EltwiseAddNormalize -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -input_shape = int64_array([1, 4, 10]) -const_1_value = np.array([2.0]) -const_2_value = np.array([3.0]) -const_3_value = np.array([4.0]) - -nodes_attributes = { - 'placeholder_1': {'shape': input_shape, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'}, - 'placeholder_2': {'shape': input_shape, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'}, - 'placeholder_3': {'shape': input_shape, 'type': 'Placeholder', 'kind': 'op', 'op': 'Placeholder'}, - 'eltwise': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - 'eltwise_n': {'type': 'EltwiseN', 'kind': 'op', 'op': 'EltwiseN', 'operation': 'sum'}, - - 'sigmoid_1': {'type': 'Sigmoid', 'kind': 'op', 'op': 'Sigmoid'}, - 'sigmoid_2': {'type': 'Sigmoid', 'kind': 'op', 'op': 'Sigmoid'}, - - 'mul_1': {'type': 'Multiply', 'op': 'Mul', 'kind': 'op'}, - 'const_1': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'mul_2': {'type': 'Multiply', 'op': 'Mul', 'kind': 'op'}, - 'const_2': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'mul_3': {'type': 'Multiply', 'op': 'Mul', 'kind': 'op'}, - 'const_3': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'shape': None, 'value': None}, -} - - -class EltwiseAddNormalizationTest(unittest.TestCase): - def test_first_input_coeff_not_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'eltwise'), - ('placeholder_2', 'eltwise'), - ('eltwise', 'sigmoid_1'), - ('eltwise', 'sigmoid_2'), - ], - {'eltwise': {'coeff': np.array([2.0, 1.0])} - }, nodes_with_edges_only=True) - graph.stage = 'front' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'mul_1'), - ('const_1', 'mul_1'), - ('mul_1', 'eltwise'), - ('placeholder_2', 'eltwise'), 
- ('eltwise', 'sigmoid_1'), - ('eltwise', 'sigmoid_2'), - ], - {'const_1': {'value': const_1_value, 'shape': const_1_value.shape}, - }, nodes_with_edges_only=True) - - EltwiseAddNormalize().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_second_input_coeff_not_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'eltwise'), - ('placeholder_2', 'eltwise'), - ('eltwise', 'sigmoid_1'), - ('eltwise', 'sigmoid_2'), - ], - {'eltwise': {'coeff': np.array([1.0, const_2_value[0]])} - }, nodes_with_edges_only=True) - graph.stage = 'front' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'eltwise'), - ('placeholder_2', 'mul_2'), - ('const_2', 'mul_2'), - ('mul_2', 'eltwise'), - ('eltwise', 'sigmoid_1'), - ('eltwise', 'sigmoid_2'), - ], - {'const_2': {'value': const_2_value, 'shape': const_2_value.shape}, - }, nodes_with_edges_only=True) - - EltwiseAddNormalize().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_both_input_coeff_not_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'eltwise'), - ('placeholder_2', 'eltwise'), - ('eltwise', 'sigmoid_1'), - ('eltwise', 'sigmoid_2'), - ], - {'eltwise': {'coeff': np.array([const_1_value[0], const_2_value[0]])} - }, nodes_with_edges_only=True) - graph.stage = 'front' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'mul_1'), - ('const_1', 'mul_1'), - ('mul_1', 'eltwise'), - ('placeholder_2', 'mul_2'), - ('const_2', 'mul_2'), - ('mul_2', 'eltwise'), - ('eltwise', 'sigmoid_1'), - ('eltwise', 'sigmoid_2'), - ], - {'const_1': {'value': const_1_value, 'shape': const_1_value.shape}, - 'const_2': {'value': const_2_value, 'shape': const_2_value.shape}, - }, nodes_with_edges_only=True) - - EltwiseAddNormalize().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_both_input_coeff_equal_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'eltwise'), - ('placeholder_2', 'eltwise'), - ('eltwise', 'sigmoid_1'), - ('eltwise', 'sigmoid_2'), - ], - {'eltwise': {'coeff': np.array([1.0, 1.0])} - }, nodes_with_edges_only=True) - graph.stage = 'front' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'eltwise'), - ('placeholder_2', 'eltwise'), - ('eltwise', 'sigmoid_1'), - ('eltwise', 'sigmoid_2'), - ], - {}, nodes_with_edges_only=True) - - EltwiseAddNormalize().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_both_input_coeff_not_defined(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'eltwise'), - ('placeholder_2', 'eltwise'), - ('eltwise', 'sigmoid_1'), - ('eltwise', 'sigmoid_2'), - ], - {'eltwise': {'coeff': None} - }, nodes_with_edges_only=True) - graph.stage = 'front' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'eltwise'), - ('placeholder_2', 'eltwise'), - ('eltwise', 'sigmoid_1'), - ('eltwise', 'sigmoid_2'), - ], - {}, nodes_with_edges_only=True) - - EltwiseAddNormalize().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1', check_op_attrs=True) - self.assertTrue(flag, resp) - - - def test_3_inputs_with_coeffs(self): - graph 
= build_graph(nodes_attributes, - [('placeholder_1', 'eltwise'), - ('placeholder_2', 'eltwise'), - ('placeholder_3', 'eltwise'), - ('eltwise', 'sigmoid_1'), - ], - {'eltwise': {'coeff': np.array([const_1_value[0], const_2_value[0], const_3_value[0]])} - }, nodes_with_edges_only=True) - graph.stage = 'front' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'mul_1'), - ('const_1', 'mul_1'), - ('mul_1', 'eltwise_n'), - ('placeholder_2', 'mul_2'), - ('const_2', 'mul_2'), - ('mul_2', 'eltwise_n'), - ('placeholder_3', 'mul_3'), - ('const_3', 'mul_3'), - ('mul_3', 'eltwise_n'), - ('eltwise_n', 'sigmoid_1'), - ], - {'const_1': {'value': const_1_value, 'shape': const_1_value.shape}, - 'const_2': {'value': const_2_value, 'shape': const_2_value.shape}, - 'const_3': {'value': const_3_value, 'shape': const_3_value.shape}, - }, nodes_with_edges_only=True) - - EltwiseAddNormalize().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/caffe/elu_test.py b/tools/mo/unit_tests/mo/front/caffe/elu_test.py deleted file mode 100644 index c26a58b8ffd416..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/elu_test.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.elu import ELUFrontExtractor -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeProtoLayer: - def __init__(self, val): - self.elu_param = val - - -class TestElu(unittest.TestCase): - @patch('openvino.tools.mo.front.caffe.elu.collect_attributes') - def test_elu_ext(self, collect_attrs_mock): - params = { - 'alpha': 4 - } - collect_attrs_mock.return_value = { - **params, - 'test': 54, - 'test2': 'test3' - } - - fn = FakeNode(FakeProtoLayer(FakeMultiParam(params)), None) - ELUFrontExtractor.extract(fn) - - exp_res = { - 'type': 'Elu', - 'alpha': 4 - } - - for i in exp_res: - self.assertEqual(fn[i], exp_res[i]) diff --git a/tools/mo/unit_tests/mo/front/caffe/extractor_test.py b/tools/mo/unit_tests/mo/front/caffe/extractor_test.py deleted file mode 100644 index 435c59a8ecdd38..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/extractor_test.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.extractor import check_phase, register_caffe_python_extractor -from openvino.tools.mo.front.extractor import CaffePythonFrontExtractorOp -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'}, - 'node_2': {'type': 'Identity', 'kind': 'op'}} - - -class TestExtractor(unittest.TestCase): - def test_check_phase_train_phase(self): - phase_param = { - 'phase': 0 - } - - include_param = { - 'include': [FakeMultiParam(phase_param)] - } - - graph = build_graph(nodes_attributes, - [('node_1', 'node_2')], - { - 'node_1': {'pb': FakeMultiParam(include_param)} - }) - - node = Node(graph, 'node_1') - res = check_phase(node) - exp_res = {'phase': 0} - self.assertEqual(res, exp_res) - - def test_check_phase_test_phase(self): - phase_param = { - 'phase': 1 - } - - include_param = { - 
'include': [FakeMultiParam(phase_param)] - } - - graph = build_graph(nodes_attributes, - [('node_1', 'node_2')], - { - 'node_1': {'pb': FakeMultiParam(include_param)} - }) - - node = Node(graph, 'node_1') - res = check_phase(node) - exp_res = {'phase': 1} - self.assertEqual(res, exp_res) - - def test_check_phase_no_phase(self): - phase_param = {} - - include_param = { - 'include': [FakeMultiParam(phase_param)] - } - - graph = build_graph(nodes_attributes, - [('node_1', 'node_2')], - { - 'node_1': {'pb': FakeMultiParam(include_param)} - }) - - node = Node(graph, 'node_1') - res = check_phase(node) - exp_res = {} - self.assertEqual(res, exp_res) - - def test_check_phase_no_include(self): - include_param = {} - - graph = build_graph(nodes_attributes, - [('node_1', 'node_2')], - { - 'node_1': {'pb': FakeMultiParam(include_param)} - }) - - node = Node(graph, 'node_1') - res = check_phase(node) - exp_res = {} - self.assertEqual(res, exp_res) - - def test_check_phase_no_pb(self): - graph = build_graph(nodes_attributes, - [('node_1', 'node_2')], - {}) - - node = Node(graph, 'node_1') - res = check_phase(node) - exp_res = {} - self.assertEqual(res, exp_res) - - @patch('openvino.tools.mo.ops.activation.Activation') - def test_register_caffe_python_extractor_by_name(self, op_mock): - op_mock.op = 'TestLayer' - name = 'myTestLayer' - register_caffe_python_extractor(op_mock, name) - self.assertIn(name, CaffePythonFrontExtractorOp.registered_ops) - - @patch('openvino.tools.mo.ops.activation.Activation') - def test_register_caffe_python_extractor_by_op(self, op_mock): - op_mock.op = 'TestLayer' - register_caffe_python_extractor(op_mock) - self.assertIn(op_mock.op, CaffePythonFrontExtractorOp.registered_ops) diff --git a/tools/mo/unit_tests/mo/front/caffe/extractors/__init__.py b/tools/mo/unit_tests/mo/front/caffe/extractors/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/caffe/extractors/utils_test.py b/tools/mo/unit_tests/mo/front/caffe/extractors/utils_test.py deleted file mode 100644 index 9b28206ab6f7ae..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/extractors/utils_test.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch, call - -import numpy as np - -from openvino.tools.mo.front.caffe.extractors.utils import weights_biases, embed_input, get_canonical_axis_index -from unit_tests.utils.extractors import FakeModelLayer - - -class TestWeightsBiases(unittest.TestCase): - def test_weights_biases_no_layer_no_bias(self): - res = weights_biases(False, None) - self.assertEqual(res, {}) - - @patch('openvino.tools.mo.front.caffe.extractors.utils.embed_input') - def test_weights_biases_layer_no_bias(self, embed_input_mock): - weights_biases(False, FakeModelLayer([[1, 2], ])) - calls = [call({}, 1, 'weights', [1, 2])] - embed_input_mock.assert_has_calls(calls) - - @patch('openvino.tools.mo.front.caffe.extractors.utils.embed_input') - def test_weights_biases_layer_bias(self, embed_input_mock): - weights_biases(True, FakeModelLayer([[1, 2], [3, 4]])) - calls = [call({}, 1, 'weights', [1, 2]), call({}, 2, 'biases', [3, 4])] - embed_input_mock.assert_has_calls(calls) - - -class TestEmbedInput(unittest.TestCase): - def test_embed_input_no_bin_name_no_bias(self): - attrs = {} - blob = np.array([1, 2]) - name = 'weights' - embed_input(attrs, 1, name, blob, None) - exp_res = { - 'weights': blob, - 'embedded_inputs': [ - (1, 
name, {'bin': name}) - ] - } - for key in exp_res.keys(): - if key == name: - np.testing.assert_equal(attrs[key], exp_res[key]) - else: - self.assertEqual(attrs[key], exp_res[key]) - - def test_embed_input_w_bin_name(self): - attrs = {} - blob = np.array([1, 2]) - name = 'weights' - embed_input(attrs, 1, name, blob, 'special_name') - exp_res = { - 'weights': blob, - 'embedded_inputs': [ - (1, name, {'bin': 'special_name'}) - ] - } - for key in exp_res.keys(): - if key == name: - np.testing.assert_equal(attrs[key], exp_res[key]) - else: - self.assertEqual(attrs[key], exp_res[key]) - - -class TestCanonicalAxisIndex(unittest.TestCase): - def test_negative_index(self): - shape = [1, 2, 3, 4] - inds = [-4, -3, -2, -1] - expected_inds = [0, 1, 2, 3] - for i in range(len(inds)): - assert get_canonical_axis_index(shape, inds[i]) == expected_inds[i] - - def test_posirive_index(self): - shape = [1, 2, 3, 4] - inds = [0, 1, 2, 3] - expected_inds = [0, 1, 2, 3] - for i in range(len(inds)): - assert get_canonical_axis_index(shape, inds[i]) == expected_inds[i] diff --git a/tools/mo/unit_tests/mo/front/caffe/grn_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/grn_ext_test.py deleted file mode 100644 index 0b5d22115d1ea6..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/grn_ext_test.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.grn_ext import GRNFrontExtractor -from openvino.tools.mo.ops.grn import GRNOp -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeGRNProtoLayer: - def __init__(self, val): - self.grn_param = val - - -class TestGRNExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['GRN'] = GRNOp - - def test_grn_no_pb_no_ml(self): - self.assertRaises(AttributeError, GRNFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.grn_ext.merge_attrs') - def test_grn_ext_ideal_numbers(self, merge_attrs_mock): - params = { - 'bias': 0.7 - } - merge_attrs_mock.return_value = { - **params - } - - fake_pl = FakeGRNProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - GRNFrontExtractor.extract(fake_node) - - exp_res = { - 'type': "GRN", - 'bias': 0.7, - 'infer': copy_shape_infer - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/loader_test.py b/tools/mo/unit_tests/mo/front/caffe/loader_test.py deleted file mode 100644 index 6348440c9cd7a8..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/loader_test.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -from google.protobuf import text_format - -from openvino.tools.mo.front.caffe.loader import caffe_pb_to_nx -from openvino.tools.mo.front.caffe.proto import caffe_pb2 -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.error import Error -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry - -proto_str_one_input = 'name: "network" ' \ - 'layer { ' \ - 'name: "Input0" ' \ - 'type: "Input" ' \ - 'top: "Input0" ' \ - 'input_param { ' \ - 'shape: { ' \ - 'dim: 1 ' \ - 'dim: 3 ' \ - 'dim: 224 ' \ - 'dim: 224 ' \ - 
'} ' \ - '} ' \ - '}' - -proto_str_old_styled_multi_input = 'name: "network" ' \ - 'input: "Input0" ' \ - 'input_dim: 1 ' \ - 'input_dim: 3 ' \ - 'input_dim: 224 ' \ - 'input_dim: 224 ' \ - 'input: "data" ' \ - 'input_dim: 1 ' \ - 'input_dim: 3 ' - -proto_str_input = 'name: "network" ' \ - 'input: "data" ' \ - 'input_shape ' \ - '{ ' \ - 'dim: 1 ' \ - 'dim: 3 ' \ - 'dim: 224 ' \ - 'dim: 224 ' \ - '}' - -proto_str_multi_input = 'name: "network" ' \ - 'input: "data" ' \ - 'input_shape ' \ - '{ ' \ - 'dim: 1 ' \ - 'dim: 3 ' \ - 'dim: 224 ' \ - 'dim: 224 ' \ - '} ' \ - 'input: "data1" ' \ - 'input_shape ' \ - '{ ' \ - 'dim: 1 ' \ - 'dim: 3 ' \ - '}' - -proto_str_old_styled_input = 'name: "network" ' \ - 'input: "data" ' \ - 'input_dim: 1 ' \ - 'input_dim: 3 ' \ - 'input_dim: 224 ' \ - 'input_dim: 224 ' - -layer_proto_str = 'layer { ' \ - 'name: "conv1" ' \ - 'type: "Convolution" ' \ - 'bottom: "data" ' \ - 'top: "conv1" ' \ - '}' - -proto_same_name_layers = 'layer { ' \ - 'name: "conv1" ' \ - 'type: "Convolution" ' \ - 'bottom: "data" ' \ - 'top: "conv1" ' \ - '} ' \ - 'layer { ' \ - 'name: "conv1" ' \ - 'type: "Convolution" ' \ - 'bottom: "data1" ' \ - 'top: "conv1_2" ' \ - '}' - -class TestLoader(UnitTestWithMockedTelemetry): - def test_caffe_pb_to_nx_one_input(self): - proto = caffe_pb2.NetParameter() - text_format.Merge(proto_str_one_input, proto) - input_shapes = caffe_pb_to_nx(Graph(), proto, None) - expected_input_shapes = { - 'Input0': np.array([1, 3, 224, 224]) - } - - for i in expected_input_shapes: - np.testing.assert_array_equal(input_shapes[i], expected_input_shapes[i]) - - def test_caffe_pb_to_nx_old_styled_multi_input(self): - proto = caffe_pb2.NetParameter() - text_format.Merge(proto_str_old_styled_multi_input + layer_proto_str, proto) - self.assertRaises(Error, caffe_pb_to_nx, Graph(), proto, None) - - def test_caffe_pb_to_nx_old_styled_input(self): - proto = caffe_pb2.NetParameter() - text_format.Merge(proto_str_old_styled_input + layer_proto_str, proto) - input_shapes = caffe_pb_to_nx(Graph(), proto, None) - expected_input_shapes = { - 'data': np.array([1, 3, 224, 224]) - } - - for i in expected_input_shapes: - np.testing.assert_array_equal(input_shapes[i], expected_input_shapes[i]) - - def test_caffe_pb_to_standart_input(self): - proto = caffe_pb2.NetParameter() - text_format.Merge(proto_str_input + layer_proto_str, proto) - input_shapes = caffe_pb_to_nx(Graph(), proto, None) - expected_input_shapes = { - 'data': np.array([1, 3, 224, 224]) - } - - for i in expected_input_shapes: - np.testing.assert_array_equal(input_shapes[i], expected_input_shapes[i]) - - def test_caffe_pb_to_multi_input(self): - proto = caffe_pb2.NetParameter() - text_format.Merge(proto_str_multi_input + layer_proto_str, proto) - input_shapes = caffe_pb_to_nx(Graph(), proto, None) - expected_input_shapes = { - 'data': np.array([1, 3, 224, 224]), - 'data1': np.array([1, 3]) - } - - for i in expected_input_shapes: - np.testing.assert_array_equal(input_shapes[i], expected_input_shapes[i]) - - def test_caffe_same_name_layer(self): - proto = caffe_pb2.NetParameter() - text_format.Merge(proto_str_multi_input + proto_same_name_layers, proto) - graph = Graph() - caffe_pb_to_nx(graph, proto, None) - # 6 nodes because: 2 inputs + 2 convolutions + 2 identity nodes used as fake outputs - np.testing.assert_equal(len(graph.nodes()), 6) diff --git a/tools/mo/unit_tests/mo/front/caffe/normalize_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/normalize_ext_test.py deleted file mode 100644 index 
a2e724e1d690dd..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/normalize_ext_test.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.normalize_ext import NormalizeFrontExtractor -from openvino.tools.mo.ops.normalize import NormalizeOp -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeNormalizeProtoLayer: - def __init__(self, val): - self.norm_param = val - - -class TestNormalizeExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['Normalize'] = NormalizeOp - - def test_normalize_no_pb_no_ml(self): - self.assertRaises(AttributeError, NormalizeFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.normalize_ext.collect_attributes') - def test_normalize_ext_ideal_numbers(self, collect_attributes_mock): - params = { - 'across_spatial': 1, - 'channel_shared': 0, - 'eps': 0.00001 - } - collect_attributes_mock.return_value = { - **params - } - - fake_pl = FakeNormalizeProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - NormalizeFrontExtractor.extract(fake_node) - - exp_res = { - 'type': "Normalize", - 'across_spatial': 1, - 'channel_shared': 0, - 'eps': 0.00001, - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/pooling_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/pooling_ext_test.py deleted file mode 100644 index cd00cb39dbb020..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/pooling_ext_test.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.caffe.pooling_ext import PoolingFrontExtractor -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.ops.pooling import Pooling -from unit_tests.utils.extractors import PB, FakeMultiParam - - -class FakeProtoLayer: - def __init__(self, val): - self.pooling_param = val - - -class TestPooling(unittest.TestCase): - def test_pooling_ext_global(self): - params = { - 'kernel_size': 1, - 'stride': 2, - 'pad': 3, - 'pool': 0, - 'global_pooling': True, - 'ceil_mode': 1 - } - node = PB({'pb': FakeProtoLayer(FakeMultiParam(params))}) - PoolingFrontExtractor.extract(node) - res = node - exp_res = { - 'window': np.array([1, 1, 0, 0], dtype=np.int64), - 'stride': np.array([1, 1, 1, 1], dtype=np.int64), - 'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64), - 'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64), - 'pool_method': 'max', - 'exclude_pad': True, - 'infer': Pooling.infer, - 'global_pool': True, - 'output_spatial_shape': None, - 'pooling_convention': 'full', - 'rounding_type': 'ceil' - - } - exp_res.update(layout_attrs()) - for i in exp_res.keys(): - if i in ('window', 'stride', - 'pad', 'pad_spatial_shape', - 'spatial_dims', 'batch_dims', - 'channel_dims'): - np.testing.assert_array_equal(res[i], exp_res[i]) - else: - self.assertEqual(res[i], exp_res[i]) - - def test_pooling_ext(self): - params = { - 'kernel_size': 1, - 'stride': 2, - 'pad': 3, - 'pool': 1, - 'global_pooling': False, - 'ceil_mode': 0 - } - node = PB({'pb': FakeProtoLayer(FakeMultiParam(params))}) - PoolingFrontExtractor.extract(node) - res = node - exp_res = { 
- 'window': np.array([1, 1, 1, 1], dtype=np.int64), - 'stride': np.array([1, 1, 2, 2], dtype=np.int64), - 'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]], dtype=np.int64), - 'pad_spatial_shape': np.array([[3, 3], [3, 3]], dtype=np.int64), - 'pool_method': 'avg', - 'exclude_pad': False, - 'infer': Pooling.infer, - 'global_pool': False, - 'output_spatial_shape': None, - 'pooling_convention': 'valid' - } - exp_res.update(layout_attrs()) - for i in exp_res.keys(): - if i in ('window', 'stride', - 'pad', 'pad_spatial_shape', - 'spatial_dims', 'batch_dims', - 'channel_dims'): - np.testing.assert_array_equal(res[i], exp_res[i]) - else: - self.assertEqual(res[i], exp_res[i]) - - def test_pooling_ext_exception(self): - params = { - 'kernel_size': 1, - 'stride': 2, - 'pad': 3, - 'pool': 3, - 'global_pooling': True - } - node = PB({'pb': FakeProtoLayer(FakeMultiParam(params))}) - self.assertRaises(ValueError, PoolingFrontExtractor.extract, node) diff --git a/tools/mo/unit_tests/mo/front/caffe/prelu_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/prelu_ext_test.py deleted file mode 100644 index 71939faacada0f..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/prelu_ext_test.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.prelu_ext import PreluFrontExtractor -from openvino.tools.mo.ops.prelu import PReLU -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakePReLUProtoLayer: - def __init__(self, val): - self.prelu_param = val - - -class TestPreluExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['PReLU'] = PReLU - - def test_prelu_no_pb_no_ml(self): - self.assertRaises(AttributeError, PreluFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.prelu_ext.merge_attrs') - def test_reogyolo_ext_ideal_numbers(self, merge_attrs_mock): - params = { - 'channel_shared': False - } - - merge_attrs_mock.return_value = { - **params - } - - fake_pl = FakePReLUProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - PreluFrontExtractor.extract(fake_node) - - exp_res = { - 'type': 'PReLU', - 'op': 'PReLU', - 'channel_shared': 0, - 'infer': PReLU.infer, - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/priorbox_clustered_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/priorbox_clustered_ext_test.py deleted file mode 100644 index 7492f183805a19..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/priorbox_clustered_ext_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -import numpy as np - -from openvino.tools.mo.front.caffe.priorbox_clustered_ext import PriorBoxClusteredFrontExtractor -from openvino.tools.mo.ops.priorbox_clustered import PriorBoxClusteredOp -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakePriorBoxClusteredProtoLayer: - def __init__(self, val): - self.prior_box_param = val - - -class TestPriorBoxClusteredExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['PriorBoxClustered'] = PriorBoxClusteredOp - - def 
test_priorboxclustered_no_pb_no_ml(self): - self.assertRaises(AttributeError, PriorBoxClusteredFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.priorbox_clustered_ext.merge_attrs') - def test_priorboxclustered_ext_ideal_numbers(self, merge_attrs_mock): - params = { - 'width': '30.0', - 'height': '60.0', - 'clip': False, - 'flip': True, - 'variance': np.array(['0.2', '0.3', '0.2', '0.3']), - 'img_size': '300', - 'img_h': '0', - 'img_w': '0', - 'step': '0,5', - 'step_h': '0', - 'step_w': '0', - 'offset': '0.6' - } - merge_attrs_mock.return_value = { - **params - } - - fake_pl = FakePriorBoxClusteredProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - PriorBoxClusteredFrontExtractor.extract(fake_node) - - exp_res = { - 'op': 'PriorBoxClustered', - 'type': 'PriorBoxClustered', - 'width': '30.0', - 'height': '60.0', - 'clip': 0, - 'flip': 1, - 'variance': np.array(['0.2', '0.3', '0.2', '0.3']), - 'img_size': '300', - 'img_h': '0', - 'img_w': '0', - 'step': '0,5', - 'step_h': '0', - 'step_w': '0', - 'offset': '0.6' - } - - for key in exp_res.keys(): - if key in ['width', 'height', 'variance']: - np.testing.assert_equal(fake_node[key], exp_res[key]) - else: - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/priorbox_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/priorbox_ext_test.py deleted file mode 100644 index f81bc854d80afa..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/priorbox_ext_test.py +++ /dev/null @@ -1,136 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -import numpy as np - -from openvino.tools.mo.front.caffe.priorbox_ext import PriorBoxFrontExtractor -from openvino.tools.mo.ops.priorbox import PriorBoxOp -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam, FakeParam -from unit_tests.utils.graph import FakeNode - - -class FakeMultiParamListFields(FakeMultiParam): - def __init__(self, val): - super().__init__(val) - - def ListFields(self): - keys = [] - for k in self.dict_values.keys(): - keys.append([FakeParam('name', k)]) - return keys - - -class FakePriorBoxProtoLayer: - def __init__(self, val): - self.prior_box_param = val - - -class TestPriorBoxExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['PriorBox'] = PriorBoxOp - - def test_priorbox_no_pb_no_ml(self): - self.assertRaises(AttributeError, PriorBoxFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.priorbox_ext.merge_attrs') - def test_priorbox_ext_ideal_numbers(self, merge_attrs_mock): - params = { - 'clip': False, - 'flip': True, - 'min_size': np.array([]), - 'max_size': np.array([]), - 'aspect_ratio': np.array([2, 3]), - 'variance': np.array(['0.2', '0.3', '0.2', '0.3']), - 'img_size': '300', - 'img_h': '0', - 'img_w': '0', - 'step': '0,5', - 'step_h': '0', - 'step_w': '0', - 'offset': '0.6' - } - merge_attrs_mock.return_value = { - **params - } - - fake_pl = FakePriorBoxProtoLayer(FakeMultiParamListFields(params)) - fake_node = FakeNode(fake_pl, None) - - PriorBoxFrontExtractor.extract(fake_node) - - exp_res = { - 'op': 'PriorBox', - 'type': 'PriorBox', - 'clip': 0, - 'variance': np.array(['0.2', '0.3', '0.2', '0.3']), - 'img_size': '300', - 'img_h': '0', - 'img_w': '0', - 'step': '0,5', - 'step_h': '0', - 'step_w': '0', - 'offset': '0.6' - } - - for key in exp_res.keys(): - if key in ['width', 'height', 'variance']: - 
np.testing.assert_equal(fake_node[key], exp_res[key]) - else: - self.assertEqual(fake_node[key], exp_res[key]) - - @patch('openvino.tools.mo.front.caffe.priorbox_ext.merge_attrs') - def test_priorbox_ext_ideal_numbers_density(self, merge_attrs_mock): - params = { - 'clip': False, - 'flip': True, - 'min_size': np.array([]), - 'max_size': np.array([]), - 'aspect_ratio': np.array([2, 3]), - 'variance': np.array(['0.2', '0.3', '0.2', '0.3']), - 'img_size': '300', - 'img_h': '0', - 'img_w': '0', - 'step': '0,5', - 'step_h': '0', - 'step_w': '0', - 'offset': '0.6', - 'fixed_size': np.array(['1', '32']), - 'fixed_ratio': np.array(['0.2', '0.5']), - 'density': np.array(['0.3', '0.6']) - } - merge_attrs_mock.return_value = { - **params - } - - fake_pl = FakePriorBoxProtoLayer(FakeMultiParamListFields(params)) - fake_node = FakeNode(fake_pl, None) - - PriorBoxFrontExtractor.extract(fake_node) - - exp_res = { - 'op': 'PriorBox', - 'type': 'PriorBox', - 'clip': 0, - 'variance': np.array(['0.2', '0.3', '0.2', '0.3']), - 'img_size': '300', - 'img_h': '0', - 'img_w': '0', - 'step': '0,5', - 'step_h': '0', - 'step_w': '0', - 'offset': '0.6', - 'fixed_size': np.array(['1', '32']), - 'fixed_ratio': np.array(['0.2', '0.5']), - 'density': np.array(['0.3', '0.6']) - } - - for key in exp_res.keys(): - if key in ['width', 'height', 'variance', 'fixed_size', 'fixed_ratio', 'density']: - np.testing.assert_equal(fake_node[key], exp_res[key]) - else: - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/proposal_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/proposal_ext_test.py deleted file mode 100644 index 862dfa40186231..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/proposal_ext_test.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.proposal_ext import ProposalFrontExtractor -from openvino.tools.mo.ops.proposal import ProposalOp -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeProposalProtoLayer: - def __init__(self, val): - self.proposal_param = val - - -class TestProposalExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['Proposal'] = ProposalOp - - def test_proposal_no_pb_no_ml(self): - self.assertRaises(AttributeError, ProposalFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.proposal_ext.merge_attrs') - def test_proposal_ext_ideal_numbers(self, merge_attrs): - params = { - 'feat_stride': 1, - 'base_size': 16, - 'min_size': 16, - 'ratio': 1, - 'scale': 2, - 'pre_nms_topn': 6000, - 'post_nms_topn': 300, - 'nms_thresh': 0.7 - } - merge_attrs.return_value = { - **params - } - - fake_pl = FakeProposalProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - ProposalFrontExtractor.extract(fake_node) - - exp_res = { - 'type': "Proposal", - 'feat_stride': 1, - 'base_size': 16, - 'min_size': 16, - 'ratio': 1, - 'scale': 2, - 'pre_nms_topn': 6000, - 'post_nms_topn': 300, - 'nms_thresh': 0.7, - 'infer': ProposalOp.proposal_infer - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/proposal_python_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/proposal_python_ext_test.py deleted file mode 100644 index 9bfb4a138e32cc..00000000000000 --- 
a/tools/mo/unit_tests/mo/front/caffe/proposal_python_ext_test.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.caffe.proposal_python_ext import ProposalPythonFrontExtractor -from openvino.tools.mo.ops.proposal import ProposalOp -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeProposalPythonProtoLayer: - def __init__(self, val): - self.python_param = val - - -class TestProposalPythonExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['Proposal'] = ProposalOp - - def test_proposal_no_pb_no_ml(self): - self.assertRaises(AttributeError, ProposalPythonFrontExtractor.extract, None) - - def test_proposal_ext_ideal_numbers(self): - params = { - 'param_str': "'feat_stride': 16" - } - fake_pl = FakeProposalPythonProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - ProposalPythonFrontExtractor.extract(fake_node) - - exp_res = { - 'type': "Proposal", - 'feat_stride': 16, - 'base_size': 16, - 'min_size': 16, - 'ratio': [0.5, 1, 2], - 'scale': [8, 16, 32], - 'pre_nms_topn': 6000, - 'post_nms_topn': 300, - 'nms_thresh': 0.7, - 'infer': ProposalOp.proposal_infer - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) - - def test_proposal_ext_scales(self): - params = { - 'param_str': "'feat_stride': 16, 'scales': [1,2,3], 'ratios':[5, 6,7]" - } - fake_pl = FakeProposalPythonProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - ProposalPythonFrontExtractor.extract(fake_node) - - exp_res = { - 'type': "Proposal", - 'feat_stride': 16, - 'base_size': 16, - 'min_size': 16, - 'ratio': [5, 6, 7], - 'scale': [1, 2, 3], - 'pre_nms_topn': 6000, - 'post_nms_topn': 300, - 'nms_thresh': 0.7, - 'infer': ProposalOp.proposal_infer - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) - - def test_proposal_ext_scale(self): - params = { - 'param_str': "'feat_stride': 16, 'scale': [1,2,3], 'ratio':[5, 6,7]" - } - fake_pl = FakeProposalPythonProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - ProposalPythonFrontExtractor.extract(fake_node) - - exp_res = { - 'type': "Proposal", - 'feat_stride': 16, - 'base_size': 16, - 'min_size': 16, - 'ratio': [5, 6, 7], - 'scale': [1, 2, 3], - 'pre_nms_topn': 6000, - 'post_nms_topn': 300, - 'nms_thresh': 0.7, - 'infer': ProposalOp.proposal_infer - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/python_layer_extractor_test.py b/tools/mo/unit_tests/mo/front/caffe/python_layer_extractor_test.py deleted file mode 100644 index 5126a8bd45dd94..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/python_layer_extractor_test.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.caffe.python_layer_extractor import PythonFrontExtractorOp -from openvino.tools.mo.front.extractor import CaffePythonFrontExtractorOp -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakePythonProtoLayer: - def __init__(self, params: FakeMultiParam): - self.type = 'Python' - self.python_param = params - - -class FakePythonExtractor: - 
@classmethod - def extract(cls, node: Node): - return True - - -class TestPythonLayerExtractor(unittest.TestCase): - def test_python_extractor_for_op(self): - module = 'test_module' - layer = 'test_layer' - CaffePythonFrontExtractorOp.registered_ops['{}.{}'.format(module, layer)] = \ - lambda node: CaffePythonFrontExtractorOp.parse_param_str(node.pb.python_param.param_str) - params = FakeMultiParam({ - 'module': module, - 'layer': layer, - 'param_str': "'feat_stride': 16" - }) - ext = PythonFrontExtractorOp.extract(FakeNode(FakePythonProtoLayer(params), None)) - self.assertEqual({'feat_stride': 16}, ext) - - def test_python_extractor_for_extractors(self): - module = 'test_module' - layer = 'test_layer' - CaffePythonFrontExtractorOp.registered_ops['{}.{}'.format(module, layer)] = FakePythonExtractor - params = FakeMultiParam({ - 'module': module, - 'layer': layer, - 'param_str': "'feat_stride': 16" - }) - self.assertTrue(PythonFrontExtractorOp.extract(FakeNode(FakePythonProtoLayer(params), None))) diff --git a/tools/mo/unit_tests/mo/front/caffe/regionyolo_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/regionyolo_ext_test.py deleted file mode 100644 index e2f8b7a5eebe6b..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/regionyolo_ext_test.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.regionyolo_ext import RegionYoloFrontExtractor -from openvino.tools.mo.ops.regionyolo import RegionYoloOp -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeRegionYoloProtoLayer: - def __init__(self, val, val_f): - self.region_yolo_param = val - self.flatten_param = val_f - - -class TestReorgYoloExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['RegionYolo'] = RegionYoloOp - - def test_reogyolo_no_pb_no_ml(self): - self.assertRaises(AttributeError, RegionYoloFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.regionyolo_ext.merge_attrs') - def test_reogyolo_ext_ideal_numbers(self, merge_attrs_mock): - params = { - 'coords': 4, - 'classes': 20, - 'num': 5, - 'do_softmax': 1, - 'anchors': 5, - 'mask': 5, - } - params_flatten = { - 'axis': 1, - 'end_axis': -1 - } - merge_attrs_mock.return_value = { - **params, - **params_flatten - } - - fake_pl = FakeRegionYoloProtoLayer(FakeMultiParam(params), FakeMultiParam(params_flatten)) - fake_node = FakeNode(fake_pl, None) - - RegionYoloFrontExtractor.extract(fake_node) - - exp_res = { - 'type': "RegionYolo", - 'coords': 4, - 'classes': 20, - 'num': 5, - 'axis': 1, - 'end_axis': -1, - 'do_softmax': 1, - 'anchors': 5, - 'mask': 5, - 'infer': RegionYoloOp.regionyolo_infer - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/caffe/reorgyolo_ext_test.py b/tools/mo/unit_tests/mo/front/caffe/reorgyolo_ext_test.py deleted file mode 100644 index 3b4122e04c7cd2..00000000000000 --- a/tools/mo/unit_tests/mo/front/caffe/reorgyolo_ext_test.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -from openvino.tools.mo.front.caffe.reorgyolo_ext import ReorgYoloFrontExtractor -from openvino.tools.mo.ops.reorgyolo import ReorgYoloOp -from openvino.tools.mo.ops.op import Op -from 
unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import FakeNode - - -class FakeReorgYoloProtoLayer: - def __init__(self, val): - self.reorg_yolo_param = val - - -class TestReorgYoloExt(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['ReorgYolo'] = ReorgYoloOp - - def test_elu_no_pb_no_ml(self): - self.assertRaises(AttributeError, ReorgYoloFrontExtractor.extract, None) - - @patch('openvino.tools.mo.front.caffe.reorgyolo_ext.merge_attrs') - def test_elu_ext_ideal_numbers(self, merge_attrs_mock): - params = { - 'stride': 2 - } - merge_attrs_mock.return_value = { - **params - } - - fake_pl = FakeReorgYoloProtoLayer(FakeMultiParam(params)) - fake_node = FakeNode(fake_pl, None) - - ReorgYoloFrontExtractor.extract(fake_node) - - exp_res = { - 'type': "ReorgYolo", - 'stride': 2, - 'infer': ReorgYoloOp.reorgyolo_infer - } - - for key in exp_res.keys(): - self.assertEqual(fake_node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/common/__init__.py b/tools/mo/unit_tests/mo/front/common/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/common/layout_test.py b/tools/mo/unit_tests/mo/front/common/layout_test.py deleted file mode 100644 index 11330ae05f4560..00000000000000 --- a/tools/mo/unit_tests/mo/front/common/layout_test.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.common.layout import get_batch_dim, get_width_dim, get_height_dim, get_features_dim, get_depth_dim, \ - shape_for_layout -from openvino.tools.mo.utils.error import Error - - -class TestLayoutFunctions(unittest.TestCase): - def test_get_batch_dim_NCHW(self): - self.assertEqual(get_batch_dim('NCHW', 4), 0) - - def test_get_batch_dim_NHWC(self): - self.assertEqual(get_batch_dim('NHWC', 4), 0) - - def test_get_batch_dim_NCDHW(self): - self.assertEqual(get_batch_dim('NCHW', 5), 0) - - def test_get_batch_dim_NDHWC(self): - self.assertEqual(get_batch_dim('NHWC', 5), 0) - - def test_get_features_dim_NCHW(self): - self.assertEqual(get_features_dim('NCHW', 4), 1) - - def test_get_features_dim_NHWC(self): - self.assertEqual(get_features_dim('NHWC', 4), 3) - - def test_get_features_dim_NCDHW(self): - self.assertEqual(get_features_dim('NCHW', 5), 1) - - def test_get_features_dim_NDHWC(self): - self.assertEqual(get_features_dim('NHWC', 5), 4) - - def test_get_width_dim_NCHW(self): - self.assertEqual(get_width_dim('NCHW', 4), 3) - - def test_get_width_dim_NHWC(self): - self.assertEqual(get_width_dim('NHWC', 4), 2) - - def test_get_width_dim_NCDHW(self): - self.assertEqual(get_width_dim('NCHW', 5), 4) - - def test_get_width_dim_NDHWC(self): - self.assertEqual(get_width_dim('NHWC', 5), 3) - - def test_get_height_dim_NCHW(self): - self.assertEqual(get_height_dim('NCHW', 4), 2) - - def test_get_height_dim_NHWC(self): - self.assertEqual(get_height_dim('NHWC', 4), 1) - - def test_get_height_dim_NCDHW(self): - self.assertEqual(get_height_dim('NCHW', 5), 3) - - def test_get_height_dim_NDHWC(self): - self.assertEqual(get_height_dim('NHWC', 5), 2) - - def test_get_depth_dim_NCDHW(self): - self.assertEqual(get_depth_dim('NCHW', 5), 2) - - def test_get_depth_dim_NDHWC(self): - self.assertEqual(get_depth_dim('NHWC', 5), 1) - - def test_get_batch_dim_wrong_layout(self): - self.assertRaises(AssertionError, get_batch_dim, 'NCDHW', 5) - - def test_get_width_dim_wrong_layout(self): - 
self.assertRaises(AssertionError, get_width_dim, 'NCDHW', 5) - - def test_get_height_dim_wrong_layout(self): - self.assertRaises(AssertionError, get_height_dim, 'NCDHW', 5) - - def test_get_features_dim_wrong_layout(self): - self.assertRaises(AssertionError, get_features_dim, 'NCDHW', 5) - - def test_shape_for_layout_NCHW(self): - self.assertListEqual([2, 3, 4, 5], list(shape_for_layout('NCHW', batch=2, features=3, height=4, width=5))) - - def test_shape_for_layout_NHWC(self): - self.assertListEqual([2, 4, 5, 3], list(shape_for_layout('NHWC', batch=2, features=3, height=4, width=5))) - - def test_shape_for_layout_missing_batch(self): - with self.assertRaises(Error): - shape_for_layout('NCHW', features=3, height=4, width=5) - - def test_shape_for_layout_missing_features(self): - with self.assertRaises(Error): - shape_for_layout('NCHW', batch=2, height=4, width=5) - - def test_shape_for_layout_missing_height(self): - with self.assertRaises(Error): - shape_for_layout('NHWC', batch=2, features=3, width=5) - - def test_shape_for_layout_missing_width(self): - with self.assertRaises(Error): - shape_for_layout('NHWC', batch=2, features=3, height=4) - - def test_shape_for_layout_unknown_parameter(self): - with self.assertRaises(Error): - shape_for_layout('NHWC', batch=2, features=3, height=4, width=5, unknown_parameter=123) diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/__init__.py b/tools/mo/unit_tests/mo/front/common/partial_infer/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/caffe_fallback_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/caffe_fallback_test.py deleted file mode 100644 index b656b5e038f67b..00000000000000 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/caffe_fallback_test.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import MagicMock - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.caffe_fallback import build_net -from unit_tests.utils.extractors import FakeMultiParam, FakeValue -from unit_tests.utils.graph import build_graph - - -class Net: - def __init__(self, blobs): - self.blobs = blobs - self.reshape_blob = MagicMock(return_value=np.array([1, 1, 1, 1])) - self.reshape = MagicMock(return_value=np.array([1, 1, 1, 1])) - self.forward = MagicMock(return_value={'top_node': FakeValue(np.array([1, 3, 112, 112]))}) - - -my_mock_net = None - - -class Caffe: - def __init__(self): - self.TEST = 'TEST' - - def Net(self, *args): - return my_mock_net - - -class TestCaffeNativePartialInfer(unittest.TestCase): - @classmethod - def setUpClass(cls): - import sys - sys.modules['caffe'] = Caffe() - cls.nodes_attributes = { - 'node_1': {'type': 'Parameter', 'kind': 'op'}, - 'node_2': {'type': 'Parameter', 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'kind': 'op'}, - 'node_4': {'type': 'Identity', 'kind': 'op'}, - 'op_output': { 'kind': 'op', 'op': 'Result'} - } - - def test_build_net_equal_inputs(self): - global my_mock_net - my_blobs = { - 'node_1': FakeValue(np.array([1, 3, 227, 227])), - 'node_2': FakeValue(np.array([1, 3, 224, 224])) - } - my_mock_net = Net(my_blobs) - graph = build_graph(self.nodes_attributes, - [ - ('node_1', 'node_3'), - ('node_2', 'node_3'), - ('node_3', 'node_4'), - ('node_4', 'op_output') - ], - { - 'node_4': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227])}, - 'node_2': {'shape': 
np.array([1, 3, 224, 224])}, - 'node_3': {'top': 'top_node'} - }) - graph.proto_path = 'path_to_proto' - graph.caffemodel_path = 'path_to_proto' - build_net(graph) - my_mock_net.reshape.assert_not_called() - my_mock_net.forward.assert_called_once_with() - self.assertIsNotNone(graph.caffe_net) - - def test_build_net_not_equal_inputs(self): - global my_mock_net - input_node_param = { - 'shape': np.array([1, 3, 112, 112]), - 'reshape': MagicMock(return_value=134) - } - my_blobs = { - 'node_1': FakeMultiParam(input_node_param), - } - my_mock_net = Net(my_blobs) - graph = build_graph(self.nodes_attributes, - [ - ('node_1', 'node_3'), - ('node_3', 'node_4'), - ('node_4', 'op_output') - ], - {'node_4': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227])}, - 'node_3': {'top': 'top_node'} - }, - nodes_with_edges_only=True) - graph.proto_path = 'path_to_proto' - graph.caffemodel_path = 'path_to_proto' - build_net(graph) - my_mock_net.reshape.assert_called_once_with() - my_mock_net.forward.assert_called_once_with() - self.assertIsNotNone(graph.caffe_net) diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/concat_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/concat_test.py deleted file mode 100644 index 4808f992038ab5..00000000000000 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/concat_test.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.concat import concat_infer -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'kind': 'data', 'value': None}, - 'node_2': {'kind': 'data', 'value': None}, - 'concat': {'type': 'Concat', 'kind': 'op'}, - 'node_3': {'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - } - - -class TestConcatPartialInfer(): - @pytest.mark.parametrize("shape1, shape2, output_shape, axis",[([1, 3, 227, 227], [1, 3, 220, 227], - [1, 3, 447, 227], 2), - ([1, 3, 227, 227], [1, 3, 227, 220], [1, 3, 227, 447], -1), - ([1, 3, dynamic_dimension_value, 227], [1, dynamic_dimension_value, 227, 220], [1, 3, 227, 447], -1), - ([1, 3, 10, 227], [1, 3, 10, dynamic_dimension_value], [1, 3, 10, dynamic_dimension_value], -1), - ]) - def test_concat_infer(self, shape1, shape2, output_shape, axis): - graph = build_graph(nodes_attributes, - [('node_1', 'concat'), - ('node_2', 'concat'), - ('concat', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': shape_array(shape1)}, - 'node_2': {'shape': shape_array(shape2)}, - 'concat': {'axis': axis} - }) - - concat_node = Node(graph, 'concat') - concat_infer(concat_node) - res_shape = graph.node['node_3']['shape'] - assert strict_compare_tensors(output_shape, res_shape) - - @pytest.mark.parametrize("value1, value2, output_value, axis",[(shape_array([1]), - shape_array([4]), shape_array([1, 4]), 0), - (shape_array([dynamic_dimension_value]), shape_array([4]), - shape_array([dynamic_dimension_value, 4]), -1), - ]) - def test_concat_value_infer(self, value1, value2, output_value, axis): - graph = build_graph(nodes_attributes, - [('node_1', 'concat'), - ('node_2', 'concat'), - ('concat', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': 
{'shape': output_value.shape, 'value': output_value}, - 'node_1': {'shape': value1.shape, 'value': value1}, - 'node_2': {'shape': value2.shape, 'value': value2}, - 'concat': {'axis': axis} - }) - - concat_node = Node(graph, 'concat') - concat_infer(concat_node) - res_value = graph.node['node_3']['value'] - assert strict_compare_tensors(output_value, res_value) - - def test_concat_infer_not_match(self): - graph = build_graph(nodes_attributes, - [('node_1', 'concat'), - ('node_2', 'concat'), - ('concat', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227])}, - 'node_2': {'shape': np.array([1, 2, 227, 227])}, - 'concat': {'axis': 2} - }) - - concat_node = Node(graph, 'concat') - with pytest.raises(Error, match="Concat input shapes do not match for node*"): - concat_infer(concat_node) - - def test_concat_infer_no_shape(self): - graph = build_graph(nodes_attributes, - [('node_1', 'concat'), - ('node_2', 'concat'), - ('concat', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227])}, - 'node_2': {'shape': None}, - 'concat': {'axis': 2} - }) - - concat_node = Node(graph, 'concat') - with pytest.raises(Error, match="One of the input shapes is not defined for node *"): - concat_infer(concat_node) diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/crop_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/crop_test.py deleted file mode 100644 index 504a4168129fda..00000000000000 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/crop_test.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.crop import crop_infer -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'value': None, 'kind': 'data'}, - 'node_2': {'value': None, 'kind': 'data'}, - 'crop_1': {'op': 'Crop', 'kind': 'op'}, - 'node_3': {'value': None, 'kind': 'data'}, - 'op_output': { 'kind': 'op', 'op': 'Result'} - } - - -class TestCropInfer(unittest.TestCase): - def test_crop_infer_ideal(self): - graph = build_graph(nodes_attributes, - [('node_1', 'crop_1'), - ('node_2', 'crop_1'), - ('crop_1', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 2, 500, 500])}, - 'node_2': {'shape': np.array([1, 2, 256, 256])}, - 'crop_1': {'axis': 2, 'offset': [0, 0], 'dim': None} - }) - - crop_node = Node(graph, 'crop_1') - - crop_infer(crop_node) - exp_shape = np.array([1, 2, 256, 256]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - self.assertEqual(crop_node.axis, [2, 3]) - self.assertEqual(crop_node.offset, [0, 0]) - self.assertEqual(crop_node.dim, [256, 256]) - - def test_crop_infer_negative_axis(self): - graph = build_graph(nodes_attributes, - [('node_1', 'crop_1'), - ('node_2', 'crop_1'), - ('crop_1', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 2, 500, 500])}, - 'node_2': {'shape': np.array([1, 2, 256, 256])}, - 'crop_1': {'axis': -1, 'offset': [0, 0], 'dim': None} - }) - - crop_node = Node(graph, 'crop_1') - - crop_infer(crop_node) - exp_shape = np.array([1, 2, 500, 256]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, 
len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - self.assertEqual(crop_node.axis, [3]) - self.assertEqual(crop_node.offset, [0]) - self.assertEqual(crop_node.dim, [256]) - - def test_crop_infer_no_shape(self): - graph = build_graph(nodes_attributes, - [('node_1', 'crop_1'), - ('node_2', 'crop_1'), - ('crop_1', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 2, 500, 500])}, - 'node_2': {'shape': None}, - 'crop_1': {'axis': 2, 'offset': [0, 0], 'dim': None} - }) - - crop_node = Node(graph, 'crop_1') - - crop_infer(crop_node) - self.assertIsNone(graph.node['node_3']['shape']) - - def test_crop_infer_one_shape(self): - graph = build_graph(nodes_attributes, - [('node_1', 'crop_1'), - ('crop_1', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 2, 500, 500])}, - 'crop_1': {'axis': 2, 'offset': [0], 'dim': None} - }) - - crop_node = Node(graph, 'crop_1') - - crop_infer(crop_node) - self.assertIsNone(graph.node['node_3']['shape']) - - def test_crop_infer_out_offset(self): - graph = build_graph(nodes_attributes, - [('node_1', 'crop_1'), - ('node_2', 'crop_1'), - ('crop_1', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 2, 500, 500])}, - 'node_2': {'shape': np.array([1, 2, 256, 256])}, - 'crop_1': {'axis': 2, 'offset': [300], 'dim': None} - }) - - crop_node = Node(graph, 'crop_1') - - crop_infer(crop_node) - self.assertIsNone(graph.node['node_3']['shape']) diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/elemental_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/elemental_test.py deleted file mode 100644 index 7a782b72bbc1e9..00000000000000 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/elemental_test.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer - - -class FakeNode: - def __init__(self, blob): - self.blob = blob - - def in_shape(self): - return self.blob - - -class TestElementalInference(unittest.TestCase): - @patch('openvino.tools.mo.front.common.partial_infer.elemental.single_output_infer') - def test_copy_shape_infer(self, single_output_infer_mock): - single_output_infer_mock.return_value = 0 - node = FakeNode(np.array([1, 2])) - copy_shape_infer(node) - self.assertTrue(single_output_infer_mock.called) diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/eltwise_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/eltwise_test.py deleted file mode 100644 index fd7acc9fa076ae..00000000000000 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/eltwise_test.py +++ /dev/null @@ -1,245 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import pytest - -from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer, eltwise_reverse_infer -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, strict_compare_tensors, \ - dynamic_dimension_value, reverse_bypass_infer -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.graph 
import result, regular_op_with_empty_data, connect, \ - build_graph, shaped_parameter - -nodes_attributes = {'node_1': {'value': 2, 'kind': 'data'}, - 'node_2': {'value': 3, 'kind': 'data'}, - 'eltw_1': {'kind': 'op'}, - 'node_3': {'value': None, 'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - } - - - -class TestEltwiseInfer(): - @pytest.mark.parametrize("value1, shape1, value2, shape2, shape_infer, exp_value, exp_shape",[ - (np.array(2), [], np.array(3), [], lambda a, b: np.multiply(a, b), np.array(6), []), - (np.array(2), [], np.array(3), [], lambda a, b: np.maximum(a, b), np.array(3), []), - (np.array(2), [], np.array(3), [], lambda a, b: np.add(a, b), np.array(5), []), - (None, [1, 5], None, [1, 1], lambda a, b: np.add(a, b), None, [1, 5]), - (None, [dynamic_dimension_value, 3], None, [1, 1], lambda a, b: np.add(a, b), None, - [dynamic_dimension_value, 3]), - (None, [dynamic_dimension_value, 3], None, [1, dynamic_dimension_value], lambda a, b: np.add(a, b), None, - [dynamic_dimension_value, 3]), - (None, [4, 5, dynamic_dimension_value, 3], None, [1, dynamic_dimension_value], lambda a, b: np.add(a, b), None, - [4, 5, dynamic_dimension_value, 3]), - (None, [1, 10, 20, 30], None, [dynamic_dimension_value, 10, 20, 30], lambda a, b: np.add(a, b), None, - [dynamic_dimension_value, 10, 20, 30]), - # dynamic value propagation - (shape_array([dynamic_dimension_value, 5]), [2], np.array(3), [], lambda a, b: np.add(a, b), - shape_array([dynamic_dimension_value, 8]), [2]), - (shape_array([dynamic_dimension_value, 5]), [2], np.array([3, 7]), [], lambda a, b: np.add(a, b), - shape_array([dynamic_dimension_value, 12]), [2]), - ]) - def test_eltwise_infer(self, value1, shape1, value2, shape2, shape_infer, exp_value, exp_shape): - graph = build_graph(nodes_attributes, - [('node_1', 'eltw_1'), - ('node_2', 'eltw_1'), - ('eltw_1', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': shape_array(value1).shape if value1 is not None else shape_array(shape1), - 'value': value1}, - 'node_2': {'shape': shape_array(value2).shape if value2 is not None else shape_array(shape2), - 'value': value2} - }) - - graph.graph['layout'] = 'NCHW' - - eltwise_node = Node(graph, 'eltw_1') - - eltwise_infer(eltwise_node, shape_infer) - res_shape = graph.node['node_3']['shape'] - res_value = eltwise_node.out_node().value - if exp_value is not None: - assert strict_compare_tensors(res_value, shape_array(exp_value)) - assert strict_compare_tensors(res_shape, shape_array(exp_shape)) - - def test_eltwise_infer_none_val(self): - graph = build_graph(nodes_attributes, - [('node_1', 'eltw_1'), - ('node_2', 'eltw_1'), - ('eltw_1', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 256, 256]), 'value': None}, - 'node_2': {'shape': np.array([1, 3, 256, 256])} - }) - graph.graph['layout'] = 'NCHW' - eltwise_node = Node(graph, 'eltw_1') - - eltwise_infer(eltwise_node, lambda a, b: a * b) - exp_shape = np.array([1, 3, 256, 256]) - res_shape = graph.node['node_3']['shape'] - res_value = eltwise_node.out_node().value - for i in range(0, len(exp_shape)): - assert exp_shape[i] == res_shape[i] - - assert res_value is None - - def test_eltwise_infer_none_min_max(self): - graph = build_graph(nodes_attributes, - [('node_1', 'eltw_1'), - ('node_2', 'eltw_1'), - ('eltw_1', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 257, 256])}, - 'node_2': {'shape': np.array([1, 3, 256, 
257])} - }) - graph.graph['layout'] = 'NCHW' - eltwise_node = Node(graph, 'eltw_1') - - with pytest.raises(Error, match='Input shapes mismatch*'): - eltwise_infer(eltwise_node) - - -dyn = dynamic_dimension_value - - -class TestElementwiseReverseInfer(unittest.TestCase): - @staticmethod - def build_and_test_reverse_inference(inp_shape_1, inp_shape_2, out_shape, ref_shape, auto_broadcast='numpy'): - in_port_with_defined_shape = 0 if inp_shape_1 is not None else 1 - defined_shape = shape_array(inp_shape_1 if inp_shape_1 is not None else inp_shape_2) - - nodes = { - **shaped_parameter('undefined_shape_data', None, {'reverse_infer': Parameter.reverse_infer}), - **shaped_parameter('data', shape_array(defined_shape), {'reverse_infer': Parameter.reverse_infer}), - **regular_op_with_empty_data('elementwise', {'op': 'Add', 'type': 'Add', - 'infer': eltwise_infer, - 'reverse_infer': eltwise_reverse_infer, - 'auto_broadcast': auto_broadcast}), - **result('res'), - } - - edges = [ - *connect('undefined_shape_data', '{}:elementwise'.format(int(not in_port_with_defined_shape))), - *connect('data', '{}:elementwise'.format(in_port_with_defined_shape)), - *connect('elementwise', 'res') - ] - - graph = build_graph(nodes, edges) - graph.stage = 'middle' - Node(graph, 'elementwise').out_port(0).data.set_shape(shape_array(out_shape)) - Node(graph, 'elementwise').in_port(in_port_with_defined_shape).data.set_shape(defined_shape) - - partial_infer(graph) - actual_shape = Node(graph, 'undefined_shape_data').out_port(0).data.get_shape() - if ref_shape is None: - assert actual_shape == ref_shape - else: - assert strict_compare_tensors(actual_shape, shape_array(ref_shape)) - - def test_reverse_infer_1(self): - self.build_and_test_reverse_inference(inp_shape_1=[dyn, dyn], - inp_shape_2=None, - out_shape=[dyn, dyn, dyn, dyn], - ref_shape=[dyn, dyn, dyn, dyn]) - - def test_reverse_infer_2(self): - self.build_and_test_reverse_inference(inp_shape_1=None, - inp_shape_2=[dyn, dyn], - out_shape=[dyn, dyn, dyn, dyn], - ref_shape=[dyn, dyn, dyn, dyn]) - - def test_reverse_infer_3(self): - self.build_and_test_reverse_inference(inp_shape_1=[1], - inp_shape_2=None, - out_shape=[dyn, 400, 400, 3], - ref_shape=[dyn, 400, 400, 3]) - - def test_reverse_infer_4(self): - self.build_and_test_reverse_inference(inp_shape_1=[4, 1], - inp_shape_2=None, - out_shape=[dyn, dyn, 4, 3], - ref_shape=[dyn, dyn, dyn, 3]) - - def test_reverse_infer_5(self): - self.build_and_test_reverse_inference(inp_shape_1=[4, 1], - inp_shape_2=None, - out_shape=[dyn, dyn, 4, 1], - ref_shape=[dyn, dyn, dyn, 1]) - - def test_reverse_infer_6(self): - # both output and input has the same rank, cannot deduce other inputs rank - with self.assertRaisesRegex(Error, "Model Optimizer is unable to deduce input shapes"): - self.build_and_test_reverse_inference(inp_shape_1=[dyn, dyn, dyn, dyn], - inp_shape_2=None, - out_shape=[dyn, dyn, 4, 1], - ref_shape=None) - - def test_reverse_infer_7(self): - self.build_and_test_reverse_inference(inp_shape_1=[4, dyn], - inp_shape_2=None, - out_shape=[1, dyn, dyn, 1], - ref_shape=[1, dyn, dyn, 1]) - - def test_reverse_infer_8(self): - with self.assertRaisesRegex(AssertionError, "Shapes of Elementwise node '.*' are not compatible"): - self.build_and_test_reverse_inference(inp_shape_1=[4, dyn], - inp_shape_2=None, - out_shape=[1, dyn, 7, 1], - ref_shape=None) - - def test_reverse_infer_no_broadcast(self): - self.build_and_test_reverse_inference(inp_shape_1=[1, 4, dyn, dyn], - inp_shape_2=None, - out_shape=[1, dyn, dyn, 1], - ref_shape=[1, 
4, dyn, 1], - auto_broadcast='none') - - -class TestUnaryElementwiseReverseInfer(unittest.TestCase): - @staticmethod - def build_and_test_reverse_inference(out_shape): - - nodes = { - **shaped_parameter('undefined_shape_data', None, {'reverse_infer': Parameter.reverse_infer}), - **regular_op_with_empty_data('elementwise', - {'op': 'Sqrt', 'type': 'Sqrt', - 'infer': eltwise_infer, - 'reverse_infer': lambda node: reverse_bypass_infer(node,in_ports=[0])}), - **result('res'), - } - - edges = [ - *connect('undefined_shape_data', '0:elementwise'), - *connect('elementwise', 'res'), - ] - - graph = build_graph(nodes, edges) - graph.stage = 'middle' - Node(graph, 'elementwise').out_port(0).data.set_shape(shape_array(out_shape)) - - partial_infer(graph) - actual_shape = Node(graph, 'elementwise').in_port(0).data.get_shape() - - # check that out_shape is transferred into only existing in_port(0) - assert strict_compare_tensors(actual_shape, shape_array(out_shape)) - - def test_reverse_infer_1(self): - self.build_and_test_reverse_inference(out_shape=[dyn, dyn, dyn, dyn]) - - def test_reverse_infer_2(self): - self.build_and_test_reverse_inference(out_shape=[dyn, dyn]) - - def test_reverse_infer_3(self): - self.build_and_test_reverse_inference(out_shape=[1, 100]) diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/multi_box_detection_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/multi_box_detection_test.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/multi_box_prior_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/multi_box_prior_test.py deleted file mode 100644 index 3701cad39d6d2f..00000000000000 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/multi_box_prior_test.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.multi_box_prior import multi_box_prior_infer_mxnet -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'value': None, 'kind': 'data'}, - 'node_2': {'value': None, 'kind': 'data'}, - 'prior_box_1': {'type': 'PriorBox', 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'value': None, 'kind': 'data'} - } - - -class TestMultiBoxPriorInfer(unittest.TestCase): - def test_prior_box_infer_ideal(self): - graph = build_graph(nodes_attributes, - [('node_1', 'prior_box_1'), - ('node_2', 'prior_box_1'), - ('prior_box_1', 'node_3')], - {'node_1': {'shape': np.array([1, 1024, 19, 19])}, - 'node_2': {'shape': np.array([1, 3, 300, 300])}, - 'prior_box_1': {'aspect_ratio': [1.0, 2.0, 0.5, 3.0, 0.333333333333], - 'min_size': [0.2, 0.272], - 'max_size': '', 'offset': 0.5, 'step': 0.2, 'sizes': [0.2, 0.272]}, - 'node_3': {'shape': np.array([1, 2, 3])}, - }) - - multi_box_prior_node = Node(graph, 'prior_box_1') - - multi_box_prior_infer_mxnet(multi_box_prior_node) - exp_shape = np.array([1, 2, 8664]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - self.assertEqual(multi_box_prior_node.min_size, [0.2, 0.272]) - self.assertEqual(multi_box_prior_node.max_size, '') - self.assertEqual(multi_box_prior_node.aspect_ratio, [1.0, 2.0, 0.5, 3.0, 0.333333333333]) - self.assertEqual(round(multi_box_prior_node.step, 1), 0.2) - self.assertEqual(round(multi_box_prior_node.offset, 1), 0.5) 
diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/roipooling_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/roipooling_test.py deleted file mode 100644 index 114e81a279a945..00000000000000 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/roipooling_test.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.roipooling import roipooling_infer -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'kind': 'data'}, - 'node_2': {'kind': 'data'}, - 'node_3': {'kind': 'data'}, - 'node_4': {'kind': 'data'}, - 'roipool': {'type': 'ROIPooling', 'kind': 'op', 'pooled_h': None, 'pooled_w': None}, - 'output': {'value': None, 'kind': 'data'}, - 'op_output': { 'kind': 'op', 'op': 'Result'}, - } - - -class TestRoipoolingInfer(unittest.TestCase): - def test_roipooling_infer_ideal(self): - graph = build_graph(nodes_attributes, - [('node_1', 'roipool'), - ('node_2', 'roipool'), - ('roipool', 'output'), - ('output', 'op_output') - ], - {'output': {'shape': None}, - 'node_1': {'shape': np.array([1, 256, 20, 20])}, - 'node_2': {'shape': np.array([150, 5])}, - 'roipool': {'pooled_h': 6, 'pooled_w': 6} - }) - graph.graph['layout'] = 'NCHW' - roipooling_node = Node(graph, 'roipool') - - roipooling_infer(roipooling_node) - exp_shape = np.array([150, 256, 6, 6]) - res_shape = graph.node['output']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_roipooling_infer_no_shape(self): - graph = build_graph(nodes_attributes, - [('node_1', 'roipool'), - ('node_2', 'roipool'), - ('roipool', 'output'), - ('output', 'op_output') - ], - {'output': {'shape': None}, - 'node_1': {'shape': None}, - 'node_2': {'shape': np.array([1, 256])}, - 'roipool': {'pooled_h': 6, 'pooled_w': 6} - }) - graph.graph['layout'] = 'NCHW' - - roipooling_node = Node(graph, 'roipool') - - roipooling_infer(roipooling_node) - self.assertIsNone(graph.node['output']['shape']) - - def test_roipooling_infer_tf(self): - graph = build_graph(nodes_attributes, - [('node_1', 'roipool'), - ('node_2', 'roipool'), - ('node_3', 'roipool'), - ('node_4', 'roipool'), - ('roipool', 'output'), - ('output', 'op_output') - ], - {'output': {'shape': None}, - 'node_1': {'shape': np.array([1, 20, 20, 256])}, - 'node_2': {'shape': np.array([150, 5])}, - 'node_3': {'shape': np.array([150])}, - 'node_4': {'shape': np.array([2], dtype=np.int64), 'value': np.array([7, 6], - dtype=np.int64)}, - }) - graph.graph['layout'] = 'NHWC' - roipooling_node = Node(graph, 'roipool') - - roipooling_infer(roipooling_node) - exp_shape = np.array([150, 7, 6, 256]) - res_shape = graph.node['output']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) diff --git a/tools/mo/unit_tests/mo/front/common/partial_infer/utils_test.py b/tools/mo/unit_tests/mo/front/common/partial_infer/utils_test.py deleted file mode 100644 index 59baf44e8dc5b3..00000000000000 --- a/tools/mo/unit_tests/mo/front/common/partial_infer/utils_test.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import pytest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array, is_fully_defined, \ - dynamic_dimension_value, dynamic_dimension, 
shape_array, compatible_shapes, shape_delete, shape_insert, \ - strict_compare_tensors, clarify_partial_shape -from openvino.tools.mo.utils.error import Error - - -def gen_masked_array(array, masked_indices): - """ - Creates a masked array from the input array by masking specific elements. - - :param array: the input array - :param masked_indices: element indices to be masked - :return: the result masked array - """ - res = np.ma.masked_array(array) - for index in masked_indices: - res[index] = np.ma.masked - return res - - -class TestIsFullyDefinedTest(): - @pytest.mark.parametrize("data, result",[(None, False), - (int64_array([2, 3, 5, 7]), True), # int64 array with valid values - (np.array([2, 3, 5, 7]), True), # any numpy array with valid values - (np.array([2, dynamic_dimension_value]), True), # array with dynamic dimension value is fully defined! - (shape_array([2, dynamic_dimension_value, 5]), False), # masked array with at least one masked element - (shape_array([2, 4, 5]), True), # masked array with no masked elements is fully defined - (dynamic_dimension, False), # dynamic dimension is not fully defined - (dynamic_dimension_value, True), # dynamic dimension value is fully defined - ((dynamic_dimension_value, dynamic_dimension_value), True), # list with dynamic dimension values is - # fully defined - ((dynamic_dimension, 1), False), # tuple with dynamic dimension is not fully defined - ([dynamic_dimension, 1], False), # list with dynamic dimension is not fully defined - ]) - def test_is_fully_defined(self, data, result): - assert is_fully_defined(data) == result - - -class TestShapeArrayTest(): - @pytest.mark.parametrize("data, ref, result",[([1], shape_array([1]), True), - # if we provide a list with dynamic_dimension_value then it is converted to dynamic dimension - ([dynamic_dimension_value, 5], gen_masked_array([1, 5], [0]), True), - # if we provide a list with dynamic_dimension then the generated shape array still have it - ([7, dynamic_dimension], gen_masked_array([7, 1], [1]), True), - # negative test to make sure that np.ma.allequal works properly - ([2], gen_masked_array([1], []), False), - ]) - def test_shape_array(self, data, ref, result): - assert strict_compare_tensors(shape_array(data), ref) == result - - -class TestCompareShapesTest(): - @pytest.mark.parametrize("input1, input2, result",[(gen_masked_array([1, 2, 3], []), - gen_masked_array([1, 2, 3], []), True), - (gen_masked_array([4, 2, 3], []), gen_masked_array([1, 2, 3], []), False), - (gen_masked_array([1, 2], []), gen_masked_array([1, 2, 3], []), False), - (gen_masked_array([1, 2, 3], []), gen_masked_array([1, 2], []), False), - (gen_masked_array([1, 2, 3], [1]), gen_masked_array([1, 5, 3], [1]), True), # [1, d, 3] vs [1, d, 3] - (gen_masked_array([1, 2, 3], [2]), gen_masked_array([1, 5, 3], [1]), True), # [1, 2, d] vs [1, d, 3] - (gen_masked_array([1, 2, 3], []), gen_masked_array([1, 5, 3], [1]), True), # [1, 2, 3] vs [1, d, 3] - (gen_masked_array([1, 2, 3], [0]), gen_masked_array([1, 5, 3], []), False), # [d, 2, 3] vs [1, 5, 3] - (np.array([1, 2, 3]), gen_masked_array([1, 5, 3], [1]), True), # [1, 2, 3] vs [1, d, 3] - (np.array([1, 2]), gen_masked_array([1, 5, 3], [1]), False), - (np.array([1, 2]), np.array([1, 2]), True), - (np.array([1, 2]), np.array([3, 2]), False), - ]) - def test_compare_shapes(self, input1, input2, result): - assert compatible_shapes(input1, input2) == result - - -class TestShapeDeleteTest(): - @pytest.mark.parametrize("shape, indices, result",[(gen_masked_array([1, 2, 3], []), [], - 
gen_masked_array([1, 2, 3], [])), - # [1, d, 3] -> [d, 3]. Indices input is a list - (gen_masked_array([1, 2, 3], [1]), [0], gen_masked_array([2, 3], [0])), - # [1, d, 3] -> [d, 3]. Indices input is a numpy array - (gen_masked_array([1, 2, 3], [1]), np.array([0]), gen_masked_array([2, 3], [0])), - # [1, d, 3] -> [d, 3]. Indices input is a masked array - (gen_masked_array([1, 2, 3], [1]), gen_masked_array([0], []), gen_masked_array([2, 3], [0])), - # [1, d, 3] -> [d, 3]. Indices input is a numpy array with scalar - (gen_masked_array([1, 2, 3], [1]), np.array(0), gen_masked_array([2, 3], [0])), - # [1, d, 3] -> [d, 3]. Indices input is an integer - (gen_masked_array([1, 2, 3], [1]), 0, gen_masked_array([2, 3], [0])), # [1, d, 3] -> [d, 3] - (gen_masked_array([1, 2, 3, 4], [1]), [0, 2], gen_masked_array([2, 4], [0])), # [1, d, 3, 4] -> [d, 4] - (gen_masked_array([1, 2, 3], [1]), [0, 2, 1], gen_masked_array([], [])), # [1, d, 3] -> [] - (gen_masked_array([1, 2, 3], [1]), [0, 2], gen_masked_array([2], [0])), # [1, d, 3] -> [d] - # [1, d, d, 4] -> [d, d] - (gen_masked_array([1, 2, 3, 4], [1, 2]), [3, 0], gen_masked_array([2, 3], [0, 1])), - (gen_masked_array([1, 2, 3, 4], [2]), 3, gen_masked_array([1, 2, 3], [2])), # [1, 2, d, 4] -> [1, 2, d] - ([1, 2, 3, 4], [1], [1, 3, 4]), # [1, 2, 3, 4] -> [1, 3, 4]. Input is a regular lists - (np.array([1, 2, 3, 4]), [1], [1, 3, 4]), # [1, 2, 3, 4] -> [1, 3, 4]. Input is a regular arrays - (np.array([1, 2, 3, 4]), [-1, -3], [1, 3]), # [1, 2, 3, 4] -> [1, 3]. Negative indices - (np.array([1, 2, 3, 4]), -2, [1, 2, 4]), # [1, 2, 3, 4] -> [1, 2, 4]. Negative index - ]) - def test_shape_delete(self, shape, indices, result): - assert strict_compare_tensors(shape_delete(shape, indices), result) - - def test_shape_delete_raise_exception(self): - with pytest.raises(Error, match ='.*Incorrect parameter type.*'): - shape_delete(gen_masked_array([1, 2, 3], []), {}) - - -class TestShapeInsertTest(): - @pytest.mark.parametrize("shape, pos, values, result",[(gen_masked_array([1, 2, 3], []), 1, [5], - gen_masked_array([1, 5, 2, 3], [])), - (gen_masked_array([1, 2, 3], [1]), 1, [5], gen_masked_array([1, 5, 2, 3], [2])), - (gen_masked_array([1, 2, 3], [1]), 1, [dynamic_dimension], gen_masked_array([1, 5, 2, 3], [1, 2])), - (gen_masked_array([1, 2, 3], [1]), 0, [dynamic_dimension], gen_masked_array([5, 1, 2, 3], [0, 2])), - (gen_masked_array([1, 2, 3], [1]), np.int64(0), [dynamic_dimension], - gen_masked_array([5, 1, 2, 3], [0, 2])), - (gen_masked_array([1, 2, 3], [1]), 3, [dynamic_dimension], gen_masked_array([1, 2, 3, 5], [1, 3])), - (gen_masked_array([1, 2, 3], [1]), 3, [dynamic_dimension, dynamic_dimension], - gen_masked_array([1, 2, 3, 5, 6], [1, 3, 4])), - (gen_masked_array([1], [0]), 0, [7, dynamic_dimension], gen_masked_array([7, 5, 2], [1, 2])), - ]) - def test_shape_insert(self, shape, pos, values, result): - assert strict_compare_tensors(shape_insert(shape, pos, values), result) - - def test_shape_insert_raise_exception(self): - with pytest.raises(Error, match='.*Incorrect parameter type.*'): - shape_insert(gen_masked_array([1, 2, 3], []), 2, {}) - - -class Testmo_array_test(): - @pytest.mark.parametrize("data, result",[(mo_array([2, 3, 5, 7]), np.array([2, 3, 5, 7])), - (mo_array([2., 3., 5., 7.], dtype=np.float64), np.array([2., 3., 5., 7.])), - (mo_array([2., 3., 5., 7.]), np.array([2., 3., 5., 7.], dtype=np.float32)), - ]) - def test_mo_array_positive(self, data, result): - assert data.dtype == result.dtype - - @pytest.mark.parametrize("data, 
result",[(mo_array([2., 3., 5., 7.]), np.array([2., 3., 5., 7.])), - ]) - def test_mo_array_negative(self, data, result): - assert data.dtype != result.dtype - - -class clarify_partial_shape_test(unittest.TestCase): - - def test_clarify_1(self): - actual_result = clarify_partial_shape([shape_array([dynamic_dimension, 10, dynamic_dimension]), - shape_array([4, dynamic_dimension, dynamic_dimension])]) - ref_result = shape_array([4, 10, dynamic_dimension]) - assert strict_compare_tensors(actual_result, ref_result) diff --git a/tools/mo/unit_tests/mo/front/conv_test.py b/tools/mo/unit_tests/mo/front/conv_test.py deleted file mode 100644 index a351c96003061f..00000000000000 --- a/tools/mo/unit_tests/mo/front/conv_test.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.concat import concat_infer -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.middle.passes.conv import fuse_pad -from openvino.tools.mo.middle.passes.fusing.mark_unfused_nodes import mark_shape_of_sugraph_as_unfusable -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.middle.pattern_match import for_graph_and_each_sub_graph_recursively -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.ops.elementwise import eltwise_infer -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.pad import Pad -from openvino.tools.mo.ops.shape import Shape -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect, \ - shaped_parameter, valued_const_with_data - - -class PadFusingTest(unittest.TestCase): - - # standard case: not in the shape subgraph - def test_pad_fusing(self): - nodes = { - **shaped_parameter('input', shape_array([1, 3, 248, 248])), - - **valued_const_with_data('pads_begin', shape_array([0, 0, 1, 1])), - **valued_const_with_data('pads_end', shape_array([0, 0, 1, 1])), - **valued_const_with_data('fill_value', shape_array(0.0)), - **valued_const_with_data('weights', shape_array(np.zeros([3, 16, 4, 4]))), - - **regular_op_with_empty_data('pad', {'type': 'Pad', - 'op': 'Pad', - 'infer': Pad.infer, - 'mode': 'constant'}), - - **regular_op_with_empty_data('conv', {'type': 'Convolution', - 'op': 'Convolution', - 'infer': Convolution.infer, - # zeros, no paddings - 'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'group': 1, - 'kernel_spatial_idx': np.array([2, 3]), - 'output': 64, - 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), - 'batch_dims': np.array([0]), - 'input_feature_channel': 1, - 'output_feature_channel': 0}), - **result(), - } - - graph = build_graph(nodes_attrs=nodes, edges=[ - *connect('input', '0:pad'), - *connect('pads_begin', '1:pad'), - *connect('pads_end', '2:pad'), - *connect('fill_value', '3:pad'), - *connect('pad', '0:conv'), - *connect('weights', '1:conv'), - *connect('conv', 'output'), - ], nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'middle' - - graph = partial_infer(graph) - mark_shape_of_sugraph_as_unfusable(graph) - for_graph_and_each_sub_graph_recursively(graph, fuse_pad) - graph.clean_up() - - conv_fused_with_pad = regular_op_with_empty_data('conv', {'type': 'Convolution', 
- 'op': 'Convolution', - # ones are taken from fused Pad - 'pad': np.array([[0, 0], [0, 0], [1, 1], [1, 1]]), - 'dilation': np.array([1, 1, 1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'group': 1, - 'kernel_spatial_idx': np.array([2, 3]), - 'output': 64, - 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), - 'batch_dims': np.array([0]), - 'input_feature_channel': 1, - 'output_feature_channel': 0, - - 'infer': Convolution.infer}) - - graph_ref = build_graph(nodes_attrs=nodes, update_attributes=conv_fused_with_pad, edges=[ - *connect('input', '0:conv'), - *connect('weights', '1:conv'), - *connect('conv', 'output'), - ], nodes_with_edges_only=True) - graph_ref.graph['layout'] = 'NCHW' - graph_ref.stage = 'middle' - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - # Pad in the shape subgraph - def test_pad_fusing_shape_subgraph(self): - nodes = { - **shaped_parameter('input', shape_array([1, 3, 1020, 1020])), - **regular_op_with_empty_data('input_shape', {'type': 'ShapeOf', 'op': 'ShapeOf', 'output_type': np.int64, - 'infer': Shape.infer}), - **regular_op_with_empty_data('gathered_shape', {'type': 'Gather', 'batch_dims': 0, 'infer': Gather.infer}), - **valued_const_with_data('axis', np.array([0])), - **valued_const_with_data('indices', np.array([2, 3])), - - **regular_op_with_empty_data('div', {'type': 'Div', - 'infer': lambda node: eltwise_infer(node, lambda a, b: a / b)}), - **regular_op_with_empty_data('sub_1', {'type': 'Sub', - 'infer': lambda node: eltwise_infer(node, lambda a, b: a - b)}), - **regular_op_with_empty_data('sub_2', {'type': 'Sub', - 'infer': lambda node: eltwise_infer(node, lambda a, b: a - b)}), - - **valued_const_with_data('div_const', shape_array([2])), - **valued_const_with_data('sub_const', shape_array([512])), - - **regular_op_with_empty_data('pad', {'type': 'Pad', - 'op': 'Pad', - 'infer': Pad.infer, - 'mode': 'constant'}), - - **regular_op_with_empty_data('concat', {'type': 'Concat', - 'op': 'Concat', - 'axis': 0, - 'infer': concat_infer}), - - **valued_const_with_data('pad_end', shape_array([0, 0, 0, 0])), - **valued_const_with_data('blank_zeros', shape_array([0, 0])), - - **regular_op_with_empty_data('conv', {'type': 'Convolution', - 'op': 'Convolution', - - 'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'group': 1, - 'kernel_spatial_idx': np.array([2, 3]), - 'output': 64, - 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), - 'batch_dims': np.array([0]), - 'input_feature_channel': 1, - 'output_feature_channel': 0, - - 'infer': Convolution.infer}), - **valued_const_with_data('weights', shape_array(np.zeros([3, 16, 4, 4]))), - **result(), - } - - graph = build_graph(nodes_attrs=nodes, - update_attributes={ - 'gathered_shape_d': {'kind': 'data', 'value': shape_array([256, 256]), - 'shape': shape_array([2])}}, - edges=[ - *connect('input', 'input_shape', skip_data=True), - *connect('input_shape', '0:gathered_shape'), - *connect('indices', '1:gathered_shape'), - *connect('axis', '2:gathered_shape'), - - *connect('gathered_shape', 'sub_1'), - *connect('sub_const', 'sub_1'), - *connect('sub_1', 'div'), - *connect('div_const', 'div'), - *connect('div', '0:sub_2'), - *connect('sub_1', '1:sub_2'), - *connect('input', '0:pad'), - - *connect('blank_zeros', '0:concat'), - *connect('sub_2', '1:concat'), - *connect('concat', '1:pad'), - - *connect('pad_end', '2:pad'), - *connect('pad', '0:conv'), - 
*connect('weights', '1:conv'), - *connect('conv', 'output'), - ], nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'middle' - - graph = partial_infer(graph) - - # graph must remain unchanged - graph_ref = graph.copy() - - mark_shape_of_sugraph_as_unfusable(graph) - for_graph_and_each_sub_graph_recursively(graph, fuse_pad) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/div_test.py b/tools/mo/unit_tests/mo/front/div_test.py deleted file mode 100644 index 2c818035378777..00000000000000 --- a/tools/mo/unit_tests/mo/front/div_test.py +++ /dev/null @@ -1,85 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.div import Div -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \ - connect_data - -nodes = { - **regular_op_with_shaped_data('placeholder_1', [1, 227, 227, 3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('placeholder_2', [1, 227, 227, 3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('div', None, {'op': 'Div', 'type': 'Divide', 'name': 'my_div'}), - - **regular_op_with_shaped_data('reciprocal', [1, 227, 227, 3], {'type': 'Power'}), - **valued_const_with_data('minus_one', np.array(-1.)), - **regular_op_with_shaped_data('mul', None, {'type': 'Multiply'}), - - **result(), -} - - -class TestDiv(unittest.TestCase): - def test_div_test_1(self): - # Test with two different inputs from two placeholders - graph = build_graph(nodes, [ - *connect('placeholder_1', '0:div'), - *connect('placeholder_2', '1:div'), - *connect('div', 'output'), - ], nodes_with_edges_only=True) - Div().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [ - *connect('placeholder_1', '0:mul'), - *connect('placeholder_2', '0:reciprocal'), - *connect('minus_one', '1:reciprocal'), - *connect('reciprocal', '1:mul'), - *connect('mul', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Multiply')[0]]['name'] == 'my_div') - - def test_div_test_2(self): - # Test with two same inputs from one placeholder - graph = build_graph(nodes, [ - *connect('placeholder_1:0', '0:div'), - *connect_data('placeholder_1:0', '1:div'), - *connect('div', 'output'), - ], nodes_with_edges_only=True) - Div().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [ - *connect('placeholder_1:0', '0:mul'), - *connect_data('placeholder_1:0', '0:reciprocal'), - *connect('minus_one', '1:reciprocal'), - *connect('reciprocal', '1:mul'), - *connect('mul', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Multiply')[0]]['name'] == 'my_div') - - def test_div_with_integer(self): - # Test where transformation should not be applied because the divisor is integer - graph = build_graph({ - **regular_op_with_shaped_data('parameter', [1, 227, 227, 3], {'type': 'Parameter', 'data_type': np.int32}), - **valued_const_with_data('const', np.array([-1.], dtype=np.int32)), - 
**regular_op_with_shaped_data('div', None, {'op': 'Div', 'type': 'Divide', 'name': 'my_div'}), - **result()}, - [ - *connect('parameter:0', '0:div'), - *connect_data('const:0', '1:div'), - *connect('div', 'output'), - ]) - graph_ref = graph.copy() - Div().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/eltwise_n_test.py b/tools/mo/unit_tests/mo/front/eltwise_n_test.py deleted file mode 100644 index 3891bc6f74b8cc..00000000000000 --- a/tools/mo/unit_tests/mo/front/eltwise_n_test.py +++ /dev/null @@ -1,178 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.eltwise_n import EltwiseNReplacement -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_3': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_4': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - # EltwiseN operations - 'EltwiseN_1': {'value': None, 'operation': None, 'type': None, 'kind': 'op', 'op': 'EltwiseN'}, - # Test operation - 'last': {'value': None, 'operation': None, 'type': 'Eltwise', 'kind': 'op', 'op': None}, - # Max operation - 'max_1': {'value': None, 'operation': None, 'type': 'Eltwise', 'kind': 'op'}, - # Mui operations - 'mul_1': {'value': None, 'operation': None, 'type': 'Eltwise', 'kind': 'op'}, - 'mul_2': {'value': None, 'operation': None, 'type': 'Eltwise', 'kind': 'op'}, - # Add operations - 'add_1': {'value': None, 'operation': None, 'type': 'Eltwise', 'kind': 'op'}, - 'add_2': {'value': None, 'operation': None, 'type': 'Eltwise', 'kind': 'op'}, - 'add_3': {'value': None, 'operation': None, 'type': 'Eltwise', 'kind': 'op'}, -} - - -class TestEltwiseNFrontReplacement(unittest.TestCase): - def test_eltwiseN_test_1(self): - # EltwiseN test with N = 2 from 2 placeholders and operation = max - - graph = build_graph(nodes_attributes, - [('placeholder_1', 'EltwiseN_1'), - ('placeholder_2', 'EltwiseN_1'), - ('EltwiseN_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2': {'shape': np.array([1, 227, 227, 3])}, - 'EltwiseN_1': {'operation': 'max'}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'max_1'), - ('placeholder_2', 'max_1'), - ('max_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2': {'shape': np.array([1, 227, 227, 3])}, - 'max_1': {'type': 'Maximum'} - }, nodes_with_edges_only=True) - - graph.stage = 'front' - - replacer = EltwiseNReplacement() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_eltwise_test_2(self): - # EltwiseN test with N = 3 from 3 placeholders and operation = mul - - graph = build_graph(nodes_attributes, - [('placeholder_1', 'EltwiseN_1'), - ('placeholder_2', 'EltwiseN_1'), - ('placeholder_3', 'EltwiseN_1'), - ('EltwiseN_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2': {'shape': np.array([1, 227, 227, 3])}, 
- 'placeholder_3': {'shape': np.array([1, 227, 227, 3])}, - 'EltwiseN_1': {'operation': 'mul'}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'mul_1'), - ('placeholder_2', 'mul_1'), - ('mul_1', 'mul_2'), - ('placeholder_3', 'mul_2'), - ('mul_2', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_3': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1': {'type': 'Multiply'}, - 'mul_2': {'type': 'Multiply'}, - }, nodes_with_edges_only=True) - - graph.stage = 'front' - - replacer = EltwiseNReplacement() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_eltwise_test_3(self): - # EltwiseN test with N = 4 from 1 placeholder and operation = sum - graph = build_graph(nodes_attributes, - [('placeholder_1', 'EltwiseN_1'), - ('placeholder_2', 'EltwiseN_1'), - ('placeholder_3', 'EltwiseN_1'), - ('placeholder_4', 'EltwiseN_1'), - ('EltwiseN_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_3': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_4': {'shape': np.array([1, 227, 227, 3])}, - 'EltwiseN_1': {'operation': 'sum'}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'add_1'), - ('placeholder_2', 'add_1'), - ('add_1', 'add_2'), - ('placeholder_3', 'add_2'), - ('add_2', 'add_3'), - ('placeholder_4', 'add_3'), - ('add_3', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_3': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_4': {'shape': np.array([1, 227, 227, 3])}, - 'add_1': {'type': 'Add'}, - 'add_2': {'type': 'Add'}, - 'add_3': {'type': 'Add'}, - }, nodes_with_edges_only=True) - - graph.stage = 'front' - - replacer = EltwiseNReplacement() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_eltwise_test_4(self): - # EltwiseN test with N = 4 from 1 placeholder and operation = sum - graph = build_graph(nodes_attributes, - [('placeholder_1', 'EltwiseN_1'), - ('placeholder_1', 'EltwiseN_1'), - ('placeholder_1', 'EltwiseN_1'), - ('placeholder_1', 'EltwiseN_1'), - ('EltwiseN_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'EltwiseN_1': {'operation': 'sum'}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'add_1'), - ('placeholder_1', 'add_1'), - ('add_1', 'add_2'), - ('placeholder_1', 'add_2'), - ('add_2', 'add_3'), - ('placeholder_1', 'add_3'), - ('add_3', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'add_1': {'type': 'Add'}, - 'add_2': {'type': 'Add'}, - 'add_3': {'type': 'Add'}, - }, nodes_with_edges_only=True) - - graph.stage = 'front' - - replacer = EltwiseNReplacement() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/extractor_test.py b/tools/mo/unit_tests/mo/front/extractor_test.py deleted file mode 100644 index 7983d39d76fb22..00000000000000 --- a/tools/mo/unit_tests/mo/front/extractor_test.py +++ /dev/null @@ -1,682 +0,0 @@ 
-# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import pytest -from openvino.tools.mo.front.common.partial_infer.utils import strict_compare_tensors -from openvino.tools.mo.front.extractor import input_user_data_repack, output_user_data_repack, update_ie_fields, add_input_op, \ - get_node_id_with_ports -from openvino.tools.mo.front.extractor import spatial_attr_getter, add_input_ops, attr_getter, CaffePythonFrontExtractorOp, \ - add_output_ops, bool_to_str -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from unit_tests.utils.extractors import FakeMultiParam -from unit_tests.utils.graph import build_graph, build_graph_with_edge_attrs, build_graph_with_attrs -from openvino.runtime import PartialShape - - -class FakePythonParam: - def __init__(self, param: FakeMultiParam): - self.__setattr__('python_param', param) - - -nodes_attributes = {'input': {'kind': 'data'}, - 'pool_1': {'type': 'Pooling', 'kind': 'op'}, - 'output': {'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - } - - -class UpdateIEFieldsTest(unittest.TestCase): - def test_default_update_ie_fields(self): - update_ie_fields({}, ir_version=None) - - def test_not_set_update_ie_fields(self): - with self.assertRaisesRegex(Error, 'Unrecognized IR version.*'): - update_ie_fields({}, ir_version='abracadabra') - - -class TestExtractor(unittest.TestCase): - def test_spatial_attr_getter(self): - input_shape = np.array([1, 125, 13, 13]) - params = { - 'kernel': np.array([1, 1, 1, 2]), - 'pad': np.array([1, 1, 3, 4]), - 'stride': np.array([1, 1, 2, 3]), - } - graph = build_graph(nodes_attributes, - [('input', 'pool_1'), - ('pool_1', 'output'), - ('output', 'op_output') - ], - {'input': {'shape': input_shape}, - 'pool_1': {**params, 'spatial_dims': [2, 3]}, - 'output': {'shape': None}}) - pool_1_node = Node(graph, 'pool_1') - for param in params.keys(): - if type(params[param]) is np.ndarray: - port_lambda = lambda x: x - self.assertEqual(params[param][2], - spatial_attr_getter(pool_1_node, field=param, dim=0, post=port_lambda)) - self.assertEqual(params[param][3], - spatial_attr_getter(pool_1_node, field=param, dim=1, post=port_lambda)) - - def test_attr_getter(self): - nodes = {'input': {'kind': 'data'}, - 'reshape': {'type': 'Reshape', 'kind': 'op'}, - 'output': {'kind': 'data'}, - 'op_output': {'type': 'Result', 'kind': 'op'}, - } - input_shape = np.array([1, 125, 13, 13]) - params = { - 'dim': [1, 1, 2, 3], - 'max_size': np.array([3, 2, 1, 0]) - } - expect_params = { - 'dim': "1,1,2,3", - 'max_size': "3,2,1,0", - } - graph = build_graph(nodes, - [('input', 'reshape'), - ('reshape', 'output'), - ('output', 'op_output') - ], - {'input': {'shape': input_shape}, - 'reshape': {**params, 'spatial_dims': [2, 3]}, - 'output': {'shape': None}}) - pool_1_node = Node(graph, 'reshape') - for param in params.keys(): - if type(params[param]) is list: - self.assertEqual(expect_params[param], - attr_getter(pool_1_node, param)) - - -class TestAddInputOp(unittest.TestCase): - nodes = [ - ('op_node', {'kind': 'op'}), - ('future_input', {'kind': 'op'}), - ('another_node', {'kind': 'op'}), - ] - edges = [('future_input', 'op_node', {'in': 1, 'out': 0}), - ('another_node', 'op_node', {'in': 0, 'out': 0})] - - def test_in_port_no_data(self): - graph = 
build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges) - new_input_shape = np.array([1, 2, 3, 4]) - graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges[1:], - new_nodes_with_attrs=[('input_node', {'kind': 'op', 'op': 'Parameter', - 'shape': new_input_shape})], - new_edges_with_attrs=[('input_node', 'op_node', {'in': 1, 'out': 0})]) - add_input_op(graph, 'op_node', 1, data=False, shape=new_input_shape) - graph.remove_edge('future_input', 'op_node') - (flag, resp) = compare_graphs(graph, graph_ref, last_node='op_node') - self.assertTrue(flag, resp) - - def test_in_port_with_data(self): - graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges) - graph.stage = 'middle' - new_input_shape = np.array([1, 2, 3, 4]) - graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges[1:], - new_nodes_with_attrs=[('input_node', {'kind': 'op', 'op': 'Parameter', - 'shape': new_input_shape}), - ('input_data', {'kind': 'data'})], - new_edges_with_attrs=[('input_node', 'input_data', {'in': 0, 'out': 0}), - ('input_data', 'op_node', {'in': 1, 'out': 0})]) - add_input_op(graph, 'op_node', 1, data=True, shape=new_input_shape) - graph.remove_edge('future_input', 'op_node') - (flag, resp) = compare_graphs(graph, graph_ref, last_node='op_node') - self.assertTrue(flag, resp) - - nodes_out = [ - ('op_node', {'kind': 'op'}), - ('future_input', {'kind': 'op'}), - ('another_node', {'kind': 'op'}), - ] - edges_out = [('op_node', 'future_input', {'in': 0, 'out': 1}), - ('op_node', 'another_node', {'in': 0, 'out': 0})] - - def test_out_port_no_data(self): - graph = build_graph_with_attrs(nodes_with_attrs=self.nodes_out, edges_with_attrs=self.edges_out) - new_input_shape = np.array([1, 2, 3, 4]) - graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes_out, edges_with_attrs=self.edges_out[1:], - new_nodes_with_attrs=[('input_node', {'kind': 'op', 'op': 'Parameter', - 'shape': new_input_shape})], - new_edges_with_attrs=[('input_node', 'future_input', {'in': 0, 'out': 0})]) - add_input_op(graph, 'op_node', 1, data=False, shape=new_input_shape, is_out_port=True) - graph.remove_edge('op_node', 'future_input') - (flag, resp) = compare_graphs(graph, graph_ref, last_node='another_node') - self.assertTrue(flag, resp) - (flag, resp) = compare_graphs(graph, graph_ref, last_node='future_input') - self.assertTrue(flag, resp) - - def test_out_port_with_data(self): - graph = build_graph_with_attrs(nodes_with_attrs=self.nodes_out, edges_with_attrs=self.edges_out[1:], - new_nodes_with_attrs=[('input_data', {'kind': 'data', 'shape': None, 'value': None})], - new_edges_with_attrs=[('op_node', 'input_data', {'out': 1, 'in': 0}), - ('input_data', 'future_input', {'in': 0, 'out': 0})]) - graph.stage = 'middle' - new_input_shape = np.array([1, 2, 3, 4]) - graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes_out, edges_with_attrs=self.edges_out[1:], - new_nodes_with_attrs=[('input_node', {'kind': 'op', 'op': 'Parameter', - 'shape': new_input_shape}), - ('input_data', {'kind': 'data', 'shape': None})], - new_edges_with_attrs=[('input_node', 'input_data', {'in': 0, 'out': 0}), - ('input_data', 'future_input', {'in': 0, 'out': 0})]) - add_input_op(graph, 'op_node', 1, data=True, shape=new_input_shape, is_out_port=True) - graph.remove_edge('op_node', 'input_data') - - (flag, resp) = compare_graphs(graph, graph_ref, last_node='another_node') - self.assertTrue(flag, resp) - (flag, resp) = 
compare_graphs(graph, graph_ref, last_node='future_input') - self.assertTrue(flag, resp) - - -class TestInputAddition(UnitTestWithMockedTelemetry): - # Tests for input - nodes = {'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'}, - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'}, - } - edges = [ - ('node_1', 'conv_1'), - ('conv_1', 'relu_1'), - ] - - def test_none_out_port_raise(self): - graph = build_graph(self.nodes, self.edges) - shape = np.array([1, 2, 3, 4]) - inputs = {'conv_1': [{'shape': shape, 'out': None}]} - with self.assertRaisesRegex(Error, 'Output port for input node conv_1 should be specified, it cannot be None!'): - add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True) - - def test_wrong_output_port_raise(self): - graph = build_graph(self.nodes, self.edges) - shape = np.array([1, 2, 3, 4]) - inputs = {'conv_1': [{'shape': shape, 'out': 5}]} - with self.assertRaisesRegex(Error, 'Output port index 5 is out of number of available output ports for node'): - add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True) - - def test_wrong_input_port_raise(self): - graph = build_graph(self.nodes, self.edges) - shape = np.array([1, 2, 3, 4]) - inputs = {'conv_1': [{'shape': shape, 'in': 5}]} - with self.assertRaisesRegex(Error, 'Input port index 5 is out of number of available input ports for node'): - add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True) - - def test_one_input_one_shape(self): - shape = np.array([1, 2, 3, 4]) - inputs = {'conv_1': [{'shape': shape}]} - nodes = { - 'old_input': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'}, - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'output': {'type': 'SoftMax', 'kind': 'op', 'op': 'NotPlaceholder'} - } - edges = [ - ('old_input', 'conv_1'), - ('conv_1', 'relu_1'), - ('relu_1', 'output') - ] - graph = build_graph(nodes, edges) - add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True) - new_input = list(graph.in_edges('conv_1'))[0][0] - self.assertFalse(graph.node['old_input']['is_input']) - self.assertTrue(graph.node[new_input]['is_input']) - self.assertTrue((new_input, 'conv_1') in graph.edges()) - self.assertTrue(('old_input', 'conv_1') not in graph.edges()) - shapes_are_equal = np.array_equal(graph.node[new_input]['shape'], shape) - self.assertTrue(shapes_are_equal) - - def test_one_input_no_shape(self): - shape = None - inputs = {'conv_1': [{'shape': shape}]} - nodes = { - 'old_input': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'old_input_data': {'kind': 'data', 'value': None, 'shape': np.array([-1, 224, 224, 3])}, - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'conv_1_data': {'kind': 'data', 'value': True, 'shape': np.array([-1, 224, 224, 3])}, - 'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'relu_1_data': {'kind': 'data', 'value': None, 'shape': np.array([-1, 112, 112, 64])}, - 'output': {'type': 'SoftMax', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'output_data': {'name': 'output_data', 'kind': 'data', 'shape': np.array([-1, 112, 112, 64])}, - 'op_output': {'kind': 'op', 'op': 'Result'} - } - edges = [ - ('old_input', 'old_input_data'), - ('old_input_data', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'relu_1'), - ('relu_1', 'relu_1_data'), - ('relu_1_data', 
'output'), - ('output', 'output_data'), - ('output_data', 'op_output') - ] - graph = build_graph(nodes, edges) - graph.stage = 'middle' - add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=False) - new_input = list(graph.in_edges(list(graph.in_edges('conv_1'))[0][0]))[0][0] - new_input_data = list(graph.in_edges('conv_1'))[0][0] - self.assertFalse(graph.node['old_input']['is_input']) - self.assertTrue(graph.node[new_input]['is_input']) - self.assertTrue((new_input_data, 'conv_1') in graph.edges()) - self.assertTrue(('old_input_data', 'conv_1') not in graph.edges()) - self.assertIsNotNone(graph.node[new_input_data]['shape']) - - def test_two_inputs_two_shapes_positive_1(self): - shape_1 = [1, 2, 3, 4] - shape_2 = [4, 3, 2, 1] - inputs = {'node_1': [{'shape': shape_1}], 'node_4': [{'shape': shape_2}]} - nodes = { - 'input_1': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'}, - 'input_2': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'}, - 'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'node_2': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'node_3': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'node_4': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'output': {'kind': 'op', 'op': 'Result'} - } - edges = [ - ('input_1', 'node_1'), - ('node_1', 'node_2'), - ('node_3', 'output'), - ('input_2', 'node_4'), - ('node_4', 'output') - ] - graph = build_graph(nodes, edges) - add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True) - new_input_1 = list(graph.in_edges('node_1'))[0][0] - new_input_2 = list(graph.in_edges('node_4'))[0][0] - self.assertFalse(graph.node['input_1']['is_input']) - self.assertTrue(graph.node[new_input_1]['is_input']) - self.assertTrue(graph.node[new_input_2]['is_input']) - self.assertTrue((new_input_1, 'node_1') in graph.edges()) - self.assertTrue((new_input_2, 'node_4') in graph.edges()) - self.assertTrue(strict_compare_tensors(shape_1, graph.node[new_input_1]['shape'])) - self.assertTrue(strict_compare_tensors(shape_2, graph.node[new_input_2]['shape'])) - - def test_two_inputs_two_shapes_not_all_inputs(self): - shape_1 = [1, 2, 3, 4] - shape_2 = [4, 3, 2, 1] - inputs = {'node_1': [{'shape': shape_1}], 'node_4': [{'shape': shape_2}]} - nodes = { - 'input_1': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'}, - 'input_2': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'}, - 'node_1': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'node_2': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'node_3': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'node_4': {'type': 'Identity', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'output': { 'kind': 'op', 'op': 'Result'}, - 'input_3': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'} - } - edges = [ - ('input_1', 'node_1'), - ('node_1', 'node_2'), - ('node_3', 'output'), - ('input_2', 'node_4'), - ('node_4', 'output'), - ('input_3', 'output') - ] - graph = build_graph(nodes, edges) - self.assertRaises(Error, add_input_ops, graph, inputs, True) - - # Tests for cases with input/output ports cutting - def test_add_input_with_input_port_before_infer(self): - shape = np.array([1, 2, 3, 4]) - inputs = {'conv_1': [{'shape': shape, 'in': 0}]} - nodes = { - 'old_input': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'}, - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'output': 
{'type': 'SoftMax', 'kind': 'op', 'op': 'NotPlaceholder'} - } - edges = [ - ('old_input', 'conv_1'), - ('conv_1', 'relu_1'), - ('relu_1', 'output') - ] - graph = build_graph(nodes, edges) - add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True) - - # Check that graph - graph_ref = build_graph(nodes, edges, update_attributes={'old_input': {'shape': shape}}) - (flag, resp) = compare_graphs(graph, graph_ref, last_node='output') - self.assertTrue(flag, resp) - - # also checks that new old_input was changed - new_input = list(graph.in_edges('conv_1'))[0][0] - self.assertFalse(graph.node['old_input']['is_input']) - self.assertTrue(graph.node[new_input]['is_input']) - self.assertTrue((new_input, 'conv_1') in graph.edges()) - self.assertTrue(('old_input', 'conv_1') not in graph.edges()) - - def test_add_input_with_output_port_before_infer(self): - shape = np.array([1, 2, 3, 4]) - inputs = {'conv_1': [{'shape': shape, 'out': 0}]} - nodes = { - 'old_input': {'type': 'Identity', 'kind': 'op', 'op': 'Parameter'}, - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'output': {'type': 'SoftMax', 'kind': 'op', 'op': 'NotPlaceholder'} - } - edges = [ - ('old_input', 'conv_1'), - ('conv_1', 'relu_1'), - ('conv_2', 'relu_1'), - ('relu_1', 'output') - ] - graph = build_graph(nodes, edges) - add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=True) - - graph_ref = build_graph(nodes_attrs={'new_input': {'kind': 'op', 'op': 'Parameter', 'shape': shape}, - **nodes}, - edges=[('new_input', 'relu_1'), - ('relu_1', 'output'), - ('conv_2', 'relu_1'), - ('old_input', 'conv_1'),],) - # Check that new input is added right (with right ports !) 
- (flag, resp) = compare_graphs(graph, graph_ref, last_node='output') - self.assertTrue(flag, resp) - - # Check that other graph is not damaged - (flag, resp) = compare_graphs(graph, graph_ref, last_node='conv_1') - self.assertTrue(flag, resp) - - # Checks for new input and edges - self.assertTrue('conv_1/placeholder_out_port_0' in graph.nodes()) - new_input = 'conv_1/placeholder_out_port_0' - self.assertTrue(graph.node[new_input]['is_input']) - self.assertTrue((new_input, 'relu_1') in graph.edges()) - self.assertTrue(('old_input', 'relu_1') not in graph.edges()) - - def test_add_input_with_output_port_after_infer(self): - shape = np.array([1, 2, 3, 4]) - inputs = {'conv_1': [{'shape': shape, 'out': 0}]} - nodes = { - 'old_input': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'inp_data' : {'kind': 'data', 'shape': shape + 1}, - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder'}, - 'conv_data': {'kind': 'data', 'shape': shape, 'value': None, 'data_attr': 'data_attr_value'}, - 'relu_1': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder'}, - } - edges = [ - ('old_input', 'inp_data'), - ('inp_data', 'conv_1'), - ('conv_1', 'conv_data'), - ('conv_data', 'relu_1', {'edge_attr': 'edge_value'}), - ] - graph = build_graph(nodes, edges) - graph.stage = 'middle' - add_input_ops(graph=graph, user_defined_inputs=inputs, before_infer=False) - - graph_ref = build_graph(nodes_attrs={'new_input': {'kind': 'op', 'op': 'Parameter', 'shape': shape}, - **nodes}, - edges=[('old_input', 'inp_data'), - ('inp_data', 'conv_1'), - ('new_input', 'conv_data'), - ('conv_data', 'relu_1', {'edge_attr': 'edge_value'}), - ],) - # Check that new input is added right (with right ports !) - (flag, resp) = compare_graphs(graph, graph_ref, last_node='relu_1') - self.assertTrue(flag, resp) - - # Check that other graph is not damaged - (flag, resp) = compare_graphs(graph, graph_ref, last_node='conv_1') - self.assertTrue(flag, resp) - - # Checks for new input and edges - self.assertTrue('conv_1/placeholder_out_port_0' in graph.nodes()) - new_input = 'conv_1/placeholder_out_port_0' - - self.assertTrue(graph.node[new_input]['is_input']) - - self.assertTrue(Node(graph, 'relu_1').in_node(0)['data_attr'] == 'data_attr_value') - self.assertTrue(Node(graph, 'relu_1').in_edge(0)['edge_attr'] == 'edge_value') - - -class TestOutputCut(): - # {'embeddings': [{'port': None}]} - @pytest.mark.parametrize("output",[{'C':[{'port': None}]}, {'C': [{'out': 0}]}, {'C': [{'out': 1}]}]) - def test_output_port_cut(self, output): - nodes = {'A': {'type': 'Identity', 'kind': 'op', 'op': 'Identity'}, - 'B': {'type': 'Identity', 'kind': 'op', 'op': 'Identity'}, - 'C': {'type': 'Identity', 'kind': 'op', 'op': 'Identity'}, - 'D': {'type': 'Identity', 'kind': 'op', 'op': 'Identity'}, - 'E': {'type': 'Identity', 'kind': 'op', 'op': 'Identity'}, - } - edges = [ - ('A', 'C', {'in': 0, 'out': 0}), - ('B', 'C', {'in': 1, 'out': 0}), - ('C', 'D', {'in': 0, 'out': 0}), - ('C', 'E', {'in': 0, 'out': 1}) - ] - graph = build_graph_with_edge_attrs(nodes, edges) - sinks = add_output_ops(graph, output) - graph.clean_up() - assert len(Node(graph, 'C').out_nodes()) == 1 - assert len(Node(graph, 'C').in_nodes()) == 2 - - @pytest.mark.parametrize("output",[{'C': [{'in': 0}]}, {'C': [{'in': 1}]}]) - def test_output_port_cut(self, output): - nodes = {'A': {'op': 'Parameter', 'kind': 'op'}, - 'B': {'op': 'Parameter', 'kind': 'op'}, - 'C': {'type': 'Identity', 'kind': 'op', 'op': 'Identity'}, - 'D': {'type': 'Identity', 'kind': 'op', 'op': 
'Identity'}, - 'E': {'type': 'Identity', 'kind': 'op', 'op': 'Identity'}, - } - edges = [ - ('A', 'C', {'in': 0, 'out': 0}), - ('B', 'C', {'in': 1, 'out': 0}), - ('C', 'D', {'in': 0, 'out': 0}), - ('C', 'E', {'in': 0, 'out': 1}) - ] - graph = build_graph_with_edge_attrs(nodes, edges) - sinks = add_output_ops(graph, output) - graph.clean_up() - assert len(graph.nodes()) == 2 - - -class TestUserDataRepack(UnitTestWithMockedTelemetry): - nodes = {'A': {'name': 'Aa', 'op': 'Parameter', 'kind': 'op'}, - 'B': {'name': 'Bb', 'op': 'Parameter', 'kind': 'op'}, - 'C': {'name': 'Cc', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Identity'}, - 'D': {'name': 'Dd', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Identity'}, - 'E': {'name': 'Ee', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Identity'}, - } - edges = [ - ('A', 'C', {'in': 0, 'out': 0}), - ('B', 'C', {'in': 1, 'out': 0}), - ('C', 'D', {'in': 0, 'out': 0}), - ('C', 'E', {'in': 0, 'out': 1}) - ] - - def test_input_user_data_repack_none(self): - graph = build_graph(self.nodes, self.edges) - input, freeze_placeholder = input_user_data_repack(graph, None, None) - self.assertEqual(input, None) - self.assertEqual(freeze_placeholder, None) - - def test_input_user_data_repack_names_to_ids_list(self): - graph = build_graph(self.nodes, self.edges) - input, freeze_placeholder = input_user_data_repack(graph, ['Aa', 'Bb'], None) - self.assertDictEqual(input, {'A': [{'shape': None, 'port': None}], 'B': [{'shape': None, 'port': None}]}) - self.assertEqual(freeze_placeholder, None) - - def test_input_user_data_repack_names_ports_in_out(self): - graph = build_graph(self.nodes, self.edges) - input, freeze_placeholder = input_user_data_repack(graph, ['Aa:0', '1:Cc'], None) - self.assertDictEqual(input, {'A': [{'shape': None, 'out': 0}], 'C': [{'shape': None, 'in': 1}]}) - self.assertEqual(freeze_placeholder, None) - - def test_input_user_data_repack_dict_with_shapes(self): - graph = build_graph(self.nodes, self.edges) - shape_1 = np.array([1, 160, 160, 3]) - shape_2 = np.array([1, 127, 127, 3]) - input, freeze_placeholder = input_user_data_repack(graph, {'Aa': shape_1, 'Bb': shape_2}, None) - self.assertDictEqual(input, {'A': [{'shape': shape_1, 'port': None}], 'B': [{'shape': shape_2, 'port': None}]}) - self.assertEqual(freeze_placeholder, None) - - def test_input_user_data_repack_dict_with_shapes_and_ports(self): - graph = build_graph(self.nodes, self.edges) - shape_1 = np.array([1, 160, 160, 3]) - shape_2 = np.array([1, 127, 127, 3]) - input, freeze_placeholder = input_user_data_repack(graph, {'Aa:0': shape_1, 'Bb:0': shape_2}, None) - self.assertDictEqual(input, {'A': [{'shape': shape_1, 'out': 0}], 'B': [{'shape': shape_2, 'out': 0}]}) - self.assertEqual(freeze_placeholder, None) - - def test_freeze_placeholder_and_input(self): - graph = build_graph(self.nodes, self.edges) - shape_1 = np.array([1, 160, 160, 3]) - input, freeze_placeholder = input_user_data_repack(graph, {'Aa:0': shape_1}, {'Bb': False}) - self.assertDictEqual(input, {'A': [{'shape': shape_1, 'out': 0}], 'B': [{'shape': None, 'port': None}]}) - self.assertEqual(freeze_placeholder, {'B': False}) - - def test_error(self): - graph = build_graph(self.nodes, self.edges) - self.assertRaises(Error, input_user_data_repack, graph, PartialShape([1, 227, 227, 3]), None) - - def test_error_2(self): - graph = build_graph(self.nodes, self.edges) - self.assertRaises(Error, input_user_data_repack, graph, PartialShape([1, 227, 227, 3]), None) - - def test_error_3(self): - 
graph = build_graph(self.nodes, self.edges) - self.assertRaises(Error, input_user_data_repack, graph, ['Bcb'], None) - - def test_input_and_freeze(self): - graph = build_graph(self.nodes, self.edges) - shape_1 = PartialShape([1, 160, 160, 3]) - input, freeze_placeholder = input_user_data_repack(graph, shape_1, {'Bb': True}) - self.assertDictEqual(input, {'A': [{'shape': shape_1, 'port': None}], 'B': [{'shape': None, 'port': None}]}) - self.assertDictEqual(freeze_placeholder, {'B': True}) - - def test_freeze_new_placeholder_1(self): - # create a new placeholder Cc:0 by cutting output port with shape_2 = [5] and freeze a value [1.0 1.0 2.0 3.0 5.0] - graph = build_graph(self.nodes, self.edges) - shape_1 = np.array([1, 160, 160, 3]) - shape_2 = np.array([5]) - input, freeze_placeholder = input_user_data_repack(graph, {'Aa:0': shape_1, 'Cc:0' : shape_2}, {'Bb': False, 'Cc:0' : [1.0, 1.0, 2.0, 3.0, 5.0]}) - self.assertDictEqual(input, {'A' : [{'shape' : shape_1, 'out' : 0}], 'B' : [{'shape' : None, 'port' : None}], 'C' : [{'shape' : shape_2, 'out' : 0}]}) - self.assertEqual(freeze_placeholder, {'B' : False, 'C/placeholder_out_port_0' : [1.0, 1.0, 2.0, 3.0, 5.0]}) - - def test_freeze_new_placeholder_2(self): - # create a new placeholder Ee by cutting input port with shape_2 = [2, 2] and freeze a value [[1.0, 1.0], [2.0, 3.0]] - graph = build_graph(self.nodes, self.edges) - shape_1 = np.array([1, 160, 160, 3]) - shape_2 = np.array([2, 2]) - input, freeze_placeholder = input_user_data_repack(graph, {'Aa:0': shape_1, 'Ee' : shape_2}, {'Bb': False, 'Ee' : [[1.0, 1.0], [2.0, 3.0]]}) - self.assertDictEqual(input, {'A' : [{'shape' : shape_1, 'out' : 0}], 'B' : [{'shape' : None, 'port' : None}], 'E' : [{'shape' : shape_2, 'port' : None}]}) - self.assertEqual(freeze_placeholder, {'B' : False, 'E/placeholder_port_None' : [[1.0, 1.0], [2.0, 3.0]]}) - - def test_freeze_new_placeholder_error(self): - # shape is not specified for new placeholder Cc:0 with frozen value - graph = build_graph(self.nodes, self.edges) - shape_1 = np.array([1, 160, 160, 3]) - self.assertRaises(Error, input_user_data_repack, graph, {'Aa:0': shape_1}, {'Bb': False, 'Cc:0' : [1.0, 1.0, 2.0, 3.0, 5.0]}) - - def test_output_user_data_repack(self): - graph = build_graph(self.nodes, self.edges) - output = output_user_data_repack(graph, ['Cc']) - self.assertDictEqual(output, {'C': [{'port': None}]}) - - def test_output_user_data_repack_ports(self): - graph = build_graph(self.nodes, self.edges) - output = output_user_data_repack(graph, ['Cc:1', '0:Cc']) - self.assertDictEqual(output, {'C': [{'out': 1}, {'in': 0}]}) - - def test_output_user_data_repack_none(self): - graph = build_graph(self.nodes, self.edges) - output = output_user_data_repack(graph, None) - self.assertEqual(output, None) - - -class TestExtractPort(unittest.TestCase): - def setUp(self) -> None: - nodes = { - 'input_id': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter', 'name': '1input1:0'}, - 'conv_id': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder', 'name': '1input1'}, - 'relu_id': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder', 'name': 'relu'}, - 'squeeze_id': {'type': 'Squeeze', 'kind': 'op', 'op': 'NotPlaceholder', 'name': 'relu:0'}, - } - edges = [ - ('input_id', 'conv_id'), - ('conv_id', 'relu_id'), - ('relu_id', 'squeeze_id'), - ] - self.graph = build_graph(nodes, edges) - - def test_out_port(self): - node_id, direction, port = get_node_id_with_ports(self.graph, '1input1:0:0') - self.assertEqual(node_id, 'input_id') - 
self.assertEqual(direction, 'out') - self.assertEqual(port, 0) - - def test_in_port1(self): - node_id, direction, port = get_node_id_with_ports(self.graph, '0:1input1') - self.assertEqual(node_id, 'conv_id') - self.assertEqual(direction, 'in') - self.assertEqual(port, 0) - - def test_in_port2(self): - node_id, direction, port = get_node_id_with_ports(self.graph, '0:relu:0') - self.assertEqual(node_id, 'squeeze_id') - self.assertEqual(direction, 'in') - self.assertEqual(port, 0) - - def test_no_port1(self): - node_id, direction, port = get_node_id_with_ports(self.graph, '1input1') - self.assertEqual(node_id, 'conv_id') - self.assertEqual(direction, 'port') - self.assertEqual(port, None) - - def test_no_port2(self): - self.assertRaises(Error, get_node_id_with_ports, self.graph, '1input1:0') - - def test_non_int(self): - self.assertRaises(Error, get_node_id_with_ports, self.graph, 'port:1input1') - - def test_two_ports(self): - self.assertRaises(Error, get_node_id_with_ports, self.graph, '0:1input1:1') - - def test_name_looks_like_port_number(self): - nodes = { - 'input_id': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter', 'name': '0'}, - 'conv_id': {'type': 'Convolution', 'kind': 'op', 'op': 'NotPlaceholder', 'name': '1'}, - 'relu_id': {'type': 'ReLU', 'kind': 'op', 'op': 'NotPlaceholder', 'name': '2'}, - } - edges = [ - ('input_id', 'conv_id'), - ('conv_id', 'relu_id'), - ] - graph = build_graph(nodes, edges) - node_id, direction, port = get_node_id_with_ports(graph, '0:2') - self.assertEqual(node_id, 'relu_id') - self.assertEqual(direction, 'in') - self.assertEqual(port, 0) - - -class TestCaffePythonFrontExtractorOp(unittest.TestCase): - def test_get_attrs(self): - exp_attrs = {"test_attr_1": 12, "test_attr_2": "sdf sdf"} - param_str = "'test_attr_1': 12, 'test_attr_2': 'sdf sdf'" - attrs = CaffePythonFrontExtractorOp.get_attrs(FakePythonParam(FakeMultiParam({'param_str': param_str}))) - self.assertEqual(exp_attrs, attrs) - -class TestBoolToSrtFunction(unittest.TestCase): - def test_bool_to_str(self): - graph = build_graph(nodes_attributes, - [('input', 'pool_1'), - ('pool_1', 'output'), - ('output', 'op_output') - ], - {'pool_1': {'bool_attr': None} - }) - pool_1_node = Node(graph, 'pool_1') - attrs = [(True, 'true'), (False, 'false'), (1, 'true'), (0, 'false')] - for attr in attrs: - pool_1_node.bool_attr = attr[0] - self.assertEqual(attr[1], bool_to_str(pool_1_node, 'bool_attr')) diff --git a/tools/mo/unit_tests/mo/front/freeze_placeholder_value_test.py b/tools/mo/unit_tests/mo/front/freeze_placeholder_value_test.py deleted file mode 100644 index 5ce767f55fceb8..00000000000000 --- a/tools/mo/unit_tests/mo/front/freeze_placeholder_value_test.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.freeze_placeholder_value import FreezePlaceholderValue -from unit_tests.utils.graph import build_graph - -nodes_bool = { - '0': {'name': 'input1', 'kind': 'op', 'op': 'Parameter', 'data_type': bool, 'shape': np.array([])}, - '1': {'name': 'input2', 'kind': 'op', 'op': 'Parameter', 'data_type': bool, 'shape': np.array([])}, - '2': {'name': 'node_1', 'kind': 'op', 'op': 'NotPlaceholder'}, - '3': {'name': 'node_2', 'kind': 'op', 'op': 'NotPlaceholder'}, - '4': {'name': 'node_3', 'kind': 'op', 'op': 'NotPlaceholder'}, - '5': {'name': 'node_4', 'kind': 'op', 'op': 'NotPlaceholder'}, - '6': {'name': 'output1', 'kind': 'op', 'op': 'Result', 'type': 'Result'}, - 
'7': {'name': 'output2', 'kind': 'op', 'op': 'Result', 'type': 'Result'} - -} -edges = { - ('0', '2'), - ('2', '3'), - ('4', '6'), - ('1', '5'), - ('5', '7') -} - - -class TestFreezePlaceholderValue(unittest.TestCase): - def test_freeze_true(self): - graph = build_graph(nodes_bool, edges) - graph.graph['fw'] = 'tf' - tested_class = FreezePlaceholderValue() - graph.graph['freeze_placeholder'] = {'input1': 'True'} - before_pattern = graph.nodes() - tested_class.find_and_replace_pattern(graph=graph) - after_pattern = graph.nodes() - # number of nodes in the grpaph didn't change - self.assertEqual(len(before_pattern), len(after_pattern)) - # reach new placeholder - try: - new_ph_dict = graph.node[[u for u, v in graph.in_edges('2')][0]] - except Exception as e: - self.fail("Can't get frozen placeholder. Broken edge. Additional information: {}".format(e)) - # check value - self.assertEqual('value' in new_ph_dict, True) - self.assertEqual(new_ph_dict['value'], True) - - def test_freeze_false(self): - graph = build_graph(nodes_bool, edges) - graph.graph['fw'] = 'tf' - tested_class = FreezePlaceholderValue() - graph.graph['freeze_placeholder'] = {'input1': 'False'} - before_pattern = graph.nodes() - tested_class.find_and_replace_pattern(graph=graph) - after_pattern = graph.nodes() - # number of nodes in the grpaph didn't change - self.assertEqual(len(before_pattern), len(after_pattern)) - # reach new placeholder - try: - new_ph_dict = graph.node[[u for u, v in graph.in_edges('2')][0]] - except Exception as e: - self.fail("Can't get frozen placeholder. Broken edge. Additional information: {}".format(e)) - # check value - self.assertEqual('value' in new_ph_dict, True) - self.assertEqual(new_ph_dict['value'], False) - - def test_freeze_both(self): - graph = build_graph(nodes_bool, edges) - graph.graph['fw'] = 'tf' - tested_class = FreezePlaceholderValue() - graph.graph['freeze_placeholder'] = {'input1': 'False', 'input2': 'True'} - before_pattern = graph.nodes() - tested_class.find_and_replace_pattern(graph=graph) - after_pattern = graph.nodes() - # number of nodes in the graph didn't change - self.assertEqual(len(before_pattern), len(after_pattern)) - # reach new placeholder - try: - new_ph_dict_1 = graph.node[[u for u, v in graph.in_edges('2')][0]] - new_ph_dict_2 = graph.node[[u for u, v in graph.in_edges('5')][0]] - except Exception as e: - self.fail("Can't get frozen placeholder. Broken edge. 
Additional information: {}".format(e)) - # check value - self.assertEqual('value' in new_ph_dict_1, True) - self.assertEqual('value' in new_ph_dict_2, True) - self.assertEqual(new_ph_dict_1['value'], False) - self.assertEqual(new_ph_dict_2['value'], True) diff --git a/tools/mo/unit_tests/mo/front/image_scaler_test.py b/tools/mo/unit_tests/mo/front/image_scaler_test.py deleted file mode 100644 index f16c04175faa7d..00000000000000 --- a/tools/mo/unit_tests/mo/front/image_scaler_test.py +++ /dev/null @@ -1,283 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.image_scaler import ImageScaler -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # ImageScaler operation - 'im_scaler': {'type': None, 'kind': 'op', 'op': 'ImageScaler'}, - 'im_scaler_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Test operation - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': None}, - 'last_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Mul and Add operations - 'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'}, - 'const_mul_1_w': {'type': None, 'value': None, 'kind': 'op', 'op': 'Const'}, - 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'add_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Add'}, - 'const_add_1_w': {'type': None, 'value': None, 'kind': 'op', 'op': 'Const'}, - 'add_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'add_1_data': {'value': None, 'shape': None, 'kind': 'data'}, -} - - -class ImageScalerTest(unittest.TestCase): - # Tests for MIDDLE stage - # Graph with Mul and Add operations - def test_image_scaler_test_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'im_scaler'), - ('im_scaler', 'im_scaler_data'), - ('im_scaler_data', 'last'), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'im_scaler': {'scale': np.array(2.0), 'bias': np.reshape(np.array([1, 2, 3]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'last') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array(2.0).shape, 'value': np.array(2.0)}, - 'const_add_1_w': {'shape': np.array([3, 1, 1]), - 'value': np.reshape(np.array([1, 2, 3]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'middle' - - replacer = ImageScaler() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last') - self.assertTrue(flag, resp) - - # Graph with Add operation - def test_image_scaler_test_2(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'im_scaler'), - ('im_scaler', 'im_scaler_data'), - ('im_scaler_data', 
'last'), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'im_scaler': {'scale': np.array(1.0), 'bias': np.reshape(np.array([1, 2, 3]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'last') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_add_1_w': {'shape': np.array([3, 1, 1]), - 'value': np.reshape(np.array([1, 2, 3]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'middle' - - replacer = ImageScaler() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last') - self.assertTrue(flag, resp) - - # Graph with Mul operation - def test_image_scaler_test_3(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'im_scaler'), - ('im_scaler', 'im_scaler_data'), - ('im_scaler_data', 'last'), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'im_scaler': {'scale': np.array(2.0), 'bias': np.reshape(np.array([0, 0, 0]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'last') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array(2.0).shape, 'value': np.array(2.0)}, - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'middle' - - replacer = ImageScaler() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last') - self.assertTrue(flag, resp) - - # Graph without Mul and Add operations - def test_image_scaler_test_4(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'im_scaler'), - ('im_scaler', 'im_scaler_data'), - ('im_scaler_data', 'last'), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'im_scaler_data': {'shape': np.array([1, 227, 227, 3])}, - 'im_scaler': {'scale': np.array(1.0), 'bias': np.reshape(np.array([0, 0, 0]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'last') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'middle' - - replacer = ImageScaler() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last') - self.assertTrue(flag, resp) - - # Tests for FRONT stage - # Graph with Mul and Add operations - def test_image_scaler_test_5(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'im_scaler'), - ('im_scaler', 'last'), - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'im_scaler': {'scale': np.array(2.0), 'bias': np.reshape(np.array([1, 2, 3]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'mul_1'), - ('const_mul_1_w', 'mul_1'), - ('mul_1', 'add_1'), - ('const_add_1_w', 'add_1'), - ('add_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 
3])}, - 'const_mul_1_w': {'shape': np.array(2.0).shape, 'value': np.array(2.0)}, - 'const_add_1_w': {'shape': np.array([3, 1, 1]), - 'value': np.reshape(np.array([1, 2, 3]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - replacer = ImageScaler() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last') - self.assertTrue(flag, resp) - - # Graph with Add operation - def test_image_scaler_test_6(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'im_scaler'), - ('im_scaler', 'last'), - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'im_scaler': {'scale': np.array(1.0), 'bias': np.reshape(np.array([1, 2, 3]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'add_1'), - ('const_add_1_w', 'add_1'), - ('add_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'const_add_1_w': {'shape': np.array([3, 1, 1]), - 'value': np.reshape(np.array([1, 2, 3]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - replacer = ImageScaler() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last') - self.assertTrue(flag, resp) - - # Graph with Mul operation - def test_image_scaler_test_7(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'im_scaler'), - ('im_scaler', 'last'), - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'im_scaler': {'scale': np.array(2.0), 'bias': np.reshape(np.array([0, 0, 0]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'mul_1'), - ('const_mul_1_w', 'mul_1'), - ('mul_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array(2.0).shape, 'value': np.array(2.0)}, - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - replacer = ImageScaler() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last') - self.assertTrue(flag, resp) - - # Graph without Mul and Add operations - def test_image_scaler_test_8(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'im_scaler'), - ('im_scaler', 'last'), - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'im_scaler': {'scale': np.array(1.0), 'bias': np.reshape(np.array([0, 0, 0]), [3, 1, 1])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - replacer = ImageScaler() - replacer.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/instance_normalization_test.py b/tools/mo/unit_tests/mo/front/instance_normalization_test.py deleted file mode 100644 index a631ad859a9016..00000000000000 --- a/tools/mo/unit_tests/mo/front/instance_normalization_test.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.instance_normalization import InstanceNormalization -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs 
-from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'input': {'kind': 'op', 'op': 'AnyOp'}, - 'scale': {'kind': 'op', 'op': 'AnyOp'}, - 'B': {'kind': 'op', 'op': 'AnyOp'}, - 'node': {'kind': 'op', 'op': 'InstanceNormalization', 'epsilon': None}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, -} - -nodes_ref_attributes = { - 'input': {'kind': 'op', 'op': 'AnyOp'}, - 'scale': {'kind': 'op', 'op': 'AnyOp'}, - 'B': {'kind': 'op', 'op': 'AnyOp'}, - 'start': {'kind': 'op', 'op': 'Const'}, - 'step': {'kind': 'op', 'op': 'Const'}, - 'rank': {'kind': 'op', 'op': 'Rank'}, - 'mvn_axes': {'kind': 'op', 'op': 'Range'}, - 'mvn': {'kind': 'op', 'op': 'MVN', 'name': 'node/Ins_Norm/MVN_', 'eps': None}, - 'mul': {'kind': 'op', 'op': 'Mul', 'name': 'node/Ins_Norm/mul_'}, - 'add': {'kind': 'op', 'op': 'Add', 'name': 'node/Ins_Norm/add_'}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, -} - - -class TestInstanceNormalization(unittest.TestCase): - def test_instance_normalization_test_1(self): - graph = build_graph(nodes_attributes, - [('input', 'node'), - ('scale', 'node'), - ('B', 'node'), - ('node', 'out') - ], - {'node': {'epsilon': 0.123}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'mvn', {'out': 0}), - ('input', 'rank', {'out': 0}), - ('start', 'mvn_axes'), - ('rank', 'mvn_axes'), - ('step', 'mvn_axes'), - ('mvn_axes', 'mvn'), - ('mvn', 'mul'), - ('scale', 'mul'), - ('mul', 'add'), - ('B', 'add'), - ('add', 'out') - ], - {'mvn': {'eps': 0.123, 'eps_mode': 'inside_sqrt', 'normalize_variance': 1}, - }, nodes_with_edges_only=True) - - graph.stage = 'front' - - tested_class = InstanceNormalization() - tested_class.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=False) - self.assertTrue(flag, resp) - - def test_instance_normalization_test_2(self): - graph = build_graph(nodes_attributes, - [('input', 'out', {'out': 0, 'in': 0}), - ('input', 'node', {'out': 1}), - ('scale', 'node'), - ('B', 'node'), - ('node', 'out', {'in': 1}) - ], - {'node': {'epsilon': 0.123}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'out', {'out': 0, 'in': 0}), - ('input', 'mvn', {'out': 1}), - ('input', 'rank', {'out': 1}), - ('start', 'mvn_axes'), - ('rank', 'mvn_axes'), - ('step', 'mvn_axes'), - ('mvn_axes', 'mvn'), - ('mvn', 'mul'), - ('scale', 'mul'), - ('mul', 'add'), - ('B', 'add'), - ('add', 'out', {'in': 1}) - ], - {'mvn': {'eps': 0.123, 'eps_mode': 'inside_sqrt', 'normalize_variance': 1}, - }, nodes_with_edges_only=True) - - graph.stage = 'front' - - tested_class = InstanceNormalization() - tested_class.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=False) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/interpolate_reshape_test.py b/tools/mo/unit_tests/mo/front/interpolate_reshape_test.py deleted file mode 100644 index 1da581459662dc..00000000000000 --- a/tools/mo/unit_tests/mo/front/interpolate_reshape_test.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -import numpy as np -import pytest - -from openvino.tools.mo.front.interpolate_reshape import InterpolateWithConcat -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, 
valued_const_with_data, connect, \ - connect_data - -nodes = { - **regular_op_with_shaped_data('placeholder', [1, 3, 30, 40], {'type': 'Parameter', 'op': 'Parameter'}), - **valued_const_with_data('out_shape', np.array([60, 160])), - - **regular_op_with_shaped_data('interpolate', [1, 3, 60, 160], - {'type': 'Interpolate', 'axes': int64_array([2, 3]), 'op': 'Interpolate', - 'version': 'opset1'}), - **regular_op_with_shaped_data('identity_00', [1, 3, 60, 160], {'identity': True, 'op': 'Identity'}), - **regular_op_with_shaped_data('identity_01', [1, 3, 60, 160], {'identity': True, 'op': 'Identity'}), - - **regular_op_with_shaped_data('shape', [4], {'type': 'ShapeOf', 'op': 'ShapeOf'}), - **valued_const_with_data('indices', np.array([2, 3])), - **valued_const_with_data('axis', np.array(0)), - **regular_op_with_shaped_data('gather', [2], {'type': 'Gather', 'op': 'Gather'}), - - **regular_op_with_shaped_data('placeholder_1', [1, 3, 60, 160], {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('identity_10', [1, 3, 60, 160], {'identity': True, 'op': 'Identity'}), - **regular_op_with_shaped_data('identity_11', [1, 3, 60, 160], {'identity': True, 'op': 'Identity'}), - **regular_op_with_shaped_data('concat', [1, 7, 60, 160], {'type': 'Concat', 'axis': 1, 'op': 'Concat'}), - - **valued_const_with_data('N', np.array([1])), - - **result('output'), - **result('output_1'), -} - - -class TestInterpolateConcat(): - def test_interpolate_concat_reshape_graph_comparison(self): - graph = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('out_shape', '1:interpolate'), - *connect('interpolate', '0:concat'), - *connect('placeholder_1', '1:concat'), - *connect('concat', 'output'), - ], nodes_with_edges_only=True) - - InterpolateWithConcat().find_and_replace_pattern(graph) - graph.clean_up() - graph_ref = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('placeholder_1', 'shape'), - *connect('shape', '0:gather'), - *connect('indices', '1:gather'), - *connect('axis', '2:gather'), - *connect('gather', '1:interpolate'), - *connect('interpolate', '0:concat'), - *connect_data('placeholder_1', '1:concat'), - *connect('concat', 'output'), - ], nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - def test_interpolate_identity_concat_reshape_graph_comparison(self): - graph = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('out_shape', '1:interpolate'), - *connect('interpolate', 'identity_00'), - *connect('identity_00', 'identity_01'), - *connect('identity_01', '0:concat'), - *connect('placeholder_1', 'identity_10'), - *connect('identity_10', 'identity_11'), - *connect('identity_11', '1:concat'), - *connect('concat', 'output'), - ], nodes_with_edges_only=True) - - InterpolateWithConcat().find_and_replace_pattern(graph) - graph.clean_up() - graph_ref = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect_data('identity_11', 'shape'), - *connect('shape', '0:gather'), - *connect('indices', '1:gather'), - *connect('axis', '2:gather'), - *connect('gather', '1:interpolate'), - *connect('interpolate', 'identity_00'), - *connect('identity_00', 'identity_01'), - *connect('identity_01', '0:concat'), - *connect('placeholder_1', 'identity_10'), - *connect('identity_10', 'identity_11'), - *connect('identity_11', '1:concat'), - *connect('concat', 'output'), - ], nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, 
graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - def test_interpolate_concat_negate(self): - graph = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('out_shape', '1:interpolate'), - *connect('interpolate', 'identity_00'), - *connect('interpolate', 'identity_01'), - *connect('identity_00', 'output'), - *connect('identity_01', 'output_1'), - ], nodes_with_edges_only=True) - - InterpolateWithConcat().find_and_replace_pattern(graph) - graph.clean_up() - graph_ref = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('out_shape', '1:interpolate'), - *connect('interpolate', 'identity_00'), - *connect('interpolate', 'identity_01'), - *connect('identity_00', 'output'), - *connect('identity_01', 'output_1'), - ], nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - @pytest.mark.parametrize("update_attrs",[ - {'concat': {'axis': None}}, - - {'concat': {'axis': -1}}, - {'interpolate': {'axes': None}}, - {'interpolate': {'axes': np.array([1])}}, - {'interpolate': {'axes': np.array([2, -1])}}, - ]) - def test_negative_axes_conditions(self, update_attrs): - graph = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('out_shape', '1:interpolate'), - *connect('interpolate', '0:concat'), - *connect('placeholder_1', '1:concat'), - *connect('concat', 'output'), - ], update_attributes=update_attrs, nodes_with_edges_only=True) - InterpolateWithConcat().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('out_shape', '1:interpolate'), - *connect('interpolate', '0:concat'), - *connect('placeholder_1', '1:concat'), - *connect('concat', 'output'), - ], update_attributes=update_attrs, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - - def test_interpolate_tf_style_concat(self): - graph = build_graph(nodes, [ - *connect('placeholder', '0:interpolate'), - *connect('out_shape', '1:interpolate'), - *connect('interpolate', '0:concat'), - *connect('N', '1:concat'), - *connect('concat', 'output'), - ], update_attributes={'concat': {'N': 1}}, nodes_with_edges_only=True) - graph_ref = graph.copy() - InterpolateWithConcat().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/kaldi/__init__.py b/tools/mo/unit_tests/mo/front/kaldi/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/kaldi/add_reshape_transpose_around_conv_pool_test.py b/tools/mo/unit_tests/mo/front/kaldi/add_reshape_transpose_around_conv_pool_test.py deleted file mode 100644 index c63957f3529b5e..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/add_reshape_transpose_around_conv_pool_test.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.kaldi.add_reshape_transpose_around_conv_pool import AddReshapeTransposeAroundConvPool -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, connect_front, const, regular_op, shaped_parameter - - -class 
AddReshapeTransposeAroundConvPoolTests(unittest.TestCase): - nodes = { - **shaped_parameter('input', [1, 33]), - **regular_op('some_op', {'op': 'some_op'}), - **regular_op('splice', {'op': 'Splice', 'context': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]}), - **regular_op('conv', {'kind': 'op', 'op': 'Convolution', 'kernel': [1, 11, 1, 5], 'patch_stride': 5, - 'kernel_spatial': [1, 5]}), - **regular_op('pool', {'kind': 'op', 'op': 'Pooling', 'pool_stride': 5, 'pool_step': [1, 1, 1, 1]}), - **regular_op('out_op', {'op': "SomeOp"}), - } - - ref_nodes = { - **shaped_parameter('input', [1, 33]), - **regular_op('some_op', {}), - **regular_op('splice', {'op': 'Splice', 'context': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]}), - - **regular_op('shapeof', {'op': 'ShapeOf', 'type': 'ShapeOf'}), - **const('ind', int64_array([0])), - **const('axis', int64_array(0)), - **regular_op('gather_batch', {'op': 'Gather', 'type': 'Gather'}), - **const('t', int64_array([11])), - **const('h', int64_array([5])), - **const('ind_h', int64_array([1])), - **regular_op('gather_h', {'op': "Gather", 'type': 'Gather'}), - **const('th', int64_array([55])), - **regular_op('div', {'op': 'Div', 'type': 'Divide'}), - **regular_op('concat', {'op': 'Concat', 'type': 'Concat'}), - - **regular_op('reshape_in', {'op': 'Reshape', 'type': 'Reshape'}), - **const('transpose_in_order', int64_array([0, 3, 1, 2])), - **regular_op('transpose_in', {'op': 'Transpose', 'type': 'Transpose'}), - **regular_op('conv', {'kind': 'op', 'op': 'Convolution', 'kernel': [1, 1, 11, 5]}), - **regular_op('pool', {'kind': 'op', 'op': 'Pooling', 'pool_stride': 5, 'pool_step': [1, 1, 1, 1]}), - **const('transpose_out_order', int64_array([0, 2, 3, 1])), - **regular_op('transpose_out', {'op': 'Transpose', 'type': 'Transpose'}), - **const('reshape_out_shape', int64_array([0, -1])), - **regular_op('reshape_out', {'op': 'Reshape', 'type': 'Reshape'}), - **regular_op('out_op', {'op': "SomeOp"}) - } - - def test_simple_convolution(self): - graph = build_graph(self.nodes, [ - *connect_front('input', 'splice'), - *connect_front('splice', 'conv'), - *connect_front('conv', 'out_op') - ], nodes_with_edges_only=True) - graph.stage = 'front' - AddReshapeTransposeAroundConvPool.find_and_replace_pattern(graph) - - ref_graph = build_graph(self.ref_nodes, - [ - *connect_front('input', 'splice'), - *connect_front('splice', '0:reshape_in'), - - *connect_front('splice', 'shapeof'), - *connect_front('shapeof:0', '0:gather_batch'), - *connect_front('ind', '1:gather_batch'), - *connect_front('axis', '2:gather_batch'), - *connect_front('shapeof:0', '0:gather_h'), - *connect_front('ind_h', '1:gather_h'), - *connect_front('axis', '2:gather_h'), - *connect_front('gather_h', '0:div'), - *connect_front('th', '1:div'), - *connect_front('gather_batch', '0:concat'), - *connect_front('t', '1:concat'), - *connect_front('h', '2:concat'), - *connect_front('div', '3:concat'), - *connect_front('concat', '1:reshape_in'), - - *connect_front('reshape_in', '0:transpose_in'), - *connect_front('transpose_in_order', "1:transpose_in"), - *connect_front('transpose_in', 'conv'), - *connect_front('conv', '0:transpose_out'), - *connect_front('transpose_out_order', '1:transpose_out'), - *connect_front('transpose_out', '0:reshape_out'), - *connect_front('reshape_out_shape', '1:reshape_out'), - *connect_front('reshape_out', 'out_op') - ]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'out_op', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_simple_convolution_wo_splice(self): - graph = 
build_graph(self.nodes, [ - *connect_front('input', 'conv'), - *connect_front('input', 'some_op'), - *connect_front('conv', 'out_op') - ], nodes_with_edges_only=True) - graph.stage = 'front' - AddReshapeTransposeAroundConvPool.find_and_replace_pattern(graph) - - ref_graph = build_graph(self.ref_nodes, - [ - *connect_front('input', '0:reshape_in'), - *connect_front('input', 'some_op'), - *connect_front('input', 'shapeof'), - *connect_front('shapeof:0', '0:gather_batch'), - *connect_front('ind', '1:gather_batch'), - *connect_front('axis', '2:gather_batch'), - *connect_front('shapeof:0', '0:gather_h'), - *connect_front('ind_h', '1:gather_h'), - *connect_front('axis', '2:gather_h'), - *connect_front('gather_h', '0:div'), - *connect_front('th', '1:div'), - *connect_front('gather_batch', '0:concat'), - *connect_front('t', '1:concat'), - *connect_front('h', '2:concat'), - *connect_front('div', '3:concat'), - *connect_front('concat', '1:reshape_in'), - - *connect_front('reshape_in', '0:transpose_in'), - *connect_front('transpose_in_order', "1:transpose_in"), - *connect_front('transpose_in', 'conv'), - *connect_front('conv', '0:transpose_out'), - *connect_front('transpose_out_order', '1:transpose_out'), - *connect_front('transpose_out', '0:reshape_out'), - *connect_front('reshape_out_shape', '1:reshape_out'), - *connect_front('reshape_out', 'out_op') - ]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'out_op', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_simple_pooling(self): - graph = build_graph(self.nodes, [ - *connect_front('input', 'splice'), - *connect_front('splice', 'pool'), - *connect_front('pool', 'out_op') - ], nodes_with_edges_only=True) - graph.stage = 'front' - AddReshapeTransposeAroundConvPool.find_and_replace_pattern(graph) - - ref_graph = build_graph(self.ref_nodes, - [ - *connect_front('input', 'splice'), - *connect_front('splice', '0:reshape_in'), - - *connect_front('splice', 'shapeof'), - *connect_front('shapeof:0', '0:gather_batch'), - *connect_front('ind', '1:gather_batch'), - *connect_front('axis', '2:gather_batch'), - *connect_front('shapeof:0', '0:gather_h'), - *connect_front('ind_h', '1:gather_h'), - *connect_front('axis', '2:gather_h'), - *connect_front('gather_h', '0:div'), - *connect_front('th', '1:div'), - *connect_front('gather_batch', '0:concat'), - *connect_front('t', '1:concat'), - *connect_front('h', '3:concat'), - *connect_front('div', '2:concat'), - *connect_front('concat', '1:reshape_in'), - - *connect_front('reshape_in', '0:transpose_in'), - *connect_front('transpose_in_order', "1:transpose_in"), - *connect_front('transpose_in', 'pool'), - *connect_front('pool', '0:transpose_out'), - *connect_front('transpose_out_order', '1:transpose_out'), - *connect_front('transpose_out', '0:reshape_out'), - *connect_front('reshape_out_shape', '1:reshape_out'), - *connect_front('reshape_out', 'out_op') - ]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'out_op', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/kaldi/apply_counts_test.py b/tools/mo/unit_tests/mo/front/kaldi/apply_counts_test.py deleted file mode 100644 index c0c685e867fe5d..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/apply_counts_test.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.kaldi.apply_counts import apply_biases_to_last_layer -from 
openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class TestKaldiPipeline(unittest.TestCase): - def test_apply_biases_to_ScaleShift(self): - counts = -0.5 * np.ones(10) - nodes = {'input': {'kind': 'op', 'op': None}, - 'weights': {'kind': 'op', 'op': 'Const'}, - 'biases': {'kind': 'op', 'op': 'Const', 'value': None, 'shape': None, 'data_type': None}, - 'sc': {'op': 'ScaleShift', 'kind': 'op'}, - 'sub': {'op': 'Add', 'kind': 'op'}, - "const": {'op': 'Const', 'value': -counts, 'kind': 'op'}, - 'op_output': {'op': 'Result', 'kind': 'op'} - } - graph = build_graph(nodes, - [ - ('input', 'sc', {'in': 0}), - ('weights', 'sc', {'in': 1}), - ('biases', 'sc', {'in': 2}), - ('sc', 'op_output') - ], nodes_with_edges_only=True) - - graph.stage = "front" - ref_graph = build_graph(nodes, - [ - ('input', 'sc', {'in': 0}), - ('weights', 'sc', {'in': 1}), - ('biases', 'sc', {'in': 2}), - ('sc', 'sub', {'in': 0}), - ('const', 'sub', {'in': 1}), - ('sub', 'op_output') - ], nodes_with_edges_only=True) - - apply_biases_to_last_layer(graph, counts) - compare_graphs(graph, ref_graph, 'op_output', check_op_attrs=True) - - def test_apply_biases_to_graph_with_SoftMax(self): - counts = -0.5 * np.ones(10) - nodes = {'input': {'kind': 'op', 'op': None}, - 'weights': {'kind': 'op', 'op': 'Const'}, - 'biases': {'kind': 'op', 'op': 'Const', 'value': None, 'shape': None, 'data_type': None}, - 'fc': {'op': 'FullyConnected', 'kind': 'op'}, - 'softmax': {'op': 'SoftMax', 'kind': 'op'}, - 'op_output': {'op': 'Result', 'kind': 'op'}, - 'sub': {'op': 'Add', 'kind': 'op'}, - "const": {'op': 'Const', 'value': -counts, 'kind': 'op'}, - } - graph = build_graph(nodes, - [ - ('input', 'fc', {'in': 0}), - ('weights', 'fc', {'in': 1}), - ('biases', 'fc', {'in': 2}), - ('fc', 'softmax'), - ('softmax','op_output') - ], nodes_with_edges_only=True) - ref_graph = build_graph(nodes, - [ - ('input', 'fc', {'in': 0}), - ('weights', 'fc', {'in': 1}), - ('biases', 'fc', {'in': 2}), - ('fc', 'sub', {'in': 0}), - ('const', 'sub', {'in': 1}), - ('sub', 'op_output') - ], nodes_with_edges_only=True) - - graph.stage = "front" - apply_biases_to_last_layer(graph, counts) - compare_graphs(graph, ref_graph, 'op_output', check_op_attrs=True) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/__init__.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/add_shift_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/add_shift_ext_test.py deleted file mode 100644 index f497412ff932d9..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/add_shift_ext_test.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.kaldi.extractors.add_shift_ext import AddShiftFrontExtractor -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class AddShiftFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['ScaleShift'] = ScaleShiftOp - - @classmethod - def create_pb_for_test_node(cls): - input_shape = cls.test_node.in_node().shape - pb = cls.write_tag_with_value('', 
0) - pb += cls.write_tag_with_value('FV', input_shape[1]) - for i in np.zeros(input_shape[1], dtype=np.uint32): - pb += TestKaldiUtilsLoading.pack_value(i, TestKaldiUtilsLoading.uint32_fmt) - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - AddShiftFrontExtractor.extract(cls.test_node) - - def test_assertion(self): - self.assertRaises(AttributeError, AddShiftFrontExtractor.extract, None) - - def test_extracted_shapes_add_shift(self): - weights = self.test_node.weights - biases = self.test_node.biases - weights_shape = weights.shape[0] - self.assertEqual(self.test_node.in_node().shape[1], weights_shape) - self.assertEqual(biases.shape[0], weights_shape) - - def test_extracted_blobs_add_shift(self): - weights = self.test_node.weights - biases = self.test_node.biases - self.assertTrue(np.array_equal(weights, np.ones(weights.shape))) - self.assertTrue(np.array_equal(biases, np.zeros(biases.shape))) - self.assertTrue(self.test_node.bias_term) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/affine_component_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/affine_component_ext_test.py deleted file mode 100644 index 36ac52b2e9c7f5..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/affine_component_ext_test.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.MatMul import FullyConnected -from openvino.tools.mo.front.kaldi.extractors.affine_transform_ext import AffineTransformFrontExtractor -from openvino.tools.mo.ops.op import Op -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class AffineComponentFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['FullyConnected'] = FullyConnected - - @classmethod - def create_pb_for_test_node(cls): - pb = KaldiFrontExtractorTest.generate_learn_info() - pb += KaldiFrontExtractorTest.generate_matrix([10, 10]) - pb += KaldiFrontExtractorTest.generate_vector(10) - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - AffineTransformFrontExtractor.extract(cls.test_node) - - def test_assertion(self): - self.assertRaises(AttributeError, AffineTransformFrontExtractor.extract, None) - - def test_attrs(self): - self.assertEqual(self.test_node['out-size'], 10) - - def test_out_blobs(self): - self.assertTrue(np.array_equal(self.test_node.weights, range(10 * 10))) - self.assertTrue(np.array_equal(self.test_node.biases, range(10))) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/affine_transform_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/affine_transform_ext_test.py deleted file mode 100644 index a9ddefa16e0a7c..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/affine_transform_ext_test.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.MatMul import FullyConnected -from openvino.tools.mo.front.kaldi.extractors.affine_transform_ext import AffineTransformFrontExtractor -from openvino.tools.mo.ops.op import Op -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class AffineTransformFrontExtractorTest(KaldiFrontExtractorTest): - 
@classmethod - def register_op(cls): - Op.registered_ops['FullyConnected'] = FullyConnected - - @classmethod - def create_pb_for_test_node(cls): - pb = KaldiFrontExtractorTest.generate_learn_info() - pb += KaldiFrontExtractorTest.generate_matrix([10, 10]) - pb += KaldiFrontExtractorTest.generate_vector(10) - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - AffineTransformFrontExtractor.extract(cls.test_node) - - def test_assertion(self): - self.assertRaises(AttributeError, AffineTransformFrontExtractor.extract, None) - - def test_attrs(self): - self.assertEqual(self.test_node['out-size'], 10) - - def test_out_blobs(self): - self.assertTrue(np.array_equal(self.test_node.weights, range(10 * 10))) - self.assertTrue(np.array_equal(self.test_node.biases, range(10))) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/batchnorm_component_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/batchnorm_component_ext_test.py deleted file mode 100644 index 315afcad90a234..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/batchnorm_component_ext_test.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.kaldi.extractors.batchnorm_component_ext import BatchNormComponentFrontExtractor -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class BatchNormComponentFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['ScaleShift'] = ScaleShiftOp - - @classmethod - def create_pb_for_test_node(cls): - pb = KaldiFrontExtractorTest.write_tag_with_value('', 16) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 16) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 0.00001, np.float32) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 0.5, np.float32) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 'F', np.string_) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 16) - pb += b' ' - pb += KaldiFrontExtractorTest.generate_vector(16) - pb += b' ' - pb += KaldiFrontExtractorTest.generate_vector(16) - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - - def test_extract(self): - BatchNormComponentFrontExtractor.extract(self.test_node) - self.assertEqual(len(self.test_node['embedded_inputs']), 2) - ref_weights = list([1.5811389e+02, 4.9999750e-01, 3.5355249e-01, 2.8867465e-01, 2.4999970e-01, - 2.2360659e-01, 2.0412397e-01, 1.8898210e-01, 1.7677659e-01, 1.6666657e-01, - 1.5811381e-01, 1.5075560e-01, 1.4433751e-01, 1.3867499e-01, 1.3363057e-01, 1.2909940e-01]) - ref_biases = list([-0., -0.4999975, -0.707105, -0.86602396, -0.9999988, -1.1180329, - -1.2247438, -1.3228748, -1.4142127, -1.4999992, -1.5811381, -1.6583116, - -1.7320502, -1.8027749, -1.870828, -1.936491]) - self.assertEqual(np.allclose(self.test_node['weights'], ref_weights), True) - self.assertEqual(np.allclose(self.test_node['biases'], ref_biases), True) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/bias_component_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/bias_component_ext_test.py deleted file mode 100644 index b316f40805c0ee..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/bias_component_ext_test.py +++ /dev/null @@ 
-1,40 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.kaldi.extractors.bias_component_ext import FixedBiasComponentFrontExtractor -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class FixedBiasComponentFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['ScaleShift'] = ScaleShiftOp - - @classmethod - def create_pb_for_test_node(cls): - cls.input_shape = 10 - - pb = b' ' - pb += KaldiFrontExtractorTest.generate_vector(cls.input_shape) - pb += b'' - - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - FixedBiasComponentFrontExtractor.extract(cls.test_node) - - def test_fixedbias_extractor(self): - input_shape = FixedBiasComponentFrontExtractorTest.input_shape - - exp_res = { - 'op': 'ScaleShift', - 'layout': 'NCHW', - 'bias_term': True, - 'out-size': input_shape, - 'biases': np.arange(input_shape) - } - - self.compare_node_attrs(exp_res) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/common_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/common_ext_test.py deleted file mode 100644 index 08980e1b2ba2c6..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/common_ext_test.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.graph.graph import Node, Graph -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from unit_tests.utils.graph import build_graph - - -class KaldiFrontExtractorTest(UnitTestWithMockedTelemetry): - graph = Graph() - nodes_attributes = {} - test_node = None - - @classmethod - def setUp(cls): - super().setUp(cls) - cls.nodes_attributes = { - 'input_data_node': { - 'name': 'input_data_node', - 'kind': 'data', - 'shape': np.array([1, 32, 1, 40], dtype=np.int64), - }, - 'weights': { - 'name': 'weights', - 'kind': 'data', - 'shape': np.array([10, 32, 1, 8], dtype=np.int64), - 'value': np.zeros((10, 32, 1, 8)), - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis'], - }, - 'test_node': { - 'name': 'test_node', - 'kind': 'op' - }, - 'output_data_node': { - 'name': 'output_data_node', - 'kind': 'data', - 'shape': None - } - } - cls.create_graph() - cls.test_node = Node(cls.graph, 'test_node') - cls.graph.add_node(cls.test_node.id, type='test_node') - cls.register_op() - cls.create_pb_for_test_node() - - @staticmethod - def register_op(): - raise NotImplementedError('Please, implement register_op') - - @classmethod - def create_graph(cls): - cls.graph = build_graph(cls.nodes_attributes, [ - ('input_data_node', 'test_node'), - ('test_node', 'output_data_node') - ], nodes_with_edges_only=True) - - @classmethod - def create_pb_for_test_node(cls): - pass - - @staticmethod - def generate_learn_info(): - pb = KaldiFrontExtractorTest.write_tag_with_value('', 0) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 1) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 2) - return pb - - @staticmethod - def generate_matrix(shape): - pb = KaldiFrontExtractorTest.write_tag_with_value('FM', shape[0]) - 
pb += KaldiFrontExtractorTest.write_int_value(shape[1]) - pb += KaldiFrontExtractorTest.generate_blob(np.prod(shape)) - return pb - - @staticmethod - def generate_vector(size: int) -> bytes: - pb = KaldiFrontExtractorTest.write_tag_with_value('FV', size) - pb += KaldiFrontExtractorTest.generate_blob(size) - return pb - - @staticmethod - def generate_blob(size: int) -> bytes: - pb = b'' - for i in range(size): - pb += TestKaldiUtilsLoading.pack_value(i, TestKaldiUtilsLoading.float32_fmt) - return pb - - @staticmethod - def write_tag_with_value(tag: str, value, value_type=np.int32) -> bytes: - pb = bytes(tag + ' ', 'ascii') - if value_type == np.int32: - return pb + KaldiFrontExtractorTest.write_int_value(value) - elif value_type == np.float32: - return pb + KaldiFrontExtractorTest.write_float_value(value) - else: - return pb + KaldiFrontExtractorTest.write_str_value(value) - - @staticmethod - def write_int_value(value) -> bytes: - pb = TestKaldiUtilsLoading.pack_value(4, 'B') - pb += TestKaldiUtilsLoading.pack_value(value, TestKaldiUtilsLoading.uint32_fmt) - return pb - - @staticmethod - def write_float_value(value) -> bytes: - pb = TestKaldiUtilsLoading.pack_value(4, 'B') - pb += TestKaldiUtilsLoading.pack_value(value, TestKaldiUtilsLoading.float32_fmt) - return pb - - @staticmethod - def write_str_value(value) -> bytes: - pb = bytes(value, 'ascii') - return pb - - def compare_node_attrs(self, exp_res): - node = self.test_node - for key in exp_res.keys(): - if type(node[key]) in [list, np.ndarray]: - self.assertTrue(np.array_equal(np.array(node[key]), np.array(exp_res[key]))) - else: - self.assertEqual(node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/concat_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/concat_ext_test.py deleted file mode 100644 index eab18f66a83c57..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/concat_ext_test.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.kaldi.extractors.concat_ext import ConcatFrontExtractor -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.ops.op import Op -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest - - -class ConcatFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['Concat'] = Convolution - - def test_concat(self): - ConcatFrontExtractor.extract(self.test_node) - self.assertEqual(self.test_node.axis, 1) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/convolutional_component_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/convolutional_component_ext_test.py deleted file mode 100644 index 19100e860d47a8..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/convolutional_component_ext_test.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.kaldi.extractors.convolutional_component_ext import ConvolutionalComponentFrontExtractor -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.ops.op import Op -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class ConvolutionalComponentFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def 
register_op(cls): - Op.registered_ops['Convolution'] = Convolution - - @classmethod - def create_pb_for_test_node(cls): - pb = KaldiFrontExtractorTest.write_tag_with_value('', 2) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 2) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 4) - pb += KaldiFrontExtractorTest.generate_learn_info() - pb += b' ' - pb += KaldiFrontExtractorTest.generate_matrix([2, 4]) - pb += b' ' - pb += KaldiFrontExtractorTest.generate_vector(2) - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - ConvolutionalComponentFrontExtractor.extract(cls.test_node) - - def test_assertion(self): - self.assertRaises(AttributeError, ConvolutionalComponentFrontExtractor.extract, None) - - def test_attrs(self): - val_attrs = { - 'kernel': [2, 2, 1, 2], - 'stride': [1, 1, 1, 2], - 'pad': [[[0, 0], [0, 0], [0, 0], [0, 0]]], - 'output': 2, - 'patch_stride': 4, - 'spatial_dims': [2, 3], - 'channel_dims': [1], - 'batch_dims': [0], - 'dilation': [1, 1, 1, 1] - } - for attr in val_attrs: - if isinstance(val_attrs[attr], list): - self.assertTrue((self.test_node[attr] == val_attrs[attr]).all()) - else: - self.assertEqual(self.test_node[attr], val_attrs[attr]) - - def test_convolution_blobs(self): - self.assertTrue(np.array_equal(self.test_node.weights, [0, 1, 2, 3, 4, 5, 6, 7])) - self.assertTrue(np.array_equal(self.test_node.biases, [0, 1])) - diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/fixed_affine_component_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/fixed_affine_component_ext_test.py deleted file mode 100644 index 344fb9c75278d2..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/fixed_affine_component_ext_test.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.MatMul import FullyConnected -from openvino.tools.mo.front.kaldi.extractors.fixed_affine_component_ext import FixedAffineComponentFrontExtractor -from openvino.tools.mo.ops.op import Op -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class FixedAffineComponentFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['FullyConnected'] = FullyConnected - - @classmethod - def create_pb_for_test_node(cls): - pb = b' ' + KaldiFrontExtractorTest.generate_matrix([10, 10]) - pb += b' ' + KaldiFrontExtractorTest.generate_vector(10) - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - FixedAffineComponentFrontExtractor.extract(cls.test_node) - - def test_assertion(self): - self.assertRaises(AttributeError, FixedAffineComponentFrontExtractor.extract, None) - - def test_attrs(self): - self.assertEqual(self.test_node['out-size'], 10) - - def test_out_blobs(self): - self.assertTrue(np.array_equal(self.test_node.weights, range(10 * 10))) - self.assertTrue(np.array_equal(self.test_node.biases, range(10))) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/max_pooling_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/max_pooling_ext_test.py deleted file mode 100644 index 4a3906e9019e7b..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/max_pooling_ext_test.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from 
openvino.tools.mo.front.kaldi.extractors.max_pooling_ext import MaxPoolingComponentFrontExtractor -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.pooling import Pooling -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class MaxPoolingComponentFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['Pooling'] = Pooling - - @classmethod - def create_pb_for_test_node(cls): - pb = KaldiFrontExtractorTest.write_tag_with_value('', 2) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 2) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 4) - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - MaxPoolingComponentFrontExtractor.extract(cls.test_node) - - def test_assertion(self): - self.assertRaises(AttributeError, MaxPoolingComponentFrontExtractor.extract, None) - - def test_attrs(self): - val_attrs = { - 'window': [1, 1, 1, 2], - 'stride': [1, 1, 1, 2], - 'pool_stride': 4, - 'pad': [[[0, 0], [0, 0], [0, 0], [0, 0]]] - } - for attr in val_attrs: - if isinstance(val_attrs[attr], list): - self.assertTrue((self.test_node[attr] == val_attrs[attr]).all()) - else: - self.assertEqual(self.test_node[attr], val_attrs[attr]) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/memoryoffset_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/memoryoffset_ext_test.py deleted file mode 100644 index ed0af3882223cf..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/memoryoffset_ext_test.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.kaldi.extractors.memoryoffset_ext import MemoryOffsetFrontExtractor -from openvino.tools.mo.ops.memoryoffset import MemoryOffset -from openvino.tools.mo.ops.op import Op -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest - - -class MemoryOffsetFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['memoryoffset'] = MemoryOffset - - @classmethod - def create_pb_for_test_node(cls): - pb = {'pair_name': 'my_pair', - 't': -5, - 'has_default': False - } - cls.test_node['parameters'] = pb - - def test_extract(self): - MemoryOffsetFrontExtractor.extract(self.test_node) - self.assertEqual(self.test_node['pair_name'], 'my_pair') - self.assertEqual(self.test_node['t'], -5) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/normalize_component_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/normalize_component_ext_test.py deleted file mode 100644 index 7c5dbc17a8d7cc..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/normalize_component_ext_test.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.normalize import NormalizeOp -from openvino.tools.mo.front.kaldi.extractors.normalize_component_ext import NormalizeComponentFrontExtractor -from openvino.tools.mo.ops.op import Op -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class NormalizeComponentFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['Normalize'] = NormalizeOp - - 
@classmethod - def create_pb_for_test_node(cls): - pb = KaldiFrontExtractorTest.write_tag_with_value('', 16) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 0.5, np.float32) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 'F', np.string_) - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - - def test_extract(self): - NormalizeComponentFrontExtractor.extract(self.test_node) - self.assertEqual(len(self.test_node['embedded_inputs']), 1) - self.assertListEqual(list(self.test_node['weights']), [2.0]) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/pnorm_component_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/pnorm_component_ext_test.py deleted file mode 100644 index d3eff6c561b84f..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/pnorm_component_ext_test.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.pnorm import PNormOp -from openvino.tools.mo.front.kaldi.extractors.pnorm_component_ext import PNormComponentFrontExtractor -from openvino.tools.mo.ops.op import Op -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class PNormComponentFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['pnorm'] = PNormOp - - @classmethod - def create_pb_for_test_node(cls): - pb = KaldiFrontExtractorTest.write_tag_with_value('', 3500) - pb += KaldiFrontExtractorTest.write_tag_with_value('', 350) - pb += KaldiFrontExtractorTest.write_tag_with_value('
', 2, np.float32) - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - - def test_extract(self): - PNormComponentFrontExtractor.extract(self.test_node) - self.assertEqual(self.test_node['p'], 2) - self.assertEqual(self.test_node['group'], 10) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/rescale_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/rescale_ext_test.py deleted file mode 100644 index 2fa2c89d5d0069..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/rescale_ext_test.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.kaldi.extractors.rescale_ext import RescaleFrontExtractor -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class RescaleFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['ScaleShift'] = ScaleShiftOp - - @classmethod - def create_pb_for_test_node(cls): - input_shape = cls.test_node.in_node().shape - pb = cls.write_tag_with_value('', 0) - pb += cls.write_tag_with_value('FV', input_shape[1]) - for i in range(input_shape[1]): - pb += TestKaldiUtilsLoading.pack_value(i, TestKaldiUtilsLoading.float32_fmt) - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - RescaleFrontExtractor.extract(cls.test_node) - - def test_assertion(self): - self.assertRaises(AttributeError, RescaleFrontExtractor.extract, None) - - def test_extracted_shapes_add_shift(self): - weights = self.test_node.weights - weights_shape = weights.shape[0] - self.assertEqual(self.test_node.in_node().shape[1], weights_shape) - - def test_extracted_blobs_add_shift(self): - weights = self.test_node.weights - self.assertTrue(np.array_equal(weights, range(self.test_node.in_node().shape[1]))) diff --git a/tools/mo/unit_tests/mo/front/kaldi/extractors/scale_component_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/extractors/scale_component_ext_test.py deleted file mode 100644 index 989e79cfbd102b..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/extractors/scale_component_ext_test.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.kaldi.extractors.scale_component_ext import FixedScaleComponentFrontExtractor -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.scale_shift import ScaleShiftOp -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from unit_tests.mo.front.kaldi.loader.utils_test import TestKaldiUtilsLoading - - -class FixedScaleComponentFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['ScaleShift'] = ScaleShiftOp - - @classmethod - def create_pb_for_test_node(cls): - cls.input_shape = 10 - - pb = b' ' - pb += KaldiFrontExtractorTest.generate_vector(cls.input_shape) - pb += b'' - - cls.test_node['parameters'] = TestKaldiUtilsLoading.bytesio_from(pb) - FixedScaleComponentFrontExtractor.extract(cls.test_node) - - def test_fixedscale_extractor(self): - input_shape = FixedScaleComponentFrontExtractorTest.input_shape - - exp_res = { - 'op': 'ScaleShift', - 'layout': 'NCHW', - 'out-size': input_shape, - 
'weights': np.arange(input_shape) - } - - self.compare_node_attrs(exp_res) diff --git a/tools/mo/unit_tests/mo/front/kaldi/loader/__init__.py b/tools/mo/unit_tests/mo/front/kaldi/loader/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/kaldi/loader/loader_test.py b/tools/mo/unit_tests/mo/front/kaldi/loader/loader_test.py deleted file mode 100644 index ab2f1158faf9a0..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/loader/loader_test.py +++ /dev/null @@ -1,233 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import io -import struct -import unittest - -import numpy as np - -from openvino.tools.mo.front.kaldi.loader.loader import load_topology_map, load_components -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class TestKaldiModelsLoading(unittest.TestCase): - - def test_component_map_loading_sequence(self): - test_map = "input-node name=input dim=16 \n" + \ - "component-node name=lda component=lda input=input \n" + \ - "component-node name=tdnn1.affine component=tdnn1.affine input=lda \n" + \ - "component-node name=tdnn1.relu component=tdnn1.relu input=tdnn1.affine \n" + \ - "component-node name=tdnn1.batchnorm component=tdnn1.batchnorm input=tdnn1.relu \n\n" - graph = Graph(name="test_graph_component_map_loading_sequence") - - test_top_map = load_topology_map(io.BytesIO(bytes(test_map, 'ascii')), graph) - - ref_map = {b"lda": ["lda"], - b"tdnn1.affine": ["tdnn1.affine"], - b"tdnn1.relu": ["tdnn1.relu"], - b"tdnn1.batchnorm": ["tdnn1.batchnorm"]} - self.assertEqual(test_top_map, ref_map) - self.assertTrue("input" in graph.nodes()) - self.assertListEqual(list(Node(graph, 'input')['shape']), [1, 16]) - - ref_graph = build_graph({'input': {'shape': np.array([1, 16]), 'kind': 'op', 'op': 'Parameter'}, - 'lda': {'kind': 'op'}, - 'tdnn1.affine': {'kind': 'op'}, - 'tdnn1.relu': {'kind': 'op'}, - 'tdnn1.batchnorm': {'kind': 'op'}, - }, - [ - ('input', 'lda'), - ('lda', 'tdnn1.affine'), - ('tdnn1.affine', 'tdnn1.relu'), - ('tdnn1.relu', 'tdnn1.batchnorm'), - ] - ) - (flag, resp) = compare_graphs(graph, ref_graph, 'tdnn1.batchnorm') - self.assertTrue(flag, resp) - - # NOTE: this test is disabled because it's broken and need to be fixed! Merge request 948. 
- # Fail in load_topology_map() in read_node() method - we create edge with node which doesn't exist in graph - def test_component_map_loading_swap(self): - test_map = "input-node name=input dim=16 \n" + \ - "component-node name=lda component=lda input=input \n" + \ - "component-node name=tdnn1.batchnorm component=tdnn1.batchnorm input=tdnn1.relu \n" + \ - "component-node name=tdnn1.relu component=tdnn1.relu input=tdnn1.affine \n" + \ - "component-node name=tdnn1.affine component=tdnn1.affine input=lda \n" + \ - "\n" - graph = Graph(name="test_graph_component_map_loading_swap") - - test_top_map = load_topology_map(io.BytesIO(bytes(test_map, 'ascii')), graph) - - ref_map = {b"lda": ["lda"], - b"tdnn1.affine": ["tdnn1.affine"], - b"tdnn1.relu": ["tdnn1.relu"], - b"tdnn1.batchnorm": ["tdnn1.batchnorm"]} - self.assertEqual(test_top_map, ref_map) - self.assertTrue("input" in graph.nodes()) - self.assertListEqual(list(Node(graph, 'input')['shape']), [1, 16]) - - ref_graph = build_graph({'input': {'shape': np.array([1, 16]), 'kind': 'op', 'op': 'Parameter'}, - 'lda': {'kind': 'op'}, - 'tdnn1.affine': {'kind': 'op'}, - 'tdnn1.relu': {'kind': 'op'}, - 'tdnn1.batchnorm': {'kind': 'op'}, - }, - [ - ('input', 'lda'), - ('lda', 'tdnn1.affine'), - ('tdnn1.affine', 'tdnn1.relu'), - ('tdnn1.relu', 'tdnn1.batchnorm'), - ] - ) - (flag, resp) = compare_graphs(graph, ref_graph, 'tdnn1.batchnorm') - self.assertTrue(flag, resp) - - def test_component_map_loading_append(self): - test_map = "input-node name=input dim=16 \n" + \ - "component-node name=lda component=lda input=input \n" + \ - "component-node name=tdnn1.affine component=tdnn1.affine input=Append(input, lda) \n" + \ - "component-node name=tdnn1.relu component=tdnn1.relu input=Append(tdnn1.affine, input, lda) \n" + \ - "\n" - graph = Graph(name="test_graph_component_map_loading_append") - - test_top_map= load_topology_map(io.BytesIO(bytes(test_map, 'ascii')), graph) - - ref_map = {b"lda": ["lda"], - b"tdnn1.affine": ["tdnn1.affine"], - b"tdnn1.relu": ["tdnn1.relu"]} - self.assertEqual(test_top_map, ref_map) - self.assertTrue("input" in graph.nodes()) - self.assertListEqual(list(Node(graph, 'input')['shape']), [1, 16]) - - ref_graph = build_graph({'input': {'shape': np.array([1, 16]), 'kind': 'op', 'op': 'Parameter'}, - 'lda': {'kind': 'op'}, - 'tdnn1.affine': {'kind': 'op'}, - 'tdnn1.relu': {'kind': 'op'}, - 'append_input_lda': {'kind': 'op', 'op': 'Concat'}, - 'append_affine_input_lda': {'kind': 'op', 'op': 'Concat'}, - }, - [ - ('input', 'lda', {'out': 0}), - ('lda', 'append_input_lda', {'in': 1, 'out': 0}), - ('input', 'append_input_lda', {'in': 0, 'out': 1}), - ('append_input_lda', 'tdnn1.affine', {'out': 0}), - ('input', 'append_affine_input_lda', {'in': 1, 'out': 2}), - ('lda', 'append_affine_input_lda', {'in': 2, 'out': 1}), - ('tdnn1.affine', 'append_affine_input_lda', {'in': 0, 'out': 0}), - ('append_affine_input_lda', 'tdnn1.relu', {'out': 0}), - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'tdnn1.relu') - self.assertTrue(flag, resp) - - def test_component_map_loading_offset(self): - test_map = "input-node name=input dim=16\n" + \ - "component-node name=lda component=lda input=Offset(input, -3)\n" + \ - "component-node name=tdnn1.affine component=tdnn1.affine input=Append(Offset(input, -1), Offset(lda, 1))\n" + \ - "component-node name=tdnn1.relu component=tdnn1.relu input=tdnn1.affine\n" + \ - "\n" - graph = Graph(name="test_graph_component_map_loading_offset") - - test_top_map= load_topology_map(io.BytesIO(bytes(test_map, 
'ascii')), graph) - - ref_map = {b"lda": ["lda"], - b"tdnn1.affine": ["tdnn1.affine"], - b"tdnn1.relu": ["tdnn1.relu"]} - self.assertEqual(test_top_map, ref_map) - self.assertTrue("input" in graph.nodes()) - self.assertListEqual(list(Node(graph, 'input')['shape']), [1, 16]) - - ref_graph = build_graph({'input': {'shape': np.array([1, 16]), 'kind': 'op', 'op': 'Parameter'}, - 'lda': {'kind': 'op'}, - 'tdnn1.affine': {'kind': 'op'}, - 'tdnn1.relu': {'kind': 'op'}, - 'append_input_lda': {'kind': 'op', 'op': 'Concat'}, - 'offset_in_input_3': {'kind': 'op', 'op': 'memoryoffset', 't': -3, 'pair_name': 'offset_out_input_3'}, - 'offset_in_input_1': {'kind': 'op', 'op': 'memoryoffset', 't': -1, 'pair_name': 'offset_out_input_1'}, - 'offset_in_lda_1': {'kind': 'op', 'op': 'memoryoffset', 't': -1, 'pair_name': 'offset_out_lda_1'}, - }, - [ - ('input', 'offset_in_input_3', {'out': 0}), - ('offset_in_input_3', 'lda', {'out': 0}), - ('lda', 'offset_in_lda_1', {'out': 0}), - ('input', 'offset_in_input_1', {'out': 1}), - ('offset_in_lda_1', 'append_input_lda', {'in': 1, 'out': 0}), - ('offset_in_input_1', 'append_input_lda', {'in': 0, 'out': 0}), - ('append_input_lda', 'tdnn1.affine', {'out': 0}), - ('tdnn1.affine', 'tdnn1.relu', {'out': 0}), - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'tdnn1.relu') - self.assertTrue(flag, resp) - - def test_load_components(self): - test_map = b" " + struct.pack('B', 4) + struct.pack('I', 3) + \ - b" lda " + \ - b" tdnn1.affine @? " + \ - b" tdnn1.relu FV " - - graph = build_graph({'input': {'shape': np.array([1, 16]), 'kind': 'op', 'op': 'Parameter'}, - 'lda': {'kind': 'op', 'op': 'fixedaffinecomponent'}, - 'tdnn1.affine': {'kind': 'op', 'op': 'fixedaffinecomponent'}, - 'tdnn1.relu': {'kind': 'op', 'op': 'relu'}, - }, - [ - ('input', 'lda'), - ('lda', 'tdnn1.affine'), - ('tdnn1.affine', 'tdnn1.relu'), - ] - ) - - ref_map = {b"lda": ["lda"], - b"tdnn1.affine": ["tdnn1.affine"], - b"tdnn1.relu": ["tdnn1.relu"]} - - load_components(io.BytesIO(test_map), graph, ref_map) - - ref_graph = build_graph({'input': {'shape': np.array([1, 16]), 'kind': 'op', 'op': 'Parameter'}, - 'lda': {'kind': 'op', 'op': 'fixedaffinecomponent', 'parameters': ' '}, - 'tdnn1.affine': {'kind': 'op', 'op': 'naturalgradientaffinecomponent', 'parameters': " @? 
·С8 "}, - 'tdnn1.relu': {'kind': 'op', 'op': 'rectifiedlinearcomponent', 'parameters': " FV "}, - }, - [ - ('input', 'lda'), - ('lda', 'tdnn1.affine'), - ('tdnn1.affine', 'tdnn1.relu'), - ] - ) - (flag, resp) = compare_graphs(graph, ref_graph, 'tdnn1.relu') - self.assertTrue(flag, resp) - - def test_component_map_loading_scale(self): - test_map = "input-node name=input dim=16\n" + \ - "component-node name=lda component=lda input=Scale(0.1, input)\n" + \ - "\n" - graph = Graph(name="test_graph_component_map_loading_scale") - - test_top_map = load_topology_map(io.BytesIO(bytes(test_map, 'ascii')), graph) - - ref_map = {b"lda": ["lda"]} - self.assertEqual(test_top_map, ref_map) - self.assertTrue("input" in graph.nodes()) - self.assertListEqual(list(Node(graph, 'input')['shape']), [1, 16]) - - ref_graph = build_graph({'input': {'shape': np.array([1, 16]), 'kind': 'op', 'op': 'Parameter'}, - 'lda': {'kind': 'op'}, - 'mul': {'kind': 'op'}, - 'scale_const': {'kind': 'op', 'op': 'Const'}, - }, - [ - ('input', 'mul', {'in': 0}), - ('scale_const', 'mul', {'in': 1}), - ('mul', 'lda', {'out': 0}), - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'lda') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/kaldi/loader/utils_test.py b/tools/mo/unit_tests/mo/front/kaldi/loader/utils_test.py deleted file mode 100644 index bd5e6610a83dcb..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/loader/utils_test.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import io -import struct -import unittest - -from openvino.tools.mo.front.kaldi.loader.utils import end_of_nnet_tag, end_of_component_tag, get_bool, get_uint16, get_uint32, \ - get_uint64, read_binary_bool_token, read_binary_integer32_token, read_binary_integer64_token, read_string, \ - read_binary_float_token, find_next_tag, find_next_component, find_end_of_component, get_parameters, \ - collect_until_token_and_read, get_args_for_specifier -from openvino.tools.mo.utils.error import Error - - -class TestKaldiUtilsLoading(unittest.TestCase): - bool_fmt = '?' 
- uint16_fmt = 'H' - uint32_fmt = 'I' - uint64_fmt = 'q' - float32_fmt = 'f' - - @staticmethod - def bytesio_from(buffer): - return io.BytesIO(buffer) - - @staticmethod - def pack_value(value, fmt): - return struct.pack(fmt, value) - - def test_check_common_tags(self): - self.assertEqual(end_of_nnet_tag, '') - self.assertEqual(end_of_component_tag, '') - - def test_check_results_getting_function(self): - self.assertTrue(get_bool(self.pack_value(True, self.bool_fmt))) - self.assertFalse(get_bool(self.pack_value(False, self.bool_fmt))) - self.assertEqual(get_uint16(self.pack_value(16, self.uint16_fmt)), 16) - self.assertEqual(get_uint32(self.pack_value(32, self.uint32_fmt)), 32) - self.assertEqual(get_uint64(self.pack_value(64, self.uint64_fmt)), 64) - - def test_read_binary_bool_token(self): - true_value = self.bytesio_from(self.pack_value(True, self.bool_fmt)) - false_value = self.bytesio_from(self.pack_value(False, self.bool_fmt)) - self.assertTrue(read_binary_bool_token(true_value)) - self.assertFalse(read_binary_bool_token(false_value)) - - def test_read_binary_integer32_token(self): - stream = self.bytesio_from(self.pack_value(4, 'B') + self.pack_value(32, self.uint32_fmt)) - self.assertEqual(read_binary_integer32_token(stream), 32) - - def test_read_binary_integer64_token(self): - stream = self.bytesio_from(self.pack_value(8, 'B') + self.pack_value(64, self.uint64_fmt)) - self.assertEqual(read_binary_integer64_token(stream), 64) - - def test_read_binary_float_token(self): - stream = self.bytesio_from(self.pack_value(4, 'B') + self.pack_value(0.001, self.float32_fmt)) - self.assertAlmostEqual(read_binary_float_token(stream), 0.001) - - def test_read_string_token(self): - stream = self.bytesio_from(b"opgru3.renorm ") - self.assertEqual(read_string(stream), b"opgru3.renorm") - - def test_find_next_tag(self): - test_token = b'' - self.assertEqual(find_next_tag(self.bytesio_from(test_token)), test_token.decode('ascii')) - fake_token = b'' - test_file = b'somefakeinfoinfo' + component + b'' - self.assertEqual(find_next_component(self.bytesio_from(test_file)), component.decode('ascii').lower()[1:-1]) - - def test_find_next_component_eoc(self): - component = b'' - test_file = b'' + component + b'' - self.assertEqual(find_next_component(self.bytesio_from(test_file)), component.decode('ascii').lower()[1:-1]) - - def test_find_next_component_end_of_nnet(self): - test_file = b'somefakeinfoinfo' - self.assertRaises(Error, find_next_component, self.bytesio_from(test_file)) - - def test_find_end_of_component(self): - component = '' - test_file = b'somefakeinfoinfo' + bytes(end_of_component_tag, 'ascii') + b'' - end_tag, position = find_end_of_component(self.bytesio_from(test_file), component.lower()[1:-1]) - self.assertEqual(end_tag, end_of_component_tag) - self.assertEqual(position, test_file.decode('ascii').index(end_of_component_tag) + len(end_of_component_tag)) - - def test_get_pb(self): - component = '' - test_file = b'somefakeinfoinfo' + bytes(end_of_component_tag, 'ascii') + b'' - end_tag, end_position = find_end_of_component(self.bytesio_from(test_file), component[1:-1].lower()) - pb = get_parameters(self.bytesio_from(test_file), 0, end_position) - - def test_collect_until_token_and_read(self): - tag = b'' - test_file = b' opgru3.renorm ' + self.pack_value(4, 'B') + \ - self.pack_value(256, 'I') + b' ' + self.pack_value(4, 'B') + \ - self.pack_value(0.5, 'f') + b' F' - value = collect_until_token_and_read(self.bytesio_from(test_file), tag) - self.assertEqual(value, 256) - - def 
test_get_args_for_specifier(self): - string = b"(Offset(input, -2), Offset(input, -1), input, Offset(input, 1), Offset(input, 2))" - args = get_args_for_specifier(string) - ref = [b"Offset(input, -2)", b"Offset(input, -1)", b"input", b"Offset(input, 1)", b"Offset(input, 2)"] - self.assertEqual(args, ref) - - def test_get_args_for_specifier_2(self): - string = b"(Offset(input, -2), input, Offset(Offset(input, -1), 1))" - args = get_args_for_specifier(string) - ref = [b"Offset(input, -2)", b"input", b"Offset(Offset(input, -1), 1)"] - self.assertEqual(args, ref) - - def test_get_args_for_specifier_3(self): - string = b"(Offset(input, 1), Offset(input, 2))" - args = get_args_for_specifier(string) - ref = [b"Offset(input, 1)", b"Offset(input, 2)"] - self.assertEqual(args, ref) diff --git a/tools/mo/unit_tests/mo/front/kaldi/memory_offset_adjustment_test.py b/tools/mo/unit_tests/mo/front/kaldi/memory_offset_adjustment_test.py deleted file mode 100644 index 2861597480540b..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/memory_offset_adjustment_test.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.kaldi.memory_offset_adjustment import MemoryOffsetAdjustment -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class MemoruOffsetAdjustmentTests(unittest.TestCase): - - def test_several_memory_concat(self): - graph = build_graph({'in': {'kind': 'op', 'op': None}, - 'memory_2': {'kind': 'op', 'op': 'MemoryOffset', 't': 2}, - 'memory_1': {'kind': 'op', 'op': 'MemoryOffset', 't': 1}, - 'memory__3': {'kind': 'op', 'op': 'MemoryOffset', 't': -3}, - 'concat': {'kind': 'op', 'op': 'Concat'}}, - [('in', 'memory_2', {'out': 0}), ('in', 'memory_1', {'out': 1}), - ('in', 'memory__3', {'out': 3}), - ('memory_2', 'concat', {'in': 0}), - ('memory_1', 'concat', {'in': 1}), - ('in', 'concat', {'in': 2, 'out': 2}), - ('memory__3', 'concat', {'in': 3})], - nodes_with_edges_only=True) - graph.stage = 'front' - - ref_graph = build_graph({'in': {'kind': 'op', 'op': None}, - 'memory__5': {'kind': 'op', 'op': 'MemoryOffset', 't': -5}, - 'memory__1': {'kind': 'op', 'op': 'MemoryOffset', 't': -1}, - 'memory__2': {'kind': 'op', 'op': 'MemoryOffset', 't': -2}, - 'concat': {'kind': 'op', 'op': 'Concat'}}, - [('in', 'memory__5', {'out': 3}), ('in', 'memory__1', {'out': 1}), - ('in', 'memory__2', {'out': 2}), - ('in', 'concat', {'in': 0, 'out': 0}), - ('memory__2', 'concat', {'in': 2}), - ('memory__1', 'concat', {'in': 1}), - ('memory__5', 'concat', {'in': 3})], - nodes_with_edges_only=True) - - MemoryOffsetAdjustment().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'concat', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_memory_before_several_memory_concat(self): - graph = build_graph({'in': {'kind': 'op', 'op': None}, - 'memory_2': {'kind': 'op', 'op': 'MemoryOffset', 't': 2}, - 'memory_1': {'kind': 'op', 'op': 'MemoryOffset', 't': 1}, - 'memory__3': {'kind': 'op', 'op': 'MemoryOffset', 't': -3}, - 'concat': {'kind': 'op', 'op': 'Concat'}}, - [('in', 'memory_1', {'out': 0}), ('memory_1', 'memory_2', {'out': 0}), - ('memory_1', 'memory__3', {'out': 0}), - ('memory_2', 'concat', {'in': 1}), - ('memory__3', 'concat', {'in': 0}), - ('memory_1', 'concat', {'in': 2, 'out': 0}), - ('in', 'concat', {'in': 3, 'out': 1})], - nodes_with_edges_only=True) - graph.stage = 'front' - - 
ref_graph = build_graph({'in': {'kind': 'op', 'op': None}, - 'memory__3': {'kind': 'op', 'op': 'MemoryOffset', 't': -3}, - 'memory__6': {'kind': 'op', 'op': 'MemoryOffset', 't': -5}, - 'memory__2': {'kind': 'op', 'op': 'MemoryOffset', 't': -2}, - 'concat': {'kind': 'op', 'op': 'Concat'}}, - [('in', 'memory__2', {'out': 0}), ('memory__2', 'concat', {"in": 1, 'out': 0}), - ('memory__2', 'memory__6', {'out': 0}), - ('memory__6', 'concat', {'in': 0}), - ('memory__3', 'concat', {'in': 3}), - ('memory__2', 'concat', {"in": 2, 'out': 0}), - ('in', 'memory__3', {'out': 1})], - nodes_with_edges_only=True) - - MemoryOffsetAdjustment().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'concat', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_memory_parallel_several_memory_concat(self): - graph = build_graph({'in': {'kind': 'op', 'op': None}, - 'memory_3': {'kind': 'op', 'op': 'MemoryOffset', 't': 3}, - 'memory__1': {'kind': 'op', 'op': 'MemoryOffset', 't': -1}, - 'memory_5': {'kind': 'op', 'op': 'MemoryOffset', 't': 5}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_1': {'kind': 'op', 'op': 'Concat'}, - 'split': {'kind': 'op', 'op': 'Split'}}, - [('in', 'split', {'out': 0}), ('in', 'memory_5', {'out': 0}), - ('split', 'memory_3', {'out': 0}), - ('split', 'memory__1', {'out': 1}), - ('memory_3', 'concat', {'in': 0}), - ('memory__1', 'concat', {'in': 1}), - ('concat', 'concat_1', {'in': 0, 'out': 0}), - ('memory_5', 'concat_1', {'in': 1, 'out': 0}), - ], - nodes_with_edges_only=True) - graph.stage = 'front' - - ref_graph = build_graph({'in': {'kind': 'op', 'op': None}, - 'memory__4': {'kind': 'op', 'op': 'MemoryOffset', 't': -4}, - 'memory__2': {'kind': 'op', 'op': 'MemoryOffset', 't': -2}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_1': {'kind': 'op', 'op': 'Concat'}, - 'split': {'kind': 'op', 'op': 'Split'}, - }, - [('in', 'split', {'out': 0}), ('in', 'concat_1', {'in': 1, 'out': 0}), - ('split', 'concat', {'out': 0, 'in': 0}), ('split', 'memory__4', {'out': 1}), - ('memory__4', 'concat', {'in': 1}), - ('concat', 'memory__2'), - ('memory__2', 'concat_1', {'in': 0}), - ], - nodes_with_edges_only=True) - - MemoryOffsetAdjustment().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'concat_1', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/kaldi/replace_lstm_nonlinearity_test.py b/tools/mo/unit_tests/mo/front/kaldi/replace_lstm_nonlinearity_test.py deleted file mode 100644 index e16edb2849c4d7..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/replace_lstm_nonlinearity_test.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.kaldi.replace_lstm_nonlinearity import ReplaceLstmNonLinearityPattern -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class ReplaceLstmNonlinearityTests(unittest.TestCase): - # i_t = Sigmoid(i_part + w_ic*ct_1) - # f_t = Sigmoid(f_part + w_fc*ct_1) - # c_t = f_t * f_scale * ct_1 + i_t * i_scale * tanh(c_part) - # o_t = Sigmoid(o_part + w_oc*c_t) - # m_t = o_t * o_scale * Tanh(c_t) - nodes_attributes = { - 'in': {'kind': 'op', 'op': 'Parameter'}, - 'i_part': {'kind': 'op', 'op': 'Parameter'}, - 'f_part': {'kind': 'op', 'op': 'Parameter'}, - 'c_part': {'kind': 'op', 
'op': 'Parameter'}, - 'o_part': {'kind': 'op', 'op': 'Parameter'}, - 'split': {'kind': 'op', 'op': 'Split'}, - 'split_dropout': {'kind': 'op', 'op': 'AttributedVariadicSplit', 'size_splits': [-1, 1, 1, 1]}, - 'sigmoid_i': {'kind': 'op', 'op': 'Sigmoid'}, - 'sigmoid_f': {'kind': 'op', 'op': 'Sigmoid'}, - 'sigmoid_o': {'kind': 'op', 'op': 'Sigmoid'}, - 'i_plus_c': {'kind': 'op', 'op': 'Eltwise', 'operation': 'sum'}, - 'f_plus_c': {'kind': 'op', 'op': 'Eltwise', 'operation': 'sum'}, - 'fc_plus_itanhc': {'kind': 'op', 'op': 'Eltwise', 'operation': 'sum'}, - 'o_plus_c': {'kind': 'op', 'op': 'Eltwise', 'operation': 'sum'}, - 'scaled_i': {'kind': 'op', 'op': 'Mul'}, - 'scaled_f': {'kind': 'op', 'op': 'Mul'}, - 'scaled_o': {'kind': 'op', 'op': 'Mul'}, - 'scale_i_c': {'kind': 'op', 'op': 'ScaleShift'}, - 'scale_f_c': {'kind': 'op', 'op': 'ScaleShift'}, - 'scale_o_c': {'kind': 'op', 'op': 'ScaleShift'}, - 'f_mul_c': {'kind': 'op', 'op': 'Eltwise', 'operation': 'mul'}, - 'i_mul_tanhc': {'kind': 'op', 'op': 'Eltwise', 'operation': 'mul'}, - 'o_mul_tanhc': {'kind': 'op', 'op': 'Eltwise', 'operation': 'mul'}, - 'tanhcp': {'kind': 'op', 'op': 'Tanh'}, - 'tanhc': {'kind': 'op', 'op': 'Tanh'}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'out': {'kind': 'op', 'op': 'Placeholder'}, - 'lstm': {'kind': 'op', 'op': 'LstmNonLinearity', - 'i_weights': np.array([]), - 'f_weights': np.array([]), - 'o_weights': np.array([])} - } - - def test_lstm_nonlinearity(self): - graph = build_graph({'in': {'kind': 'op', 'op': 'Parameter'}, - 'lstm': {'kind': 'op', 'op': 'LstmNonLinearity', - 'use_dropout': False, - 'i_weights': np.array([]), - 'f_weights': np.array([]), - 'o_weights': np.array([]),}, - 'out': {'kind': 'op', 'op': 'Placeholder'}}, - [('in', 'lstm'), ('lstm', 'out')], nodes_with_edges_only=True) - graph.stage = 'front' - # split input to (i_part, f_part, c_part, o_part, ct_1) - ref_graph = build_graph(self.nodes_attributes, [ - ('in', 'split'), - ('split', 'scale_i_c', {'out': 4}), - ('scale_i_c', 'i_plus_c'), - ('split', 'i_plus_c', {'out': 0}), - ('i_plus_c', 'sigmoid_i'), - ('split', 'scale_f_c', {'out': 4}), - ('scale_f_c', 'f_plus_c'), - ('split', 'f_plus_c', {'out': 1}), - ('f_plus_c', 'sigmoid_f'), - ('split', 'tanhcp', {'out': 2}), - ('tanhcp', 'i_mul_tanhc'), - ('sigmoid_i', 'i_mul_tanhc'), - ('sigmoid_f', 'f_mul_c'), - ('split', 'f_mul_c', {'out': 4}), - ('f_mul_c', 'fc_plus_itanhc'), - ('i_mul_tanhc', 'fc_plus_itanhc'), - ('split', 'scale_o_c', {'out': 4}), - ('scale_o_c', 'o_plus_c'), - ('split', 'o_plus_c', {'out': 3}), - ('o_plus_c', 'sigmoid_o'), - ('fc_plus_itanhc', 'tanhc'), - ('sigmoid_o', 'o_mul_tanhc'), - ('tanhc', 'o_mul_tanhc'), - ('fc_plus_itanhc', 'concat'), - ('o_mul_tanhc', 'concat'), - ('lstm', 'out'), - ], nodes_with_edges_only=True) - ReplaceLstmNonLinearityPattern().replace_op(graph, Node(graph, 'lstm')) - (flag, resp) = compare_graphs(graph, ref_graph, 'out', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_lstm_nonlinearity_dropout(self): - graph = build_graph({'in': {'kind': 'op', 'op': 'Parameter'}, - 'lstm': {'kind': 'op', 'op': 'LstmNonLinearity', - 'use_dropout': True, - 'i_weights': np.array([]), - 'f_weights': np.array([]), - 'o_weights': np.array([]),}, - 'out': {'kind': 'op', 'op': 'Placeholder'}}, - [('in', 'lstm'), ('lstm', 'out')], nodes_with_edges_only=True) - graph.stage = 'front' - # split input to (i_part, f_part, c_part, o_part, ct_1) - ref_graph = build_graph(self.nodes_attributes, [ - ('in', 'split_dropout'), - ('split_dropout', 'split', {'out': 
0}), - ('split', 'scale_i_c', {'out': 4}), - ('scale_i_c', 'i_plus_c'), - ('split', 'i_plus_c', {'out': 0}), - ('i_plus_c', 'sigmoid_i'), - ('sigmoid_i', 'scaled_i', {'in': 0}), - ('split_dropout', 'scaled_i', {'out': 1, 'in': 1}), - ('split', 'scale_f_c', {'out': 4}), - ('scale_f_c', 'f_plus_c'), - ('split', 'f_plus_c', {'out': 1}), - ('f_plus_c', 'sigmoid_f'), - ('sigmoid_f', 'scaled_f', {'in': 0}), - ('split_dropout', 'scaled_f', {'out': 2, 'in': 1}), - ('split', 'tanhcp', {'out': 2}), - ('tanhcp', 'i_mul_tanhc'), - ('scaled_i', 'i_mul_tanhc'), - ('scaled_f', 'f_mul_c'), - ('split', 'f_mul_c', {'out': 4}), - ('f_mul_c', 'fc_plus_itanhc'), - ('i_mul_tanhc', 'fc_plus_itanhc'), - ('split', 'scale_o_c', {'out': 4}), - ('scale_o_c', 'o_plus_c'), - ('split', 'o_plus_c', {'out': 3}), - ('o_plus_c', 'sigmoid_o'), - ('sigmoid_o', 'scaled_o', {'in': 0}), - ('split_dropout', 'scaled_o', {'out': 3, 'in': 1}), - ('fc_plus_itanhc', 'tanhc'), - ('scaled_o', 'o_mul_tanhc'), - ('tanhc', 'o_mul_tanhc'), - ('fc_plus_itanhc', 'concat'), - ('o_mul_tanhc', 'concat'), - ('lstm', 'out'), - ], nodes_with_edges_only=True) - ReplaceLstmNonLinearityPattern().replace_op(graph, Node(graph, 'lstm')) - (flag, resp) = compare_graphs(graph, ref_graph, 'out', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/kaldi/replace_timeheightconvolution_test.py b/tools/mo/unit_tests/mo/front/kaldi/replace_timeheightconvolution_test.py deleted file mode 100644 index 5160621b028372..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/replace_timeheightconvolution_test.py +++ /dev/null @@ -1,324 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.kaldi.replace_timeheightconvolution import ReplaceTimeHeightConvolutionPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op, connect_front, const - - -class TimeheightconvolutionReplacerTest(unittest.TestCase): - nodes = { - **regular_op('placeholder', {}), - **regular_op('timeheightconv', {'op': 'timeheightconvolutioncomponent'}), - **const('weights', int64_array([])), - **const('biases', int64_array([])), - **regular_op('placeholder_out', {}), - - **regular_op('concat', {'type': 'Concat', 'axis': 1}), - **regular_op('memoryoffset_0', {'type': None, 'op': 'MemoryOffset', 't': -1, 'has_default': False}), - **regular_op('memoryoffset_1', {'type': None, 'op': 'MemoryOffset', 't': 0, 'has_default': False}), - **regular_op('memoryoffset_2', {'type': None, 'op': 'MemoryOffset', 't': 1, 'has_default': True}), - **regular_op('conv', {'op': 'Convolution', 'type': 'Convolution', 'output': 12, 'height_in': 80}), - } - - def test_timeheightconvolution_1offset(self): - graph = build_graph(self.nodes, [ - *connect_front('placeholder', '0:timeheightconv'), - *connect_front('weights', '1:timeheightconv'), - *connect_front('biases', '2:timeheightconv'), - *connect_front('timeheightconv', 'placeholder_out') - ], nodes_with_edges_only=True) - - graph.stage = 'front' - - conv = graph.nodes['timeheightconv'] - conv['height_subsample'] = 1 - conv['height_in'] = 80 - conv['height_out'] = 80 - conv['in_channels'] = 1 - conv['out_channels'] = 12 - conv['offsets'] = int64_array([[-1, -1], [-1, 0], [-1, 1]]) - conv['time_offsets'] = [-1] - graph.nodes['weights']['value'] = np.zeros([36]) - - 
ref_graph = build_graph(self.nodes, [ - *connect_front('placeholder', 'memoryoffset_0'), - *connect_front('memoryoffset_0', '0:concat'), - *connect_front('concat', '0:conv'), - *connect_front('weights', '1:conv'), - *connect_front('biases', '2:conv'), - *connect_front('conv', 'placeholder_out') - ], nodes_with_edges_only=True) - ref_graph.nodes['weights']['value'] = np.zeros([36]) - new_conv = ref_graph.nodes['conv'] - new_conv['pad'] = int64_array([[0, 0], [0, 0], [0, 0], [1, 1]]) - new_conv['dilation'] = int64_array([1, 1, 1, 1]) - new_conv['kernel'] = int64_array([12, 1, 1, 3]) - new_conv['stride'] = int64_array([1, 1, 1, 1]) - - - ReplaceTimeHeightConvolutionPattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder_out', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_timeheightconvolution_2_offsets(self): - graph = build_graph(self.nodes, [ - *connect_front('placeholder', '0:timeheightconv'), - *connect_front('weights', '1:timeheightconv'), - *connect_front('biases', '2:timeheightconv'), - *connect_front('timeheightconv', 'placeholder_out') - ], nodes_with_edges_only=True) - - graph.stage = 'front' - - conv = graph.nodes['timeheightconv'] - conv['height_subsample'] = 1 - conv['height_in'] = 80 - conv['height_out'] = 80 - conv['in_channels'] = 1 - conv['out_channels'] = 12 - conv['offsets'] = int64_array([[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 0], [0, 1]]) - conv['time_offsets'] = int64_array([-1, 0]) - graph.nodes['weights']['value'] = np.zeros([72]) - - ref_graph = build_graph(self.nodes, [ - *connect_front('placeholder', 'memoryoffset_0'), - *connect_front('placeholder', 'memoryoffset_1'), - *connect_front('memoryoffset_0', '0:concat'), - *connect_front('memoryoffset_1', '1:concat'), - *connect_front('concat', '0:conv'), - *connect_front('weights', '1:conv'), - *connect_front('biases', '2:conv'), - *connect_front('conv', 'placeholder_out') - ], nodes_with_edges_only=True) - ref_graph.nodes['weights']['value'] = np.zeros([72]) - new_conv = ref_graph.nodes['conv'] - new_conv['pad'] = int64_array([[0, 0], [0, 0], [0, 0], [1, 1]]) - new_conv['dilation'] = int64_array([1, 1, 1, 1]) - new_conv['kernel'] = int64_array([12, 1, 2, 3]) - new_conv['stride'] = int64_array([1, 1, 1, 1]) - - ReplaceTimeHeightConvolutionPattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder_out', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_timeheightconvolution_2_offsets_def(self): - graph = build_graph(self.nodes, [ - *connect_front('placeholder', '0:timeheightconv'), - *connect_front('weights', '1:timeheightconv'), - *connect_front('biases', '2:timeheightconv'), - *connect_front('timeheightconv', 'placeholder_out') - ], nodes_with_edges_only=True) - - graph.stage = 'front' - - conv = graph.nodes['timeheightconv'] - conv['height_subsample'] = 1 - conv['height_in'] = 80 - conv['height_out'] = 80 - conv['in_channels'] = 1 - conv['out_channels'] = 12 - conv['offsets'] = int64_array([[0, -1], [0, 0], [0, 1], [1, -1], [1, 0], [1, 1]]) - conv['time_offsets'] = int64_array([0]) - graph.nodes['weights']['value'] = np.zeros([72]) - - ref_graph = build_graph(self.nodes, [ - *connect_front('placeholder', 'memoryoffset_1'), - *connect_front('placeholder', 'memoryoffset_2'), - *connect_front('memoryoffset_1', '0:concat'), - *connect_front('memoryoffset_2', '1:concat'), - *connect_front('concat', '0:conv'), - *connect_front('weights', '1:conv'), - *connect_front('biases', '2:conv'), - 
*connect_front('conv', 'placeholder_out') - ], nodes_with_edges_only=True) - ref_graph.nodes['weights']['value'] = np.zeros([72]) - new_conv = ref_graph.nodes['conv'] - new_conv['pad'] = int64_array([[0, 0], [0, 0], [0, 0], [1, 1]]) - new_conv['dilation'] = int64_array([1, 1, 1, 1]) - new_conv['kernel'] = int64_array([12, 1, 2, 3]) - new_conv['stride'] = int64_array([1, 1, 1, 1]) - - ReplaceTimeHeightConvolutionPattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder_out', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_timeheightconvolution_2_offsets_dilation(self): - graph = build_graph(self.nodes, [ - *connect_front('placeholder', '0:timeheightconv'), - *connect_front('weights', '1:timeheightconv'), - *connect_front('biases', '2:timeheightconv'), - *connect_front('timeheightconv', 'placeholder_out') - ], nodes_with_edges_only=True) - - graph.stage = 'front' - - conv = graph.nodes['timeheightconv'] - conv['height_subsample'] = 1 - conv['height_in'] = 80 - conv['height_out'] = 80 - conv['in_channels'] = 1 - conv['out_channels'] = 12 - conv['offsets'] = int64_array([[-1, -3], [-1, 0], [-1, 3], [1, -3], [1, 0], [1, 3]]) - conv['time_offsets'] = int64_array([-1]) - graph.nodes['weights']['value'] = np.zeros([72]) - - ref_graph = build_graph(self.nodes, [ - *connect_front('placeholder', 'memoryoffset_0'), - *connect_front('placeholder', 'memoryoffset_2'), - *connect_front('memoryoffset_0', '0:concat'), - *connect_front('memoryoffset_2', '1:concat'), - *connect_front('concat', '0:conv'), - *connect_front('weights', '1:conv'), - *connect_front('biases', '2:conv'), - *connect_front('conv', 'placeholder_out') - ], nodes_with_edges_only=True) - ref_graph.nodes['weights']['value'] = np.zeros([72]) - new_conv = ref_graph.nodes['conv'] - new_conv['pad'] = int64_array([[0, 0], [0, 0], [0, 0], [3, 3]]) - new_conv['dilation'] = int64_array([1, 1, 2, 3]) - new_conv['kernel'] = int64_array([12, 1, 2, 3]) - new_conv['stride'] = int64_array([1, 1, 1, 1]) - - ReplaceTimeHeightConvolutionPattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder_out', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_timeheightconvolution_2_offsets_pad(self): - graph = build_graph(self.nodes, [ - *connect_front('placeholder', '0:timeheightconv'), - *connect_front('weights', '1:timeheightconv'), - *connect_front('biases', '2:timeheightconv'), - *connect_front('timeheightconv', 'placeholder_out') - ], nodes_with_edges_only=True) - - graph.stage = 'front' - conv = graph.nodes['timeheightconv'] - conv['height_subsample'] = 1 - conv['height_in'] = 80 - conv['height_out'] = 74 - conv['in_channels'] = 1 - conv['out_channels'] = 12 - conv['offsets'] = int64_array([[-1, 0], [-1, 3], [-1, 6], [1, 0], [1, 3], [1, 6]]) - conv['time_offsets'] = int64_array([-1]) - graph.nodes['weights']['value'] = np.zeros([72]) - - ref_graph = build_graph(self.nodes, [ - *connect_front('placeholder', 'memoryoffset_0'), - *connect_front('placeholder', 'memoryoffset_2'), - *connect_front('memoryoffset_0', '0:concat'), - *connect_front('memoryoffset_2', '1:concat'), - *connect_front('concat', '0:conv'), - *connect_front('weights', '1:conv'), - *connect_front('biases', '2:conv'), - *connect_front('conv', 'placeholder_out') - ], nodes_with_edges_only=True) - ref_graph.nodes['weights']['value'] = np.zeros([72]) - new_conv = ref_graph.nodes['conv'] - new_conv['pad'] = int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]) - new_conv['dilation'] 
= int64_array([1, 1, 2, 3]) - new_conv['kernel'] = int64_array([12, 1, 2, 3]) - new_conv['stride'] = int64_array([1, 1, 1, 1]) - - ReplaceTimeHeightConvolutionPattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder_out', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_timeheightconvolution_out_channels(self): - graph = build_graph(self.nodes, [ - *connect_front('placeholder', '0:timeheightconv'), - *connect_front('weights', '1:timeheightconv'), - *connect_front('biases', '2:timeheightconv'), - *connect_front('timeheightconv', 'placeholder_out') - ], nodes_with_edges_only=True) - - graph.stage = 'front' - conv = graph.nodes['timeheightconv'] - conv['height_subsample'] = 1 - conv['height_in'] = 80 - conv['height_out'] = 74 - conv['in_channels'] = 3 - conv['out_channels'] = 4 - conv['offsets'] = int64_array([[-1, 0], [-1, 3], [-1, 6], [1, 0], [1, 3], [1, 6]]) - conv['time_offsets'] = int64_array([-1]) - graph.nodes['weights']['value'] = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, - 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, - 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72]) - - ref_graph = build_graph(self.nodes, [ - *connect_front('placeholder', 'memoryoffset_0'), - *connect_front('placeholder', 'memoryoffset_2'), - *connect_front('memoryoffset_0', '0:concat'), - *connect_front('memoryoffset_2', '1:concat'), - *connect_front('concat', '0:conv'), - *connect_front('weights', '1:conv'), - *connect_front('biases', '2:conv'), - *connect_front('conv', 'placeholder_out') - ], nodes_with_edges_only=True) - ref_graph.nodes['weights']['value'] = np.array([1, 4, 7, 10, 13, 16, 2, 5, 8, 11, 14, 17, 3, 6, 9, 12, 15, 18, - 19, 22, 25, 28, 31, 34, 20, 23, 26, 29, 32, 35, 21, 24, 27, 30, 33, 36, - 37, 40, 43, 46, 49, 52, 38, 41, 44, 47, 50, 53, 39, 42, 45, 48, 51, 54, - 55, 58, 61, 64, 67, 70, 56, 59, 62, 65, 68, 71, 57, 60, 63, 66, 69, 72]) - new_conv = ref_graph.nodes['conv'] - new_conv['output'] = 4 - new_conv['pad'] = int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]) - new_conv['dilation'] = int64_array([1, 1, 2, 3]) - new_conv['kernel'] = int64_array([4, 3, 2, 3]) - new_conv['stride'] = int64_array([1, 1, 1, 1]) - - ReplaceTimeHeightConvolutionPattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder_out', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_timeheightconvolution_2_offsets_stride(self): - graph = build_graph(self.nodes, [ - *connect_front('placeholder', '0:timeheightconv'), - *connect_front('weights', '1:timeheightconv'), - *connect_front('biases', '2:timeheightconv'), - *connect_front('timeheightconv', 'placeholder_out') - ], nodes_with_edges_only=True) - - graph.stage = 'front' - conv = graph.nodes['timeheightconv'] - conv['height_subsample'] = 2 - conv['height_in'] = 80 - conv['height_out'] = 37 - conv['in_channels'] = 1 - conv['out_channels'] = 12 - conv['offsets'] = int64_array([[-1, 0], [-1, 3], [-1, 6], [1, 0], [1, 3], [1, 6]]) - conv['time_offsets'] = int64_array([-1]) - graph.nodes['weights']['value'] = np.zeros([72]) - - ref_graph = build_graph(self.nodes, [ - *connect_front('placeholder', 'memoryoffset_0'), - *connect_front('placeholder', 'memoryoffset_2'), - *connect_front('memoryoffset_0', '0:concat'), - *connect_front('memoryoffset_2', '1:concat'), - *connect_front('concat', '0:conv'), - 
*connect_front('weights', '1:conv'), - *connect_front('biases', '2:conv'), - *connect_front('conv', 'placeholder_out') - ], nodes_with_edges_only=True) - ref_graph.nodes['weights']['value'] = np.zeros([72]) - new_conv = ref_graph.nodes['conv'] - new_conv['pad'] = int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]) - new_conv['dilation'] = int64_array([1, 1, 2, 3]) - new_conv['kernel'] = int64_array([12, 1, 2, 3]) - new_conv['stride'] = int64_array([1, 1, 1, 2]) - - ReplaceTimeHeightConvolutionPattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder_out', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/kaldi/restrictedattentioncomponent_replacer_test.py b/tools/mo/unit_tests/mo/front/kaldi/restrictedattentioncomponent_replacer_test.py deleted file mode 100644 index 22e8ef01720431..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/restrictedattentioncomponent_replacer_test.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.kaldi.restrictedattentioncomponent_replacer \ - import RestrictedAttentionComponentReplacer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.common.partial_infer.utils import mo_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op, connect_front, const - - -class RestrictedAttentionComponentReplacerTest(unittest.TestCase): - nodes = { - **regular_op('placeholder', {}), - **regular_op('restrictedattention', {'op': 'restrictedattentioncomponent'}), - **regular_op('placeholder_out', {}), - **regular_op('reshape_1', {'type': 'Reshape'}), - **const('reshape_1_shape', int64_array([10, -1])), - **regular_op('split_1', {'kind': 'op', 'type': 'VariadicSplit'}), - **const('split_1_axis', int64_array(1)), - **const('split_1_shape', int64_array([44, 55, 47])), - **regular_op('memoryoffset_1_0', {'type': None, - 't': -2, 'has_default': False}), - **regular_op('memoryoffset_1_1', {'type': None, - 't': 2, 'has_default': False}), - **regular_op('concat_1', {'type': 'Concat'}), - **regular_op('split_2', {'type': 'VariadicSplit'}), - **const('split_2_axis', int64_array(1)), - **const('split_2_shape', int64_array([44, 3])), - **regular_op('einsum_1', {'type': 'Einsum', 'equation': 'ij,ik->i'}), - **regular_op('reshape_helper_1', {'type': 'Reshape'}), - **const('reshape_helper_1_shape', int64_array([10, 1])), - **regular_op('mul', {'type': 'Multiply'}), - **const('mul_scale', mo_array(0.5, dtype=float)), - **regular_op('add', {'type': 'Add'}), - **regular_op('softmax', {'type': 'SoftMax'}), - **regular_op('reshape_helper_3', {'type': 'Reshape'}), - **const('reshape_helper_3_shape', int64_array([10, 1, 3])), - **regular_op('memoryoffset_2_0', {'type': None, - 't': -2, 'has_default': False}), - **regular_op('memoryoffset_2_1', {'type': None, - 't': 2, 'has_default': False}), - **regular_op('concat_2', {'type': 'Concat'}), - **regular_op('reshape_helper_2', {'type': 'Reshape'}), - **const('reshape_helper_2_shape', int64_array([10, 55, 3])), - **regular_op('einsum_2', {'type': 'Einsum', 'equation': 'ijk,ilk->ij'}), - **regular_op('concat_3', {'type': 'Concat'}), - **regular_op('reshape_2', {'type': 'Reshape'}), - **const('reshape_2_shape', int64_array([1, -1])), - } - - def test_restrictedattentioncomponent(self): - """ 
- Test case that validates if supgraph replaced by RestrictedAttentionComponentReplacer - class instead of RestrictedAttention operator is correct. - """ - graph = build_graph(self.nodes, [ - *connect_front('placeholder', '0:restrictedattention'), - *connect_front('restrictedattention', 'placeholder_out') - ], nodes_with_edges_only=True) - - graph.stage = 'front' - - restricted_attention_node = graph.nodes['restrictedattention'] - restricted_attention_node['num_left_inputs'] = 1 - restricted_attention_node['num_right_inputs'] = 1 - restricted_attention_node['num_heads'] = 10 - restricted_attention_node['key_dim'] = 44 - restricted_attention_node['value_dim'] = 55 - restricted_attention_node['time_stride'] = 2 - restricted_attention_node['key_scale'] = 0.5 - - ref_graph = build_graph(self.nodes, [ - ('placeholder', 'reshape_1', {'in': 0, 'out': 0}), - ('reshape_1_shape', 'reshape_1', {'in': 1, 'out': 0}), - ('reshape_1', 'split_1', {'in': 0, 'out': 0}), - ('split_1_axis', 'split_1', {'in': 1, 'out': 0}), - ('split_1_shape', 'split_1', {'in': 2, 'out': 0}), - ('split_1', 'memoryoffset_1_0', {'in': 0, 'out': 0}), - ('split_1', 'memoryoffset_1_1', {"in": 0, 'out': 0}), - ('split_1', 'memoryoffset_2_0', {"in": 0, 'out': 1}), - ('split_1', 'memoryoffset_2_1', {"in": 0, 'out': 1}), - ('split_1', 'split_2', {'in': 0, 'out': 2}), - ('split_2_axis', 'split_2', {'in': 1, 'out': 0}), - ('split_2_shape', 'split_2', {'in': 2, 'out': 0}), - ('memoryoffset_1_0', 'concat_1', {'in': 0, 'out': 0}), - ('split_1', 'concat_1', {'in': 1, 'out': 0}), - ('memoryoffset_1_1', 'concat_1', {'in': 2, 'out': 0}), - ('concat_1', 'einsum_1', {'in': 0, 'out': 0}), - ('split_2', 'einsum_1', {'in': 1, 'out': 0}), - ('einsum_1', 'reshape_helper_1', {'in': 0, 'out': 0}), - ('reshape_helper_1_shape', 'reshape_helper_1', {'in': 1, 'out': 0}), - ('reshape_helper_1', 'mul', {'in': 0, 'out': 0}), - ('mul_scale', 'mul', {'in': 1, 'out': 0}), - ('mul', 'add', {'in': 1, 'out': 0}), - ('split_2', 'add', {'in': 0, 'out': 1}), - ('add', 'softmax', {'in': 0, 'out': 0}), - ('memoryoffset_2_0', 'concat_2', {'in': 0, 'out': 0}), - ('split_1', 'concat_2', {'in': 1, 'out': 1}), - ('memoryoffset_2_1', 'concat_2', {'in': 2, 'out': 0}), - ('concat_2', 'reshape_helper_2', {'in': 0, 'out': 0}), - ('reshape_helper_2_shape', 'reshape_helper_2', {'in': 1, 'out': 0}), - ('reshape_helper_2', 'einsum_2', {'in': 0, 'out': 0}), - ('softmax', 'reshape_helper_3', {'in': 0, 'out': 0}), - ('reshape_helper_3_shape', 'reshape_helper_3', {'in': 1, 'out': 0}), - ('reshape_helper_3', 'einsum_2', {'in': 1, 'out': 0}), - ('einsum_2', 'concat_3', {'in': 0, 'out': 0}), - ('softmax', 'concat_3', {'in': 1, 'out': 0}), - ('concat_3', 'reshape_2', {'in': 0, 'out': 0}), - ('reshape_2_shape', 'reshape_2', {'in': 1, 'out': 0}), - ('reshape_2', 'placeholder_out') - ], nodes_with_edges_only=True) - - RestrictedAttentionComponentReplacer().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, - 'placeholder_out', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/kaldi/set_ports_test.py b/tools/mo/unit_tests/mo/front/kaldi/set_ports_test.py deleted file mode 100644 index 00562bb4b9d62c..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/set_ports_test.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.kaldi.set_ports import SetPortsPattern -from openvino.tools.mo.utils.error import 
Error -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op, connect_front, Node - - -class SetPortsTest(unittest.TestCase): - def test_set_ports_chain(self): - nodes = { - **regular_op('op1', {}), - **regular_op('op2', {}), - **regular_op('op3', {}), - } - - graph = build_graph(nodes, [ - ('op1', 'op2', {'fw_tensor_debug_info': {}}), - ('op2', 'op3', {'fw_tensor_debug_info': {}}) - ], nodes_with_edges_only=True) - - graph.stage = 'front' - - ref_graph = build_graph(nodes, [ - *connect_front('op1:0', '0:op2'), - *connect_front('op2:0', '0:op3') - ], nodes_with_edges_only=True) - - SetPortsPattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'op3', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_set_ports_split(self): - nodes = { - **regular_op('op1', {}), - **regular_op('split', {'op': 'Split'}), - **regular_op('op2', {}), - **regular_op('op3', {}), - **regular_op('op4', {}), - } - - graph = build_graph(nodes, [ - ('op1', 'split', {'fw_tensor_debug_info': {}}), - ('split', 'op2', {'fw_tensor_debug_info': {}, 'out_port': 0}), - ('split', 'op3', {'fw_tensor_debug_info': {}, 'out_port': 1}), - ('split', 'op4', {'fw_tensor_debug_info': {}, 'out_port': 2}) - ], nodes_with_edges_only=True) - - graph.stage = 'front' - graph.nodes()['split']['out_ports_count'] = 3 - - ref_graph = build_graph(nodes, [ - *connect_front('op1:0', '0:split'), - *connect_front('split:0', '0:op2'), - *connect_front('split:1', '0:op3'), - *connect_front('split:2', '0:op4') - ], nodes_with_edges_only=True) - - SetPortsPattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'op4', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_set_ports_split2(self): - nodes = { - **regular_op('op1', {}), - **regular_op('split', {'op': 'Split'}), - **regular_op('op2', {}), - **regular_op('op3', {}), - **regular_op('op4', {}), - } - - graph = build_graph(nodes, [ - ('op1', 'split', {'fw_tensor_debug_info': {}}), - ('split', 'op2', {'fw_tensor_debug_info': {}, 'out_port': 0}), - ('split', 'op4', {'fw_tensor_debug_info': {}, 'out_port': 4}), - ('split', 'op3', {'fw_tensor_debug_info': {}, 'out_port': 6}) - ], nodes_with_edges_only=True) - - graph.stage = 'front' - graph.nodes()['split']['out_ports_count'] = 3 - - ref_graph = build_graph(nodes, [ - *connect_front('op1:0', '0:split'), - *connect_front('split:0', '0:op2'), - *connect_front('split:1', '0:op4'), - *connect_front('split:2', '0:op3') - ], nodes_with_edges_only=True) - - SetPortsPattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'op4', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_set_ports_split3(self): - nodes = { - **regular_op('op1', {}), - **regular_op('split', {'op': 'Split'}), - **regular_op('op2', {}), - **regular_op('op3', {}), - **regular_op('op4', {}), - } - - graph = build_graph(nodes, [ - ('op1', 'split', {'fw_tensor_debug_info': {}}), - ('split', 'op2', {'fw_tensor_debug_info': {}, 'out_port': 0}), - ('split', 'op3', {'fw_tensor_debug_info': {}, 'out_port': 1}), - ('split', 'op4', {'fw_tensor_debug_info': {}, 'out_port': 2}) - ], nodes_with_edges_only=True) - - Node(graph, 'split').out_port(0).get_connection().add_destination(Node(graph, 'op4').in_port(0)) - graph.nodes()['split']['out_ports_count'] = 2 - - graph.stage = 'front' - - self.assertRaises(Error, SetPortsPattern().find_and_replace_pattern, graph) diff --git 
a/tools/mo/unit_tests/mo/front/kaldi/sigmoid_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/sigmoid_ext_test.py deleted file mode 100644 index af04a98535e830..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/sigmoid_ext_test.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.kaldi.sigmoid_ext import SigmoidFrontExtractor -from openvino.tools.mo.ops.activation_ops import Sigmoid -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from openvino.tools.mo.ops.op import Op - - -class SigmoidFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['Sigmoid'] = Sigmoid - - def test_assertion(self): - self.assertRaises(AttributeError, SigmoidFrontExtractor.extract, None) - - def test_extracted_blobs_add_shift(self): - SigmoidFrontExtractor.extract(self.test_node) - self.assertTrue(self.test_node.op, 'Sigmoid') diff --git a/tools/mo/unit_tests/mo/front/kaldi/tanh_ext_test.py b/tools/mo/unit_tests/mo/front/kaldi/tanh_ext_test.py deleted file mode 100644 index 1d69996dc08355..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/tanh_ext_test.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.kaldi.tanh_component_ext import TanhFrontExtractor -from openvino.tools.mo.ops.activation_ops import Tanh -from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest -from openvino.tools.mo.ops.op import Op - - -class TanhFrontExtractorTest(KaldiFrontExtractorTest): - @classmethod - def register_op(cls): - Op.registered_ops['Tanh'] = Tanh - - def test_assertion(self): - self.assertRaises(AttributeError, TanhFrontExtractor.extract, None) - - def test_extracted_blobs_add_shift(self): - TanhFrontExtractor.extract(self.test_node) - self.assertTrue(self.test_node.op, 'Tanh') diff --git a/tools/mo/unit_tests/mo/front/kaldi/tdnn_component_replacer_test.py b/tools/mo/unit_tests/mo/front/kaldi/tdnn_component_replacer_test.py deleted file mode 100644 index 10567a94a73c4a..00000000000000 --- a/tools/mo/unit_tests/mo/front/kaldi/tdnn_component_replacer_test.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -import numpy as np -import pytest - -from openvino.tools.mo.front.kaldi.tdnn_component_replacer import TdnnComponentReplacer -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op, result, connect_front, const - - -class TestTdnnComponentReplacerTest(): - - @pytest.mark.parametrize("weights, biases, time_offsets",[ - ([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 1],), - ([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 1, 2, 10, 1000],), - ([[1, 1, 1], [4, 4, 4]], [1, 2], [-1, 0]), - ]) - def test_tdnnreplacer(self, weights, biases, time_offsets): - def generate_offsets(): - offset_edges = [] - offset_nodes = {} - - for i, t in enumerate(time_offsets): - offset_nodes.update(**regular_op('memoryoffset_' + str(i), {'type': None})) - - if t != 0: - offset_edges.append(('placeholder', 'memoryoffset_' + str(i), {'out': 0, 'in': 0})) - offset_edges.append(('memoryoffset_' + str(i), 'concat', {'out': 0, 'in': i})) - else: - offset_edges.append(('placeholder', 'concat', {'out': 0, 'in': i})) - - return offset_nodes, offset_edges - - offset_nodes, ref_offset_edges = 
generate_offsets() - - nodes = { - **offset_nodes, - **regular_op('placeholder', {'type': 'Parameter'}), - **regular_op('tdnncomponent', {'op': 'tdnncomponent', - 'weights': np.array(weights), - 'biases': np.array(biases), - 'time_offsets': np.array(time_offsets)}), - **const('weights', np.array(weights)), - **const('biases', np.array(biases)), - **regular_op('concat', {'type': 'Concat', 'axis': 1}), - **regular_op('memoryoffset_0', {'type': None}), - **regular_op('memoryoffset_1', {'type': None}), - **regular_op('memoryoffset_2', {'type': None}), - **regular_op('fully_connected', {'type': 'FullyConnected'}), - **result('result'), - } - - graph = build_graph(nodes, [ - *connect_front('placeholder', 'tdnncomponent'), - *connect_front('tdnncomponent', 'result') - ], nodes_with_edges_only=True) - - graph.stage = 'front' - - ref_graph = build_graph(nodes, [ - *ref_offset_edges, - *connect_front('concat', '0:fully_connected'), - *connect_front('weights', '1:fully_connected'), - *connect_front('biases', '2:fully_connected'), - *connect_front('fully_connected', 'result') - ], nodes_with_edges_only=True) - - TdnnComponentReplacer().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/onnx/AttributedSliceToSlice_test.py b/tools/mo/unit_tests/mo/front/onnx/AttributedSliceToSlice_test.py deleted file mode 100644 index 44b5781ff49e34..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/AttributedSliceToSlice_test.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.onnx.AttributedSliceToSlice import AttributedSliceToSliceReplacer -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, const, connect_front - - -class TestSliceReplacerTest(): - @pytest.mark.parametrize("attributed_slice_attrs",[ - {'op': 'AttributedSlice', 'type': None, 'starts': np.array([0, 0]), 'ends': np.array([1, -1]), 'axes': np.array([0, 1])} - ]) - def test_attributed_slice_replacer(self, attributed_slice_attrs): - nodes = { - **regular_op_with_empty_data('input', {'type': 'Parameter'}), - **regular_op_with_empty_data('attributed_slice', attributed_slice_attrs), - **result(), - - # nodes after replacement - **const('start', np.array([0, 0])), - **const('end', np.array([1, -1])), - **const('axis', np.array(np.array([0, 1]))), - **regular_op_with_empty_data('slice', {'op': 'Slice', 'type': None}), - } - - graph = build_graph(nodes_attrs=nodes, edges=[ - ('input', 'attributed_slice'), - ('attributed_slice', 'output'), - ], nodes_with_edges_only=True) - graph.stage = 'front' - - AttributedSliceToSliceReplacer().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attrs=nodes, edges=[ - ('input', 'slice'), - *connect_front('start', '1:slice'), - *connect_front('end', '2:slice'), - *connect_front('axis', '3:slice'), - ('slice', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/onnx/MvnOnnxToMvn_test.py b/tools/mo/unit_tests/mo/front/onnx/MvnOnnxToMvn_test.py deleted file mode 100644 index 140bcbcdb7dc65..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/MvnOnnxToMvn_test.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 
(C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.onnx.MvnOnnxToMvn import MvnOnnxToMvn -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, const, connect_front - -nodes = { - **regular_op_with_empty_data('input', {'type': 'Parameter'}), - **regular_op_with_empty_data('mvn_onnx', {'op': 'MVNOnnx', - 'axes': int64_array([2, 3]), - 'eps': 1e-9, - 'eps_mode': 'outside_sqrt', - 'normalize_variance': 1}), - **result(), - - # nodes after replacement - **const('axes', int64_array([2, 3])), - **regular_op_with_empty_data('mvn', {'op': 'MVN', 'type': None}), -} - - -class MvnOnnxToMvnTest(unittest.TestCase): - def test_mvn_normalize(self): - graph = build_graph(nodes, [('input', 'mvn_onnx'), - ('mvn_onnx', 'output')], - nodes_with_edges_only=True) - graph.stage = 'front' - - MvnOnnxToMvn().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [('input', 'mvn'), - *connect_front('axes', '1:mvn'), - ('mvn', 'output')], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/onnx/__init__.py b/tools/mo/unit_tests/mo/front/onnx/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/onnx/activation_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/activation_ext_test.py deleted file mode 100644 index c02e3c3f1f3d88..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/activation_ext_test.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import onnx -import pytest - -import openvino.tools.mo.front.onnx.activation_ext as extractors -from openvino.tools.mo.ops.activation_ops import Elu -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import PB -from unit_tests.utils.graph import build_graph - - -class TestActivationOpsONNXExtractorTest(): - @staticmethod - def _create_node(op_name: str): - pb = onnx.helper.make_node(op_name, ["X"], ["Y"]) - graph = build_graph({'node_0': {'pb': pb}}, []) - return Node(graph, 'node_0') - - @staticmethod - def _base_attrs(op_name: str): - # reference output Node attributes - return ( - dict( - op=op_name, - ) - ) - - def _match(self, out, ref): - for key in ref.keys(): - status = out[key] == ref[key] - if type(status) in [list, np.ndarray]: - status = np.all(status) - assert status, f"Mismatch for field {key}, observed: {out[key]}, expected: {ref[key]}" - - @staticmethod - def _extract(op_name): - node = __class__._create_node(op_name) - getattr(extractors, op_name + 'Extractor').extract(node) - return node.graph.node[node.id] - - @pytest.mark.parametrize("op_name",['Abs', 'Acos', 'Asin', 'Atan', 'Acosh', 'Asinh', 'Atanh', 'Cos', 'Cosh', 'Erf', 'Exp', 'Floor', 'Log', 'Not', 'Sigmoid', 'Sin', - 'Sinh', 'Tan', 'Tanh']) - def test_default(self, op_name): - ref = self._base_attrs(op_name) - if ref['op'] == 'Not': - ref['op'] = 'LogicalNot' - out = self._extract(op_name) - self._match(out, ref) - - -class TestEluONNXExt(): - @staticmethod - def _create_elu_node(alpha=1.0): - pb = onnx.helper.make_node( - 'Elu', - inputs=['x'], - outputs=['y'], - alpha=alpha - ) - 
node = PB({'pb': pb}) - return node - - @classmethod - def setUpClass(cls): - Op.registered_ops['Elu'] = Elu - - @pytest.mark.parametrize("alpha",[1.0, 2.0, 3.0]) - def test_elu_ext(self, alpha): - node = self._create_elu_node(alpha) - extractors.EluExtractor.extract(node) - - exp_res = { - 'type': 'Elu', - 'alpha': alpha, - 'infer': Elu.infer - } - - for key in exp_res.keys(): - assert node[key] == exp_res[key] diff --git a/tools/mo/unit_tests/mo/front/onnx/affine_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/affine_ext_test.py deleted file mode 100644 index f425bce0cd944f..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/affine_ext_test.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.affine_ext import AffineFrontExtractor -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - - -class AffineONNXExtractorTest(unittest.TestCase): - @staticmethod - def _create_node(attrs: dict): - pb = onnx.helper.make_node("Affine", ["X"], ["Y"], **attrs) - graph = build_graph({'node_0': {'pb': pb}}, []) - return Node(graph, 'node_0') - - @staticmethod - def _base_attrs(): - # Commonly used attributes in the tests - # Each test takes these ones and then adds/modifies/deletes particular fields - return ( - # test input ONNX attributes - dict( - alpha=1.0, - beta=0.0 - ), - # reference output Node attributes - dict( - op='ImageScaler', - scale=1.0, - bias=0.0 - ) - ) - - @staticmethod - def _extract(inp): - node = __class__._create_node(inp) - AffineFrontExtractor.extract(node) - return node.graph.node[node.id] - - def _match(self, out, ref): - for key in ref.keys(): - status = out[key] == ref[key] - if type(status) in [list, np.ndarray]: - status = np.all(status) - self.assertTrue(status, 'Mismatch for field {}, observed: {}, expected: {}'.format(key, out[key], ref[key])) - - def test_default(self): - inp, ref = self._base_attrs() - out = self._extract(inp) - self._match(out, ref) - - def test_random(self): - inp, ref = self._base_attrs() - inp['alpha'] = 123. - inp['beta'] = 321. - - ref['scale'] = 123. - ref['bias'] = 321. 
- - out = self._extract(inp) - self._match(out, ref) diff --git a/tools/mo/unit_tests/mo/front/onnx/conv_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/conv_ext_test.py deleted file mode 100644 index c161a44d8842f3..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/conv_ext_test.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.conv_ext import ConvTransposeFrontExtractor -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.graph import build_graph - - -class ConvTransposeONNXExtractorTest(unittest.TestCase): - @staticmethod - def _create_node(attrs: dict): - pb = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"], **attrs) - graph = build_graph({'node_0': {'pb': pb}}, []) - return Node(graph, 'node_0') - - @staticmethod - def _base_attrs(): - # Commonly used attributes in the tests - # Each test takes these ones and then adds/modifies/deletes particular fields - return ( - # test input ONNX attributes - dict( - pads=[1, 2, 3, 4], - kernel_shape=[5, 6] - ), - # reference output Node attributes - dict( - type='Deconvolution', - pad=[[0, 0], [0, 0], [1, 3], [2, 4]], - bias_term=None, - output_shape=None, - output_padding=None, - dilation=None, - stride=None, - output_spatial_shape=None, - group=1 - ) - ) - - @staticmethod - def _extract(inp): - node = __class__._create_node(inp) - ConvTransposeFrontExtractor.extract(node) - return node.graph.node[node.id] - - def _match(self, out, ref): - for key in ref.keys(): - status = out[key] == ref[key] - if type(status) in [list, np.ndarray]: - status = np.all(status) - self.assertTrue(status, 'Mismatch for field {}, observed: {}, expected: {}'.format(key, out[key], ref[key])) - - def test_all_valid_default(self): - inp, ref = self._base_attrs() - del inp['pads'] - del ref['pad'] - out = self._extract(inp) - self._match(out, ref) - - def test_most_used(self): - inp, ref = self._base_attrs() - out = self._extract(inp) - self._match(out, ref) - - def test_dilation(self): - inp, ref = self._base_attrs() - inp['dilations'] = [10, 11] - ref['dilation'] = [1, 1, 10, 11] - out = self._extract(inp) - self._match(out, ref) - - def test_stride(self): - inp, ref = self._base_attrs() - inp['strides'] = [12, 13] - ref['stride'] = [1, 1, 12, 13] - out = self._extract(inp) - self._match(out, ref) - - def test_group(self): - inp, ref = self._base_attrs() - inp['group'] = 14 - ref['group'] = 14 - out = self._extract(inp) - self._match(out, ref) - - def test_auto_pad_supported(self): - inp, ref = self._base_attrs() - del inp['pads'] - inp['auto_pad'] = 'SAME_UPPER' - - ref['auto_pad'] = 'same_upper' - del ref['pad'] - - out = self._extract(inp) - self._match(out, ref) - - def test_pads_not_even_invalid(self): - inp, ref = self._base_attrs() - inp['pads'] = [1, 2, 3] - with self.assertRaisesRegex(Error, '.*pads.*not correct.*'): - out = self._extract(inp) - - def test_missing_kernel_shape_not_supported(self): - inp, ref = self._base_attrs() - del inp['kernel_shape'] - with self.assertRaisesRegex(Error, '.*kernel_shape.*not supported.*'): - out = self._extract(inp) - - def test_output_padding(self): - inp, ref = self._base_attrs() - inp['output_padding'] = [19, 20] - ref['output_padding'] = [0, 0, 19, 20] - out = self._extract(inp) - self._match(out, ref) diff --git a/tools/mo/unit_tests/mo/front/onnx/crop_ext_test.py 
b/tools/mo/unit_tests/mo/front/onnx/crop_ext_test.py deleted file mode 100644 index 621b906d8304b2..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/crop_ext_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.crop_ext import CropFrontExtractor -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - - -class CropONNXExtractorTest(unittest.TestCase): - @staticmethod - def _create_node(attrs: dict): - pb = onnx.helper.make_node("Crop", ["X"], ["Y"], **attrs) - graph = build_graph({'node_0': {'pb': pb}}, []) - return Node(graph, 'node_0') - - @staticmethod - def _base_attrs(): - # Commonly used attributes in the tests - # Each test takes these ones and then adds/modifies/deletes particular fields - return ( - # test input ONNX attributes - dict( - border=[5, 10, 15, 20], - ), - # reference output Node attributes - dict( - op='Crop', - crop_begin=np.array([10, 5]), - crop_end=np.array([20, 15]), - axis=np.array([2, 3]) - ) - ) - - @staticmethod - def _extract(inp): - node = __class__._create_node(inp) - CropFrontExtractor.extract(node) - return node.graph.node[node.id] - - def _match(self, out, ref): - for key in ref.keys(): - status = out[key] == ref[key] - if type(status) in [list, np.ndarray]: - status = np.all(status) - self.assertTrue(status, 'Mismatch for field {}, observed: {}, expected: {}'.format(key, out[key], ref[key])) - - def test_default(self): - inp, ref = self._base_attrs() - out = self._extract(inp) - self._match(out, ref) - - def test_with_scale(self): - inp, ref = self._base_attrs() - inp['scale'] = np.array([34, 50]) - - del ref['crop_begin'] - del ref['crop_end'] - ref['dim'] = np.array([34, 50]) - ref['offset'] = np.array([10, 5]) - - out = self._extract(inp) - self._match(out, ref) diff --git a/tools/mo/unit_tests/mo/front/onnx/detection_output_test.py b/tools/mo/unit_tests/mo/front/onnx/detection_output_test.py deleted file mode 100644 index 4e0ff529ebb64a..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/detection_output_test.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.detection_output_ext import DetectionOutputFrontExtractor -from openvino.tools.mo.ops.DetectionOutput import DetectionOutput -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import PB - - -class TestDetectionOutputExt(unittest.TestCase): - @staticmethod - def _create_do_node(num_classes=0, share_location=0, background_label_id=0, - code_type="", variance_encoded_in_target=0, keep_top_k=0, - confidence_threshold=0, nms_threshold=0, top_k=0, eta=0): - pb = onnx.helper.make_node( - 'DetectionOutput', - inputs=['x'], - outputs=['y'], - num_classes=num_classes, - share_location=share_location, - background_label_id=background_label_id, - code_type=code_type, - variance_encoded_in_target=variance_encoded_in_target, - keep_top_k=keep_top_k, - confidence_threshold=confidence_threshold, - # nms_param - nms_threshold=nms_threshold, - top_k=top_k, - eta=eta, - ) - - node = PB({'pb': pb}) - return node - - @classmethod - def setUpClass(cls): - Op.registered_ops['DetectionOutput'] = DetectionOutput - - def test_do_no_pb_no_ml(self): - self.assertRaises(AttributeError, DetectionOutputFrontExtractor.extract, None) - - 
def test_do_ext_ideal_numbers(self): - node = self._create_do_node(num_classes=21, share_location=1, - code_type="CENTER_SIZE", keep_top_k=200, - confidence_threshold=0.01, nms_threshold=0.45, top_k=400, eta=1.0) - - DetectionOutputFrontExtractor.extract(node) - - exp_res = { - 'op': 'DetectionOutput', - 'type': 'DetectionOutput', - 'num_classes': 21, - 'share_location': 1, - 'background_label_id': 0, - 'code_type': "caffe.PriorBoxParameter.CENTER_SIZE", - 'variance_encoded_in_target': 0, - 'keep_top_k': 200, - 'confidence_threshold': 0.01, - 'visualize_threshold': 0.6, - # nms_param - 'nms_threshold': 0.45, - 'top_k': 400, - 'eta': 1.0, - # ONNX does not have such parameters - # save_output_param.resize_param - 'prob': 0, - 'resize_mode': "", - 'height': 0, - 'width': 0, - 'height_scale': 0, - 'width_scale': 0, - 'pad_mode': "", - 'pad_value': "", - 'interp_mode': "", - 'input_width': 1, - 'input_height': 1, - 'normalized': 1, - } - - for key in exp_res.keys(): - if key in ['confidence_threshold', 'visualize_threshold', 'nms_threshold', 'eta']: - np.testing.assert_almost_equal(node[key], exp_res[key]) - else: - self.assertEqual(node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/onnx/gru_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/gru_ext_test.py deleted file mode 100644 index ffb92db6999abf..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/gru_ext_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.gru_ext import GRUFrontExtractor -from unit_tests.utils.extractors import PB - - -class GRUExtractorTest(unittest.TestCase): - @staticmethod - def _create_node(**attrs): - pb = onnx.helper.make_node( - 'GRU', - inputs=['X', 'W', 'R', 'B',], - outputs=['Y', 'Y_h', 'Y_c'], - hidden_size=128, - **attrs, - ) - node = PB({'pb': pb}) - return node - - base_attrs = { - 'type': 'RNNSequence', - 'op': 'GRU', - 'batch_dim': 1, - 'sequence_dim': 0, - 'blobs_wrb': True, - 'has_num_directions': True, - 'num_layers': 1, - 'format': 'onnx', - 'multilayers': False, - 'gate_order': np.array([0, 1, 2]), - 'direction': 'forward', - 'linear_before_reset': 0, - } - - def test_base_attrs(self): - node = self._create_node() - GRUFrontExtractor.extract(node) - - exp_res = self.base_attrs - - for key in exp_res.keys(): - equal = np.all(np.equal(node[key], exp_res[key], dtype=object)) - self.assertTrue(equal, 'Values for attr {} are not equal'.format(key)) - - def test_additional_attributes(self): - additional_attrs = { - 'activation_alpha': [1.0, 0.0, 2.0], - 'activations': [b'relu', b'tanh', b'sigmoid'], - 'clip': 10.0, - 'linear_before_reset': 1, - } - - node = self._create_node(**additional_attrs) - GRUFrontExtractor.extract(node) - - exp_res = {**self.base_attrs, **additional_attrs} - exp_res['activations'] = ['relu', 'tanh', 'sigmoid'] - - for key in exp_res.keys(): - equal = np.all(np.equal(node[key], exp_res[key], dtype=object)) - self.assertTrue(equal, 'Values for attr {} are not equal'.format(key)) diff --git a/tools/mo/unit_tests/mo/front/onnx/image_scaler_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/image_scaler_ext_test.py deleted file mode 100644 index 169077ec6fe63c..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/image_scaler_ext_test.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import onnx - -from
openvino.tools.mo.front.onnx.image_scaler_ext import ImageScalerFrontExtractor -from unit_tests.utils.extractors import PB - - -class TestImageScalerONNXExt(unittest.TestCase): - @staticmethod - def _create_image_scaler_node(): - pb = onnx.helper.make_node( - 'ImageScaler', - inputs=['a'], - outputs=['b'], - scale=1.0, - bias=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], - ) - node = PB({'pb': pb, 'graph': PB({'graph': {'layout': 'NCHW'}})}) - return node - - def test_image_scaler_ext(self): - node = self._create_image_scaler_node() - ImageScalerFrontExtractor.extract(node) - - exp_res = { - 'scale': 1.0, - 'bias': [[[1.0]], [[2.0]], [[3.0]], [[4.0]], [[5.0]], [[6.0]], [[7.0]], [[8.0]]], - } - - for key in exp_res.keys(): - if type(node[key]) in [list, np.ndarray]: - self.assertTrue(np.array_equal(np.array(node[key]), np.array(exp_res[key]))) - else: - self.assertEqual(node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/onnx/instance_normalization_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/instance_normalization_ext_test.py deleted file mode 100644 index a0d7763d56c9cd..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/instance_normalization_ext_test.py +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import onnx - -from openvino.tools.mo.front.onnx.instance_normalization_ext import InstanceNormalizationExtractor -from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass - - -class TestInstanceNormalization(BaseExtractorsTestingClass): - @staticmethod - def _create_node(): - pb = onnx.helper.make_node( - 'InstanceNormalization', - inputs=['a'], - outputs=['b'], - epsilon=0.5, - ) - node = PB({'pb': pb}) - return node - - def test_image_scaler_ext(self): - node = self._create_node() - InstanceNormalizationExtractor.extract(node) - self.res = node - - self.expected = { - 'epsilon': 0.5, - } - - self.compare() diff --git a/tools/mo/unit_tests/mo/front/onnx/lstm_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/lstm_ext_test.py deleted file mode 100644 index c5a4c9c6489246..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/lstm_ext_test.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.lstm_ext import LSTMFrontExtractor -from unit_tests.utils.extractors import PB - - -class LSTMExtractorTest(unittest.TestCase): - @staticmethod - def _create_node(**attrs): - pb = onnx.helper.make_node( - 'LSTM', - inputs=['X', 'W', 'R', 'B',], - outputs=['Y', 'Y_h', 'Y_c'], - hidden_size=128, - **attrs, - ) - node = PB({'pb': pb}) - return node - - base_attrs = { - 'type': 'RNNSequence', - 'op': 'LSTM', - 'batch_dim': 1, - 'sequence_dim': 0, - 'blobs_wrb': True, - 'has_num_directions': True, - 'num_layers': 1, - 'format': 'onnx', - 'multilayers': False, - 'gate_order': np.array([2, 0, 3, 1]), - 'direction': 'forward', - } - - def test_base_attrs(self): - node = self._create_node() - LSTMFrontExtractor.extract(node) - - exp_res = self.base_attrs - - for key in exp_res.keys(): - equal = np.all(np.equal(node[key], exp_res[key], dtype=object)) - self.assertTrue(equal) - - def test_additional_attributes(self): - additional_attrs = { - 'activation_alpha': [1.0, 0.0, 2.0], - 'activations': [b'relu', b'tanh', b'sigmoid'], - 'clip': 10.0, - } - - node = self._create_node(**additional_attrs) - LSTMFrontExtractor.extract(node) - - exp_res = 
dict(**self.base_attrs, **additional_attrs) - exp_res['activations'] = ['relu', 'tanh', 'sigmoid'] - - for key in exp_res.keys(): - equal = np.all(np.equal(node[key], exp_res[key], dtype=object)) - self.assertTrue(equal, 'Values for attr {} are not equal'.format(key)) diff --git a/tools/mo/unit_tests/mo/front/onnx/normalize_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/normalize_ext_test.py deleted file mode 100644 index 5468e3c6498b4c..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/normalize_ext_test.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import onnx - -from openvino.tools.mo.front.onnx.normalize_ext import NormalizeFrontExtractor -from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass - - -class TestNormalize(BaseExtractorsTestingClass): - @staticmethod - def _create_node(across_spatial=None, channel_shared=None, eps=None): - if across_spatial is None: - across_spatial = 0 - if channel_shared is None: - channel_shared = 0 - if eps is None: - eps = 0.1 - pb = onnx.helper.make_node( - 'Normalize', - across_spatial=across_spatial, - channel_shared=channel_shared, - eps=eps, - inputs=['a'], - outputs=['b'] - ) - node = PB({'pb': pb}) - return node - - def test_ok(self): - node = self._create_node() - NormalizeFrontExtractor.extract(node) - self.res = node - - self.expected = { - 'across_spatial': False, - 'channel_shared': False, - 'eps': 0.1 - } - - self.compare() diff --git a/tools/mo/unit_tests/mo/front/onnx/pad_converter_test.py b/tools/mo/unit_tests/mo/front/onnx/pad_converter_test.py deleted file mode 100644 index bc10fb391699f2..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/pad_converter_test.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.onnx.pad_converter import ONNXPadToPad -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - -nodes_attributes = { - 'placeholder': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - **const('pads', np.array([1, 2, 3, 4], dtype=np.int64)), - **const('value', np.array(0.5, dtype=np.float32)), - 'onnx_pad': {'type': None, 'kind': 'op', 'op': 'ONNXPad', 'name': 'my_pad', 'mode': 'constant'}, - 'result': {'type': 'Result', 'value': None, 'kind': 'op', 'op': 'Result'}, - - 'pad': {'type': 'Pad', 'kind': 'op', 'op': 'Pad'}, - 'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 2}, - **const('split_axis', np.array(0, dtype=np.int32)), -} - - -class AttributedClampNormalizerTest(unittest.TestCase): - def test_1(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'onnx_pad', {'in': 0, 'out': 0}), - ('pads', 'onnx_pad', {'in': 1, 'out': 0}), - ('value', 'onnx_pad', {'in': 2, 'out': 0}), - ('onnx_pad', 'result', {'in': 0, 'out': 0}), - ], - {}, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder', 'pad', {'in': 0, 'out': 0}), - ('pads', 'split', {'in': 0, 'out': 0}), - ('split_axis', 'split', {'in': 1, 'out': 0}), - ('split', 'pad', {'in': 1, 'out': 0}), - ('split', 'pad', {'in': 2, 'out': 1}), - ('value', 'pad', {'in': 3, 'out': 0}), - ('pad', 'result') - ], - {}, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - ONNXPadToPad().find_and_replace_pattern(graph) - - (flag, resp) = 
compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Pad')[0]]['name'] == 'my_pad') diff --git a/tools/mo/unit_tests/mo/front/onnx/pad_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/pad_ext_test.py deleted file mode 100644 index e59c4f4af271da..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/pad_ext_test.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import onnx - -from openvino.tools.mo.front.onnx.pad_ext import PadFrontExtractor -from openvino.tools.mo.graph.graph import Graph -from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass - - -class TestPad(BaseExtractorsTestingClass): - @staticmethod - def _create_node(pads=None, value=None, mode=None): - if pads is None: - pads = [1, 2, 3, 4] - if value is None: - value = 0.0 - if mode is None: - mode = 'constant' - pb = onnx.helper.make_node( - 'Pad', - pads=pads, - mode=mode, - value=value, - inputs=['a'], - outputs=['b'] - ) - graph = Graph() - node = PB({'pb': pb, 'graph': graph}) - - return node - - def test_ok(self): - node = self._create_node() - PadFrontExtractor.extract(node) - self.res = node - - self.expected = { - 'pads': [[1, 3], [2, 4]], - 'mode': 'constant', - 'fill_value': 0 - } - - self.compare() - - def test_older_pad_opset_11(self): - node = self._create_node() - node.graph.graph['fw_opset_version'] = 11 - PadFrontExtractor.extract(node) - self.res = node - - self.expected = { - 'pads': [[1, 3], [2, 4]], - 'mode': 'constant', - 'fill_value': 0 - } - - self.compare() - - def test_reflect(self): - node = self._create_node(mode='reflect') - PadFrontExtractor.extract(node) - self.res = node - - self.expected = { - 'pads': [[1, 3], [2, 4]], - 'mode': 'reflect', - 'fill_value': 0 - } - - self.compare() - - def test_non_zero_fill_value(self): - node = self._create_node(value=1.0) - PadFrontExtractor.extract(node) - self.res = node - - self.expected = { - 'pads': [[1, 3], [2, 4]], - 'mode': 'constant', - 'fill_value': 1.0 - } - - self.compare() diff --git a/tools/mo/unit_tests/mo/front/onnx/priorbox_clustered_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/priorbox_clustered_ext_test.py deleted file mode 100644 index 9feb0233150fd8..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/priorbox_clustered_ext_test.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.priorbox_clustered_ext import PriorBoxClusteredFrontExtractor -from openvino.tools.mo.ops.priorbox_clustered import PriorBoxClusteredOp -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import PB - - -class TestPriorBoxClusteredExt(unittest.TestCase): - @staticmethod - def _create_priorbox_clustered_node(width=np.array([]), height=np.array([]), - flip=False, clip=False, variance=None, img_size=0, img_h=0, - img_w=0, step=0, step_h=0, step_w=0, offset=0): - pb = onnx.helper.make_node( - 'PriorBoxClustered', - inputs=['x'], - outputs=['y'], - width=width, - height=height, - flip=flip, - clip=clip, - variance=variance, - img_size=img_size, - img_h=img_h, - img_w=img_w, - step=step, - step_h=step_h, - step_w=step_w, - offset=offset, - ) - - node = PB({'pb': pb}) - return node - - @classmethod - def setUpClass(cls): - Op.registered_ops['PriorBoxClustered'] = PriorBoxClusteredOp - - def 
test_priorbox_clustered_no_pb_no_ml(self): - self.assertRaises(AttributeError, PriorBoxClusteredFrontExtractor.extract, None) - - def test_priorbox_clustered_ext_ideal_numbers(self): - node = self._create_priorbox_clustered_node(width= np.array([2, 3], dtype=float), - height=np.array([4, 5], dtype=float), - variance=np.array([0.2, 0.3, 0.2, 0.3]), - img_size=300, step=5.0, offset=0.6, flip=True) - - PriorBoxClusteredFrontExtractor.extract(node) - - exp_res = { - 'op': 'PriorBoxClustered', - 'type': 'PriorBoxClustered', - 'clip': 0, - 'flip': 1, - 'width': np.array([2, 3], dtype=float), - 'height': np.array([4, 5], dtype=float), - 'variance': [0.2, 0.3, 0.2, 0.3], - 'img_size': 300, - 'img_h': 0, - 'img_w': 0, - 'step': 5, - 'step_h': 0, - 'step_w': 0, - 'offset': 0.6 - } - - for key in exp_res.keys(): - if key in ['variance', 'width', 'height', 'step_h', 'step_w', 'offset']: - np.testing.assert_almost_equal(node[key], exp_res[key]) - else: - self.assertEqual(node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/onnx/priorbox_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/priorbox_ext_test.py deleted file mode 100644 index 78b19c72f04381..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/priorbox_ext_test.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.priorbox_ext import PriorBoxFrontExtractor -from openvino.tools.mo.ops.priorbox import PriorBoxOp -from openvino.tools.mo.ops.op import Op -from unit_tests.utils.extractors import PB - - -class TestPriorBoxExt(unittest.TestCase): - @staticmethod - def _create_priorbox_node(aspect_ratio=[], min_size=None, max_size=None, - flip=False, clip=False, variance=None, img_size=0, img_h=0, - img_w=0, step=0, step_h=0, step_w=0, offset=0): - pb = onnx.helper.make_node( - 'PriorBox', - inputs=['x'], - outputs=['y'], - aspect_ratio=aspect_ratio, - min_size=min_size, - max_size=max_size, - flip=flip, - clip=clip, - variance=variance, - img_size=img_size, - img_h=img_h, - img_w=img_w, - step=step, - step_h=step_h, - step_w=step_w, - offset=offset, - ) - - node = PB({'pb': pb}) - return node - - @classmethod - def setUpClass(cls): - Op.registered_ops['PriorBox'] = PriorBoxOp - - def test_priorbox_no_pb_no_ml(self): - self.assertRaises(AttributeError, PriorBoxFrontExtractor.extract, None) - - def test_priorbox_ext_ideal_numbers(self): - node = self._create_priorbox_node(aspect_ratio=np.array([2, 3], dtype=float), - variance=np.array([0.2, 0.3, 0.2, 0.3]), - img_size=300, step=5.0, offset=0.6, flip=True) - - PriorBoxFrontExtractor.extract(node) - - exp_res = { - 'op': 'PriorBox', - 'type': 'PriorBox', - 'clip': 0, - 'flip': 1, - 'aspect_ratio': np.array([2, 3], dtype=float), - 'variance': [0.2, 0.3, 0.2, 0.3], - 'img_size': 300, - 'img_h': 0, - 'img_w': 0, - 'step': 5, - 'step_h': 0, - 'step_w': 0, - 'offset': 0.6 - } - - for key in exp_res.keys(): - if key in ['variance', 'aspect_ratio', 'step_h', 'step_w', 'offset']: - np.testing.assert_almost_equal(node[key], exp_res[key]) - else: - self.assertEqual(node[key], exp_res[key]) diff --git a/tools/mo/unit_tests/mo/front/onnx/rnn_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/rnn_ext_test.py deleted file mode 100644 index 0d2d1f8b75a0ae..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/rnn_ext_test.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import 
unittest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.rnn_ext import RNNFrontExtractor -from unit_tests.utils.extractors import PB - - -class RNNExtractorTest(unittest.TestCase): - @staticmethod - def _create_node(**attrs): - pb = onnx.helper.make_node( - 'RNN', - inputs=['X', 'W', 'R', 'B',], - outputs=['Y', 'Y_h', 'Y_c'], - hidden_size=128, - **attrs, - ) - node = PB({'pb': pb}) - return node - - base_attrs = { - 'type': 'RNNSequence', - 'op': 'RNN', - 'batch_dim': 1, - 'sequence_dim': 0, - 'blobs_wrb': True, - 'has_num_directions': True, - 'num_layers': 1, - 'format': 'onnx', - 'multilayers': False, - 'gate_order': np.array([0]), - 'direction': 'forward', - } - - def test_base_attrs(self): - node = self._create_node() - RNNFrontExtractor.extract(node) - - exp_res = self.base_attrs - - for key in exp_res.keys(): - equal = np.all(np.equal(node[key], exp_res[key], dtype=object)) - self.assertTrue(equal) - - def test_additional_attributes(self): - additional_attrs = { - 'activation_alpha': [1.0, 0.0, 2.0], - 'activations': [b'relu', b'tanh', b'sigmoid'], - 'clip': 10.0, - } - - node = self._create_node(**additional_attrs) - RNNFrontExtractor.extract(node) - - exp_res = {**self.base_attrs, **additional_attrs} - exp_res['activations'] = ['relu', 'tanh', 'sigmoid'] - - for key in exp_res.keys(): - equal = np.all(np.equal(node[key], exp_res[key], dtype=object)) - self.assertTrue(equal, 'Values for attr {} are not equal'.format(key)) diff --git a/tools/mo/unit_tests/mo/front/onnx/squeeze_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/squeeze_ext_test.py deleted file mode 100644 index d93c05b777723e..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/squeeze_ext_test.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import onnx -import pytest - -from openvino.tools.mo.front.onnx.squeeze_ext import SqueezeFrontExtractor -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.squeeze import Squeeze -from unit_tests.utils.extractors import PB - - -class TestSqueezeONNXExt(): - @staticmethod - def _create_squeeze_node(axes): - if axes is None: - pb = onnx.helper.make_node( - 'Squeeze', - inputs=['x'], - outputs=['y'], - ) - else: - pb = onnx.helper.make_node( - 'Squeeze', - inputs=['x'], - outputs=['y'], - axes=axes, - ) - - node = PB({'pb': pb}) - return node - - @classmethod - def setUpClass(cls): - Op.registered_ops['Squeeze'] = Squeeze - - @pytest.mark.parametrize("axes",[[0, 1, 2, 3], [1], None]) - def test_squeeze_ext(self, axes): - node = self._create_squeeze_node(axes) - SqueezeFrontExtractor.extract(node) - - exp_res = { - 'type': 'Squeeze', - 'squeeze_dims': axes, - } - - for key in exp_res.keys(): - if type(node[key]) in [list, np.ndarray]: - assert np.array_equal(np.array(node[key]), np.array(exp_res[key])) - else: - assert node[key] == exp_res[key] diff --git a/tools/mo/unit_tests/mo/front/onnx/transpose_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/transpose_ext_test.py deleted file mode 100644 index 5fba56077d0cd4..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/transpose_ext_test.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import itertools -import pytest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.transpose_ext import TransposeFrontExtractor -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.ops.op import Op 
-from unit_tests.utils.extractors import PB - - -class TestTransposeONNXExt(): - @staticmethod - def _create_transpose_node(order: list): - if order is None: - # Default transpose - pb = onnx.helper.make_node( - 'Transpose', - inputs=['data'], - outputs=['transposed'], - ) - else: - # Transpose with order - pb = onnx.helper.make_node( - 'Transpose', - inputs=['data'], - outputs=['transposed'], - perm=order - ) - node = PB({'pb': pb}) - return node - - @classmethod - def setUpClass(cls): - Op.registered_ops['Transpose'] = Transpose - pass - - # This generator generates all permutations for [0,1,2,3] and [0,1,2] orders - @pytest.mark.parametrize("order",[list(order) for order in list(itertools.permutations(np.arange(4)))] + - [list(order) for order in list(itertools.permutations(np.arange(3)))] + [None]) - def test_transpose_ext(self, order): - node = self._create_transpose_node(order) - TransposeFrontExtractor.extract(node) - - exp_res = { - 'type': 'Transpose', - 'order': order, - 'infer': Transpose.infer - } - - for key in exp_res.keys(): - if isinstance(exp_res[key], list): - assert np.array_equal(node[key], exp_res[key]),\ - "Orders are not the same: {} and {}".format(node[key], exp_res[key]) - else: - assert node[key] == exp_res[key] diff --git a/tools/mo/unit_tests/mo/front/onnx/unsqueeze_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/unsqueeze_ext_test.py deleted file mode 100644 index 04164a7ce4c652..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/unsqueeze_ext_test.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np -import onnx - -from openvino.tools.mo.front.onnx.unsqueeze_ext import UnsqueezeFrontExtractor -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.ops.unsqueeze import Unsqueeze -from unit_tests.utils.extractors import PB - - -class TestUnsqueezeONNXExt(): - @staticmethod - def _create_unsqueeze_node(axes): - if axes is None: - pb = onnx.helper.make_node( - 'Unsqueeze', - inputs=['x'], - outputs=['y'], - ) - else: - pb = onnx.helper.make_node( - 'Unsqueeze', - inputs=['x'], - outputs=['y'], - axes=axes, - ) - - node = PB({'pb': pb}) - return node - - @classmethod - def setUpClass(cls): - Op.registered_ops['Unsqueeze'] = Unsqueeze - - @pytest.mark.parametrize("axes",[[0, 1, 2, 3], [1]]) - def test_unsqueeze_ext(self, axes): - node = self._create_unsqueeze_node(axes) - UnsqueezeFrontExtractor.extract(node) - - exp_res = { - 'expand_axis': axes, - } - - for key in exp_res.keys(): - if type(node[key]) in [list, np.ndarray]: - assert np.array_equal(np.array(node[key]), np.array(exp_res[key])) - else: - assert node[key] == exp_res[key] diff --git a/tools/mo/unit_tests/mo/front/onnx/upsample_ext_test.py b/tools/mo/unit_tests/mo/front/onnx/upsample_ext_test.py deleted file mode 100644 index c58a78dd3bfc87..00000000000000 --- a/tools/mo/unit_tests/mo/front/onnx/upsample_ext_test.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import onnx - -from openvino.tools.mo.front.onnx.upsample_ext import UpsampleFrontExtractor -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.extractors import BaseExtractorsTestingClass -from unit_tests.utils.graph import build_graph - - -class UpsampleONNXExtractorTest(BaseExtractorsTestingClass): - @staticmethod - def _create_node(attrs: dict): - pb = onnx.helper.make_node("Upsample", 
["X"], ["Y"], **attrs) - graph = build_graph({'node_0': {'pb': pb}}, []) - return Node(graph, 'node_0') - - @staticmethod - def _base_attrs(): - # Commonly used attributes in the tests - # Each test takes these ones and then adds/modifies/deletes particular fields - return ( - # test input ONNX attributes - dict( - mode='nearest', - scales=[1., 1., 2., 2.], - ), - # reference output Node attributes - dict( - width_scale=2.0, - height_scale=2.0, - mode='nearest', - ) - ) - - @staticmethod - def _extract(inp): - node = __class__._create_node(inp) - UpsampleFrontExtractor.extract(node) - return node - - def _match(self, out, ref): - self.res = out - self.expected = ref - self.compare() - - def test_all_valid_default(self): - inp, ref = self._base_attrs() - out = self._extract(inp) - self._match(out, ref) - - def test_invalid_mode(self): - inp, ref = self._base_attrs() - inp['mode'] = 'invalid_mode' - with self.assertRaisesRegex(Error, '.*decoding Upsample.*supported modes.*'): - out = self._extract(inp) - - def test_invalid_scales(self): - inp, ref = self._base_attrs() - inp['scales'] = [1.5, 1.5, 2.0, 2.0] - with self.assertRaisesRegex(Error, '.*Upsampling of batch and feature dimensions is not supported for node.*'): - out = self._extract(inp) - - def test_invalid_2D_scales(self): - inp, ref = self._base_attrs() - inp['scales'] = [2.0, 2.0] - with self.assertRaisesRegex(Error, - '.*Upsample scales attribute is wrong for node.*. Only 4D scales are supported.'): - out = self._extract(inp) diff --git a/tools/mo/unit_tests/mo/front/output_cut_test.py b/tools/mo/unit_tests/mo/front/output_cut_test.py deleted file mode 100644 index 6ea687ec999f2c..00000000000000 --- a/tools/mo/unit_tests/mo/front/output_cut_test.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.output_cut import OutputCut -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, regular_op - -nodes = { - **regular_op('Parameter1', {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}), - **regular_op('Op1', {'type': 'Op1', 'kind': 'op', 'op': 'Op1'}), - **regular_op('Op2', {'type': 'Op2', 'kind': 'op', 'op': 'Op2'}), - - **regular_op('FakeOutput1', {'type': 'Identity', 'kind': 'op', 'op': 'Identity', 'needs_removal': True, 'id': 0}), - **regular_op('FakeOutput2', {'type': 'Identity', 'kind': 'op', 'op': 'Identity', 'needs_removal': True, 'id': 1}), - -} - - -class TestsOutputCut(unittest.TestCase): - def test_case1(self): - graph = build_graph(nodes, [('Parameter1', 'FakeOutput1', - {'in': 0, 'out': 0, 'fw_tensor_debug_info': - [('Parameter1', 'Parameter1_tensor_name')]})]) - graph.graph['packed_outputs'] = None - graph.graph['user_shapes'] = None - - graph.stage = 'front' - OutputCut().find_and_replace_pattern(graph) - - param1 = Node(graph, 'Parameter1') - self.assertTrue(param1.out_node()['type'] == 'Result') - self.assertTrue(param1.out_edge()['fw_tensor_debug_info'] == [('Parameter1', 'Parameter1_tensor_name')]) - self.assertTrue(graph.get_op_nodes(name='FakeOutput1') == []) - - def test_case2(self): - graph = build_graph(nodes, [('Parameter1', 'Op1'), - ('Op1', 'FakeOutput1', - {'in': 1, 'out': 1, 'fw_tensor_debug_info': - [('Op1', 'Op1_tensor_name')]}), - ('Parameter1', 'Op2'), - ('Op2', 'FakeOutput2', - {'in': 2, 'out': 3, - 'fw_tensor_debug_info': [('Op2', 'Op2_tensor_name')]})]) - graph.graph['packed_outputs'] = None - graph.graph['user_shapes'] = None - - 
graph.stage = 'front' - OutputCut().find_and_replace_pattern(graph) - - op1 = Node(graph, 'Op1') - op2 = Node(graph, 'Op2') - self.assertTrue(op1.out_node(1)['type'] == 'Result') - self.assertTrue(op2.out_node(3)['type'] == 'Result') - self.assertTrue(op1.out_edge(1)['fw_tensor_debug_info'] == [('Op1', 'Op1_tensor_name')]) - self.assertTrue(op2.out_edge(3)['fw_tensor_debug_info'] == [('Op2', 'Op2_tensor_name')]) - self.assertTrue(graph.get_op_nodes(name='FakeOutput1') == []) - self.assertTrue(graph.get_op_nodes(name='FakeOutput2') == []) - - def test_case3(self): - graph = build_graph(nodes, []) - graph.graph['packed_outputs'] = None - graph.graph['user_shapes'] = None - - graph.stage = 'front' - OutputCut().find_and_replace_pattern(graph) - - self.assertTrue(graph.get_op_nodes(name='FakeOutput1') == []) - self.assertTrue(graph.get_op_nodes(name='FakeOutput2') == []) diff --git a/tools/mo/unit_tests/mo/front/rank_decomposer_test.py b/tools/mo/unit_tests/mo/front/rank_decomposer_test.py deleted file mode 100644 index 860e58c69327f7..00000000000000 --- a/tools/mo/unit_tests/mo/front/rank_decomposer_test.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.rank_decomposer import RankDecomposer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect, \ - valued_const_with_data - -nodes = lambda output_type: { - **regular_op_with_empty_data('input', {'type': 'Parameter'}), - **regular_op_with_empty_data('rank', {'op': 'Rank', 'type': None, 'output_type': output_type, 'name': 'my_rank'}), - **result(), - - **regular_op_with_empty_data('shape', {'type': 'ShapeOf', 'output_type': output_type}), - **regular_op_with_empty_data('rank_1D', {'type': 'ShapeOf', 'output_type': output_type}), - **valued_const_with_data('zero', int64_array(0)), - **regular_op_with_empty_data('rank_0D', {'type': 'Squeeze'}), -} - - -class TestRankDecomposerTest(): - - @pytest.mark.parametrize("output_type", [np.int32, np.int64]) - def test_rank_decomposer(self, output_type): - graph = build_graph(nodes_attrs=nodes(output_type), edges=[ - *connect('input', 'rank'), - *connect('rank', 'output'), - ], nodes_with_edges_only=True) - RankDecomposer().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attrs=nodes(output_type), edges=[ - *connect('input', 'shape'), - *connect('shape', 'rank_1D'), - *connect('rank_1D', '0:rank_0D'), - *connect('zero', '1:rank_0D'), - *connect('rank_0D', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - assert graph.get_op_nodes(type='Squeeze')[0]['name'] == 'my_rank',\ - 'Name is not inherited from original node for RankDecomposer' - print(output_type) - - def test_rank_decomposer_assertion(self): - graph = build_graph(nodes_attrs=nodes(None), edges=[ - *connect('input', 'rank'), - *connect('rank', 'output'), - ], nodes_with_edges_only=True) - with pytest.raises(AssertionError): - RankDecomposer().find_and_replace_pattern (graph) diff --git a/tools/mo/unit_tests/mo/front/reciprocal_test.py b/tools/mo/unit_tests/mo/front/reciprocal_test.py deleted file mode 100644 index eeb88858000e21..00000000000000 --- a/tools/mo/unit_tests/mo/front/reciprocal_test.py +++ /dev/null 
@@ -1,74 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.reciprocal import ReciprocalReplacer -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - # Reciprocal operation - 'reciprocal_1': {'kind': 'op', 'op': 'Reciprocal'}, - # Test operation - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': None}, - # Pow operations - 'const': {'value': np.array(-1), 'op': 'Const', 'kind': 'op'}, - 'pow': {'type': 'Power', 'kind': 'op', 'op': 'Pow'}, -} - - -class ReciprocalReplacerTests(unittest.TestCase): - def test_reciprocal_test_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'reciprocal_1'), - ('reciprocal_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'pow', {'in': 0}), - ('const', 'pow', {'in': 1}), - ('pow', 'last'), - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - }, nodes_with_edges_only=True) - - graph.stage = 'front' - - pattern = ReciprocalReplacer() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_neg_reciprocal_1(self): - # Test if power = 0 - - graph = build_graph(nodes_attributes, - [('placeholder_1', 'reciprocal_1'), - ('reciprocal_1', 'last') - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'pow'), - ('const', 'pow', {'in': 1}), - ('pow', 'last'), - ], - {'placeholder_1': {'shape': np.array([1, 227, 227, 3])}, - 'const': {'value': np.array(0)}, - }, nodes_with_edges_only=True) - - graph.stage = 'front' - - pattern = ReciprocalReplacer() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(not flag) diff --git a/tools/mo/unit_tests/mo/front/reduce_axis_normalizer_test.py b/tools/mo/unit_tests/mo/front/reduce_axis_normalizer_test.py deleted file mode 100644 index 762141aa1a9602..00000000000000 --- a/tools/mo/unit_tests/mo/front/reduce_axis_normalizer_test.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.reduce_axis_normalizer import ReduceAxisNormalizer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, connect_front, regular_op - -nodes = { - **regular_op('parameter', {'type': 'Parameter'}), - **regular_op('reduce', {'op': 'ReduceSum', 'axis': None}), - **regular_op('axis', {'op': 'Const', 'type': 'Const', 'value': int64_array([1])}), - **result(), -} - -edges = [ - *connect_front('parameter:0', '0:reduce'), - *connect_front('reduce', 'output'), -] - - -class ReduceAxisNormalizerTest(unittest.TestCase): - def test_reduce_axis_is_None(self): - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - graph.stage = 'front' - - ReduceAxisNormalizer().find_and_replace_pattern(graph) - - ref_nodes = 
nodes.copy() - ref_nodes.update({**regular_op('rank', {'op': 'Rank', 'type': None}), - **regular_op('range', {'op': 'Range', 'type': 'Range'}), - **regular_op('begin', {'type': 'Const', 'value': int64_array([0])}), - **regular_op('step', {'type': 'Const', 'value': int64_array([1])}), - }) - graph_ref = build_graph(ref_nodes, [ - *edges, - *connect_front('parameter:0', 'rank'), - *connect_front('begin:0', '0:range'), - *connect_front('rank:0', '1:range'), - *connect_front('step:0', '2:range'), - *connect_front('range:0', '1:reduce'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_reduce_axis_is_const(self): - graph = build_graph(nodes, edges, {'reduce': {'axis': 1}}, nodes_with_edges_only=True) - graph.stage = 'front' - - graph_ref = build_graph(nodes, [ - *edges, - *connect_front('axis', '1:reduce'), - ], {'axis': {'value': np.int64(1)}}, nodes_with_edges_only=True) - - ReduceAxisNormalizer().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/scatter_normalizer_test.py b/tools/mo/unit_tests/mo/front/scatter_normalizer_test.py deleted file mode 100644 index cf8be349401969..00000000000000 --- a/tools/mo/unit_tests/mo/front/scatter_normalizer_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.scatter_normalizer import ScatterNormalizer -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, connect, \ - regular_op_with_empty_data - -nodes = { - **regular_op_with_empty_data('placeholder_1', {'type': 'Parameter'}), - **regular_op_with_empty_data('placeholder_2', {'type': 'Parameter'}), - **regular_op_with_empty_data('placeholder_3', {'type': 'Parameter'}), - **regular_op_with_empty_data('node', {'op': 'ScatterElementsUpdate', 'is_scatter': True}), - **regular_op_with_empty_data('axis', {'type': 'Const', 'value': None}), - **result(), -} - -edges = [ - *connect('placeholder_1', '0:node'), - *connect('placeholder_2', '1:node'), - *connect('placeholder_3', '2:node'), - *connect('node', 'output'), -] - - -class TestDiv(unittest.TestCase): - def test_ScatterElementsUpdate_has_axis_and_3_inputs(self): - graph = build_graph(nodes, edges, {'node': {'axis': 1}}, nodes_with_edges_only=True) - ScatterNormalizer().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [ - *edges, - *connect('axis', '3:node'), - ], {'axis': {'value': np.int64(1)}}, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_ScatterElementsUpdate_has_axis_and_4_inputs(self): - graph = build_graph(nodes, [ - *edges, - *connect('axis', '3:node'), - ], {'node': {'axis': 1}, 'axis': {'value': np.int64(1)}}, nodes_with_edges_only=True) - self.assertRaises(AssertionError, ScatterNormalizer().find_and_replace_pattern, graph) - - def test_ScatterElementsUpdate_has_no_axis_and_3_inputs(self): - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - self.assertRaises(AssertionError, ScatterNormalizer().find_and_replace_pattern, graph) - - def test_ScatterElementsUpdate_has_no_axis_and_4_inputs(self): - graph = build_graph(nodes, [ - *edges, - *connect('axis', 
'3:node'), - ], {'axis': {'value': np.int64(1)}}, nodes_with_edges_only=True) - ScatterNormalizer().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [ - *edges, - *connect('axis', '3:node'), - ], {'axis': {'value': np.int64(1)}}, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/size_replacer_test.py b/tools/mo/unit_tests/mo/front/size_replacer_test.py deleted file mode 100644 index 2117e44c0fd25d..00000000000000 --- a/tools/mo/unit_tests/mo/front/size_replacer_test.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.SizeReplacer import SizeFrontReplacer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect, \ - valued_const_with_data - -nodes = lambda output_type: { - **regular_op_with_empty_data('input', {'type': 'Parameter'}), - **regular_op_with_empty_data('size', {'op': 'Size', 'type': None, 'output_type': output_type, 'name': 'my_size'}), - **result(), - - **regular_op_with_empty_data('shape', {'type': 'ShapeOf', 'output_type': output_type}), - **valued_const_with_data('zero', int64_array([0])), - **regular_op_with_empty_data('reduce', {'type': 'ReduceProd', 'keep_dims': False}), -} - - -class TestSizeReplacerTest(): - - @pytest.mark.parametrize("output_type" ,[np.int32, np.int64]) - def test_size_replacer(self, output_type): - graph = build_graph(nodes_attrs=nodes(output_type), edges=[ - *connect('input', 'size'), - *connect('size', 'output'), - ], nodes_with_edges_only=True) - SizeFrontReplacer().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attrs=nodes(output_type), edges=[ - *connect('input', 'shape'), - *connect('shape', '0:reduce'), - *connect('zero', '1:reduce'), - *connect('reduce', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp - assert graph.get_op_nodes(type='ReduceProd')[0]['name'] == 'my_size',\ - 'Name is not inherited from original node for SizeReplacer' - print(output_type) - - def test_size_replacer_assertion(self): - graph = build_graph(nodes_attrs=nodes(None), edges=[ - *connect('input', 'size'), - *connect('size', 'output'), - ], nodes_with_edges_only=True) - with pytest.raises(AssertionError): - SizeFrontReplacer().find_and_replace_pattern (graph) diff --git a/tools/mo/unit_tests/mo/front/split_normalizer_test.py b/tools/mo/unit_tests/mo/front/split_normalizer_test.py deleted file mode 100644 index 926f29422359d4..00000000000000 --- a/tools/mo/unit_tests/mo/front/split_normalizer_test.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.split_normalizer import SqueezeAxis -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - -nodes_attributes = { - 'placeholder': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'attr_split': {'type': None, 'kind': 'op', 'op': 'AttributedSplit', 
'axis': 0, 'num_splits': 2, - 'squeeze_axis': True}, - 'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 2, 'squeeze_axis': True}, - **const('split_axis', int64_array(0)), - 'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat', 'axis': 0}, - 'result': {'type': 'Result', 'value': None, 'kind': 'op', 'op': 'Result'}, - - 'squeeze1': {'type': 'Squeeze', 'kind': 'op', 'op': 'Squeeze'}, - 'squeeze2': {'type': 'Squeeze', 'kind': 'op', 'op': 'Squeeze'}, - **const('squeeze1_axis', int64_array(0)), - **const('squeeze2_axis', int64_array(0)), -} - - -class SqueezeAxisTest(unittest.TestCase): - def test_attributed(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'attr_split', {'in': 0, 'out': 0}), - ('attr_split', 'concat', {'in': 0, 'out': 0}), - ('attr_split', 'concat', {'in': 1, 'out': 1}), - ('concat', 'result', {'in': 0, 'out': 0}), - ], nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder', 'attr_split', {'in': 0, 'out': 0}), - ('attr_split', 'squeeze1', {'in': 0, 'out': 0}), - ('squeeze1_axis', 'squeeze1', {'in': 1, 'out': 0}), - ('attr_split', 'squeeze2', {'in': 0, 'out': 1}), - ('squeeze2_axis', 'squeeze2', {'in': 1, 'out': 0}), - ('squeeze1', 'concat', {'in': 0, 'out': 0}), - ('squeeze2', 'concat', {'in': 1, 'out': 0}), - ('concat', 'result', {'in': 0, 'out': 0}), - ], nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - SqueezeAxis().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_split(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'split', {'in': 0, 'out': 0}), - ('split_axis', 'split', {'in': 1, 'out': 0}), - ('split', 'concat', {'in': 0, 'out': 0}), - ('split', 'concat', {'in': 1, 'out': 1}), - ('concat', 'result', {'in': 0, 'out': 0}), - ], nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder', 'split', {'in': 0, 'out': 0}), - ('split_axis', 'split', {'in': 1, 'out': 0}), - ('split', 'squeeze1', {'in': 0, 'out': 0}), - ('split_axis', 'squeeze1', {'in': 1, 'out': 0}), - ('split', 'squeeze2', {'in': 0, 'out': 1}), - ('split_axis', 'squeeze2', {'in': 1, 'out': 0}), - ('squeeze1', 'concat', {'in': 0, 'out': 0}), - ('squeeze2', 'concat', {'in': 1, 'out': 0}), - ('concat', 'result', {'in': 0, 'out': 0}), - ], nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph.stage = 'front' - - SqueezeAxis().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/sub_test.py b/tools/mo/unit_tests/mo/front/sub_test.py deleted file mode 100644 index 50cbe26634d9ad..00000000000000 --- a/tools/mo/unit_tests/mo/front/sub_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.sub import Sub -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect, \ - connect_data - -nodes = { - **regular_op_with_shaped_data('placeholder_1', [1, 227, 227, 3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('placeholder_2', [1, 227, 227, 3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('sub', None, {'op': 
'Sub', 'type': 'Subtract', 'name': 'my_sub'}), - - **regular_op_with_shaped_data('negate', [1, 227, 227, 3], {'type': 'Multiply'}), - **valued_const_with_data('minus_one', np.array(-1.)), - **regular_op_with_shaped_data('add', None, {'type': 'Add'}), - - **result(), -} - - -class TestSub(unittest.TestCase): - def test_sub_test_1(self): - # Test with two different inputs from two placeholders - graph = build_graph(nodes, [ - *connect('placeholder_1', '0:sub'), - *connect('placeholder_2', '1:sub'), - *connect('sub', 'output'), - ], nodes_with_edges_only=True) - Sub().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [ - *connect('placeholder_1', '0:add'), - *connect('placeholder_2', '0:negate'), - *connect('minus_one', '1:negate'), - *connect('negate', '1:add'), - *connect('add', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Add')[0]]['name'] == 'my_sub') - - def test_sub_test_2(self): - # Test with two same inputs from one placeholder - graph = build_graph(nodes, [ - *connect('placeholder_1:0', '0:sub'), - *connect_data('placeholder_1:0', '1:sub'), - *connect('sub', 'output'), - ], nodes_with_edges_only=True) - Sub().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [ - *connect('placeholder_1:0', '0:add'), - *connect_data('placeholder_1:0', '0:negate'), - *connect('minus_one', '1:negate'), - *connect('negate', '1:add'), - *connect('add', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Add')[0]]['name'] == 'my_sub') diff --git a/tools/mo/unit_tests/mo/front/tf/CTCGreedyDecoderReplacement_test.py b/tools/mo/unit_tests/mo/front/tf/CTCGreedyDecoderReplacement_test.py deleted file mode 100644 index 4b4e4ebd069b1e..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/CTCGreedyDecoderReplacement_test.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.tf.CTCGreedyDecoderReplacement import CTCGreedyDecoderReplacement, \ - CTCGreedyDecoderWithSparseToDenseShapeReplacement, CTCGreedyDecoderSingleReplacement -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - - -class CTCGreedyDecoderReplacementTests(unittest.TestCase): - nodes_attributes = { - # nodes from original graph - 'logits': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'seq_len': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'order_arr': {'kind': 'op', 'op': 'Const'}, - 'transpose': {'type': 'Transpose', 'kind': 'op', 'op': 'Transpose'}, - 'decoder': {'kind': 'op', 'op': 'CTCGreedyDecoderSeqLen', 'merge_repeated': True, 'output_sparse_format': True}, - 'cast': {'kind': 'op', 'op': 'Cast'}, - 'sparse_to_dense': {'kind': 'op', 'op': 'SparseToDense'}, - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - 'last_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - - # new nodes - 'new_decoder': {'kind': 'op', 'op': 'CTCGreedyDecoderSeqLen', 'merge_repeated': True}, - **const('squeeze_axes', int64_array([2, 3])), - 'squeeze_dec_seq': 
{'kind': 'op', 'op': 'Squeeze'}, - 'cast_to_int': {'kind': 'op', 'op': 'Cast'}, - 'out_seq_len': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - } - - def test_CTCGreedyDecoderWithSparseToDenseShape(self): - graph = build_graph(self.nodes_attributes, - [('logits', 'decoder', {'out': 0, 'in': 0}), - ('seq_len', 'decoder', {'out': 0, 'in': 1}), - ('decoder', 'sparse_to_dense', {'out': 0, 'in': 0}), - ('decoder', 'sparse_to_dense', {'out': 2, 'in': 1}), - ('decoder', 'cast', {'out': 1, 'in': 0}), - ('cast', 'sparse_to_dense', {'out': 0}), - ('sparse_to_dense', 'last', {'out': 0, 'in': 0}), - ], nodes_with_edges_only=True) - graph.stage = 'front' - CTCGreedyDecoderWithSparseToDenseShapeReplacement().find_and_replace_pattern(graph) - - graph_ref = build_graph(self.nodes_attributes, - [('logits', 'transpose', {'out': 0, 'in': 0}), - ('order_arr', 'transpose', {'out': 0, 'in': 1}), - ('transpose', 'new_decoder', {'out': 0, 'in': 0}), - ('seq_len', 'new_decoder', {'out': 0, 'in': 1}), - ('new_decoder', 'last', {'out': 0, 'in': 0}), - ], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_CTCGreedyDecoderReplacement(self): - graph = build_graph(self.nodes_attributes, - [('logits', 'decoder', {'out': 0, 'in': 0}), - ('seq_len', 'decoder', {'out': 0, 'in': 1}), - ('decoder', 'sparse_to_dense', {'out': 0, 'in': 0}), - ('decoder', 'cast', {'out': 1, 'in': 0}), - ('cast', 'sparse_to_dense', {'out': 0}), - ('sparse_to_dense', 'last', {'out': 0, 'in': 0}), - ], nodes_with_edges_only=True) - graph.stage = 'front' - CTCGreedyDecoderReplacement().find_and_replace_pattern(graph) - - graph_ref = build_graph(self.nodes_attributes, - [('logits', 'transpose', {'out': 0, 'in': 0}), - ('order_arr', 'transpose', {'out': 0, 'in': 1}), - ('transpose', 'new_decoder', {'out': 0, 'in': 0}), - ('seq_len', 'new_decoder', {'out': 0, 'in': 1}), - ('new_decoder', 'last', {'out': 0, 'in': 0}), - ], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_CTCGreedyDecoderSingle(self): - graph = build_graph(self.nodes_attributes, - [('logits', 'decoder', {'out': 0, 'in': 0}), - ('seq_len', 'decoder', {'out': 0, 'in': 1}), - ('decoder', 'last', {'out': 0, 'in': 0}), - ('decoder', 'last_1', {'out': 1, 'in': 0}), - ], nodes_with_edges_only=True) - graph.stage = 'front' - CTCGreedyDecoderSingleReplacement().find_and_replace_pattern(graph) - - graph_ref = build_graph(self.nodes_attributes, - [('logits', 'transpose', {'out': 0, 'in': 0}), - ('order_arr', 'transpose', {'out': 0, 'in': 1}), - ('transpose', 'new_decoder', {'out': 0, 'in': 0}), - ('seq_len', 'new_decoder', {'out': 0, 'in': 1}), - ('new_decoder', 'last', {'out': 0, 'in': 0}), - ('new_decoder', 'last_1', {'out': 1, 'in': 0}), - ], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_CTCGreedyDecoderSingle_negative(self): - edges = [('logits', 'decoder', {'out': 0, 'in': 0}), - ('seq_len', 'decoder', {'out': 0, 'in': 1}), - ('decoder', 'sparse_to_dense', {'out': 0, 'in': 0}), - ('decoder', 'cast', {'out': 1, 'in': 0}), - ('cast', 'sparse_to_dense', {'out': 0}), - ('sparse_to_dense', 'last', {'out': 0, 'in': 0}), - ] - graph = build_graph(self.nodes_attributes, - edges, nodes_with_edges_only=True) - graph.stage = 'front' - 
CTCGreedyDecoderSingleReplacement().find_and_replace_pattern(graph) - - graph_ref = build_graph(self.nodes_attributes, - edges, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_CTCGreedyDecoder_no_consequent_transforms(self): - graph = build_graph(self.nodes_attributes, - [('logits', 'decoder', {'out': 0, 'in': 0}), - ('seq_len', 'decoder', {'out': 0, 'in': 1}), - ('decoder', 'sparse_to_dense', {'out': 0, 'in': 0}), - ('decoder', 'sparse_to_dense', {'out': 2, 'in': 1}), - ('decoder', 'cast', {'out': 1, 'in': 0}), - ('cast', 'sparse_to_dense', {'out': 0}), - ('sparse_to_dense', 'last', {'out': 0, 'in': 0}), - ], nodes_with_edges_only=True) - graph.stage = 'front' - CTCGreedyDecoderWithSparseToDenseShapeReplacement().find_and_replace_pattern(graph) - CTCGreedyDecoderSingleReplacement().find_and_replace_pattern(graph) - - graph_ref = build_graph(self.nodes_attributes, - [('logits', 'transpose', {'out': 0, 'in': 0}), - ('order_arr', 'transpose', {'out': 0, 'in': 1}), - ('transpose', 'new_decoder', {'out': 0, 'in': 0}), - ('seq_len', 'new_decoder', {'out': 0, 'in': 1}), - ('new_decoder', 'last', {'out': 0, 'in': 0}), - ], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/CTCLossReplacement_test.py b/tools/mo/unit_tests/mo/front/tf/CTCLossReplacement_test.py deleted file mode 100644 index eaf44cf4b5bd49..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/CTCLossReplacement_test.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import unittest -from argparse import Namespace - -from openvino.tools.mo.front.tf.CTCLossReplacement import CTCLossReplacement -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - - -class CTCLossFrontReplacementTest(unittest.TestCase): - nodes_attributes = { - 'logits': {'shape': int64_array([2, 6, 100]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'seq_mask': {'shape': int64_array([2]), 'data_type': np.int32, 'kind': 'op', 'op': 'Parameter'}, - 'transpose': {'kind': 'op', 'op': 'Transpose'}, - 'ctc_greedy_decoder': {'kind': 'op', 'op': 'CTCGreedyDecoderSeqLen', 'merge_repeated': True, - 'output_sparse_format': True}, - 'cast': {'kind': 'op', 'op': 'Cast'}, - 'sparse_to_dense': {'kind': 'op', 'op': 'SparseToDense'}, - 'tf_ctc_loss_true_logits': {'kind': 'op', 'op': 'CTCLoss', 'preprocess_collapse_repeated': False, - 'ctc_merge_repeated': True, 'unique': False, 'logits_time_major': True}, - 'tf_ctc_loss_false_logits': {'kind': 'op', 'op': 'CTCLoss', 'preprocess_collapse_repeated': False, - 'ctc_merge_repeated': True, 'unique': False, 'logits_time_major': False}, - 'ctc_loss': {'kind': 'op', 'op': 'CTCLoss', 'preprocess_collapse_repeated': False, - 'ctc_merge_repeated': True, 'unique': False}, - **const('default_value', int64_array(-1)), - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - 'transpose2': {'kind': 'op', 'op': 'Transpose'}, - **const('transpose2_axis', int64_array([1, 0, 2])), - - 'new_ctc_greedy_decoder': {'kind': 'op', 'op': 'CTCGreedyDecoderSeqLen', 'merge_repeated': True}, - } - - def CTCLossReplacement_test_true_logits(self): - graph = 
build_graph(self.nodes_attributes, - [('logits', 'transpose', {'out': 0, 'in': 0}), - ('transpose', 'ctc_greedy_decoder', {'out': 0, 'in': 0}), - ('seq_mask', 'ctc_greedy_decoder', {'out': 0, 'in': 1}), - ('transpose', 'tf_ctc_loss_true_logits', {'out': 0, 'in': 0}), - ('seq_mask', 'tf_ctc_loss_true_logits', {'out': 0, 'in': 3}), - ('ctc_greedy_decoder', 'sparse_to_dense', {'out': 0, 'in': 0}), - ('ctc_greedy_decoder', 'sparse_to_dense', {'out': 2, 'in': 1}), - ('ctc_greedy_decoder', 'sparse_to_dense', {'out': 1, 'in': 2}), - ('default_value', 'sparse_to_dense', {'out': 0, 'in': 3}), - ('ctc_greedy_decoder', 'cast', {'out': 1, 'in': 0}), - ('ctc_greedy_decoder', 'tf_ctc_loss_true_logits', {'out': 0, 'in': 1}), - ('cast', 'tf_ctc_loss_true_logits', {'out': 0, 'in': 2}), - ('tf_ctc_loss_true_logits', 'last', {'out': 0, 'in': 0})], - nodes_with_edges_only=True) - graph.graph['cmd_params'] = Namespace(data_type='FP32') - graph.stage = 'front' - CTCLossReplacement().find_and_replace_pattern(graph) - - graph_ref = build_graph(self.nodes_attributes, - [('logits', 'transpose', {'out': 0, 'in': 0}), - ('transpose', 'transpose2', {'out': 0, 'in': 0}), - ('transpose2_axis', 'transpose2', {'out': 0, 'in': 1}), - ('transpose2', 'new_ctc_greedy_decoder', {'out': 0, 'in': 0}), - ('seq_mask', 'new_ctc_greedy_decoder', {'out': 0, 'in': 1}), - ('transpose2', 'ctc_loss', {'out': 0, 'in': 0}), - ('new_ctc_greedy_decoder', 'ctc_loss', {'out': 0, 'in': 2}), - ('new_ctc_greedy_decoder', 'ctc_loss', {'out': 1, 'in': 3}), - ('seq_mask', 'ctc_loss', {'out': 0, 'in': 1}), - ('ctc_loss', 'last', {'out': 0, 'in': 0})], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def CTCLossReplacement_test_false_logits(self): - graph = build_graph(self.nodes_attributes, - [('logits', 'transpose', {'out': 0, 'in': 0}), - ('transpose', 'ctc_greedy_decoder', {'out': 0, 'in': 0}), - ('seq_mask', 'ctc_greedy_decoder', {'out': 0, 'in': 1}), - ('transpose', 'tf_ctc_loss_false_logits', {'out': 0, 'in': 0}), - ('seq_mask', 'tf_ctc_loss_false_logits', {'out': 0, 'in': 3}), - ('ctc_greedy_decoder', 'sparse_to_dense', {'out': 0, 'in': 0}), - ('ctc_greedy_decoder', 'sparse_to_dense', {'out': 2, 'in': 1}), - ('ctc_greedy_decoder', 'sparse_to_dense', {'out': 1, 'in': 2}), - ('default_value', 'sparse_to_dense', {'out': 0, 'in': 3}), - ('ctc_greedy_decoder', 'cast', {'out': 1, 'in': 0}), - ('ctc_greedy_decoder', 'tf_ctc_loss_false_logits', {'out': 0, 'in': 1}), - ('cast', 'tf_ctc_loss_false_logits', {'out': 0, 'in': 2}), - ('tf_ctc_loss_false_logits', 'last', {'out': 0, 'in': 0})], - nodes_with_edges_only=True) - graph.graph['cmd_params'] = Namespace(data_type='FP32') - graph.stage = 'front' - CTCLossReplacement().find_and_replace_pattern(graph) - - graph_ref = build_graph(self.nodes_attributes, - [('logits', 'transpose', {'out': 0, 'in': 0}), - ('transpose', 'transpose2', {'out': 0, 'in': 0}), - ('transpose2_axis', 'transpose2', {'out': 0, 'in': 1}), - ('transpose2', 'new_ctc_greedy_decoder', {'out': 0, 'in': 0}), - ('seq_mask', 'new_ctc_greedy_decoder', {'out': 0, 'in': 1}), - ('transpose', 'ctc_loss', {'out': 0, 'in': 0}), - ('new_ctc_greedy_decoder', 'ctc_loss', {'out': 0, 'in': 2}), - ('new_ctc_greedy_decoder', 'ctc_loss', {'out': 1, 'in': 3}), - ('seq_mask', 'ctc_loss', {'out': 0, 'in': 1}), - ('ctc_loss', 'last', {'out': 0, 'in': 0})], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) 
- self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/ComplexAbsAfterComplex_test.py b/tools/mo/unit_tests/mo/front/tf/ComplexAbsAfterComplex_test.py deleted file mode 100644 index 616730d868e2fc..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/ComplexAbsAfterComplex_test.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -import unittest - -import numpy as np - -from openvino.tools.mo.front.tf.ComplexAbsAfterComplex import ComplexAbsAfterComplex -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -graph_node_attrs = { - 'placeholder_0': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'complex': {'kind': 'op', 'op': 'Complex'}, - 'complex_abs': {'kind': 'op', 'op': 'ComplexAbs'}, - 'relu': {'type': 'ReLU', 'kind': 'op', 'op': 'ReLU'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, -} - -graph_edges = [ - ('placeholder_0', 'complex', {'in': 0}), - ('placeholder_1', 'complex', {'in': 1}), - ('complex', 'complex_abs', {'in': 0}), - ('complex_abs', 'relu'), - ('relu', 'output'), -] - - -ref_graph_node_attrs = { - 'placeholder_0': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'pow0_const': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': np.float32(2.0) - }, - 'pow1_const': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': np.float32(2.0) - }, - 'pow0': {'type': 'Power', 'kind': 'op', 'op': 'Pow'}, - 'pow1': {'type': 'Power', 'kind': 'op', 'op': 'Pow'}, - 'add': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - 'sqrt_const': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': np.float32(0.5) - }, - 'sqrt': {'type': 'Power', 'kind': 'op', 'op': 'Pow'}, - 'relu': {'type': 'ReLU', 'kind': 'op', 'op': 'ReLU'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, -} - -ref_graph_edges = [ - ('placeholder_0', 'pow0', {'in': 0}), - ('placeholder_1', 'pow1', {'in': 0}), - ('pow0_const', 'pow0', {'in': 1}), - ('pow1_const', 'pow1', {'in': 1}), - ('pow0', 'add', {'in': 0}), - ('pow1', 'add', {'in': 1}), - ('add', 'sqrt', {'in': 0}), - ('sqrt_const', 'sqrt', {'in': 1}), - ('sqrt', 'relu'), - ('relu', 'output'), -] - - -class ComplexAbsAfterComplexTest(unittest.TestCase): - def test_replacement(self): - graph = build_graph(nodes_attrs=graph_node_attrs, edges=graph_edges) - graph.stage = 'front' - ComplexAbsAfterComplex().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=ref_graph_node_attrs, edges=ref_graph_edges) - (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/ComplexAbs_test.py b/tools/mo/unit_tests/mo/front/tf/ComplexAbs_test.py deleted file mode 100644 index af9e6ba50eb52d..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/ComplexAbs_test.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -import unittest - -import numpy as np - -from 
openvino.tools.mo.front.tf.ComplexAbs import ComplexAbs -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -graph_node_attrs = { - 'placeholder': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'complex_abs': {'kind': 'op', 'op': 'ComplexAbs'}, - 'relu': {'type': 'ReLU', 'kind': 'op', 'op': 'ReLU'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, -} - -graph_edges = [ - ('placeholder', 'complex_abs'), - ('complex_abs', 'relu'), - ('relu', 'output'), -] - - -ref_graph_node_attrs = { - 'placeholder': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'pow2_const': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': np.float32(2.0) - }, - 'pow2': {'type': 'Power', 'kind': 'op', 'op': 'Pow'}, - 'sum_axis': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': int64_array(-1) - }, - 'sum': {'type': 'ReduceSum', 'kind': 'op', 'op': 'ReduceSum'}, - 'sqrt_const': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': np.float32(0.5) - }, - 'sqrt': {'type': 'Power', 'kind': 'op', 'op': 'Pow'}, - 'relu': {'type': 'ReLU', 'kind': 'op', 'op': 'ReLU'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, -} - -ref_graph_edges = [ - ('placeholder', 'pow2', {'in': 0}), - ('pow2_const', 'pow2', {'in': 1}), - ('sum_axis', 'sum', {'in': 1}), - ('pow2', 'sum', {'in': 0}), - ('sum', 'sqrt', {'in': 0}), - ('sqrt_const', 'sqrt', {'in': 1}), - ('sqrt', 'relu'), - ('relu', 'output'), -] - - -class ComplexAbsTest(unittest.TestCase): - def test_replacement(self): - graph = build_graph(nodes_attrs=graph_node_attrs, edges=graph_edges) - graph.stage = 'front' - ComplexAbs().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=ref_graph_node_attrs, edges=ref_graph_edges) - (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/CorrectPaddingsForPadAfterComplex_test.py b/tools/mo/unit_tests/mo/front/tf/CorrectPaddingsForPadAfterComplex_test.py deleted file mode 100644 index 29800e400e7557..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/CorrectPaddingsForPadAfterComplex_test.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -import unittest -import numpy as np - - -from openvino.tools.mo.front.tf.CorrectPaddingsForPadAfterComplex import CorrectPaddingsForPadAfterComplex -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - - -graph_node_attrs = { - 'placeholder_real': {'shape': int64_array([3, 100, 67]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_imag': {'shape': int64_array([3, 100, 67]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'complex': {'kind': 'op', 'op': 'Complex'}, - 'pad': {'type': 'Pad', 'kind': 'op', 'op': 'Pad', 'mode': 'constant'}, - **const('pad_begin', int64_array([1, 3, 5])), - **const('pad_end', int64_array([2, 4, 6])), - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, -} - -graph_edges = [ - ('placeholder_real', 'complex', {'in': 0}), - 
('placeholder_imag', 'complex', {'in': 1}), - ('complex', 'pad', {'in': 0, 'out': 0}), - ('pad_begin', 'pad', {'in': 1, 'out': 0}), - ('pad_end', 'pad', {'in': 2, 'out': 0}), - ('pad', 'abs'), - ('abs', 'output'), -] - - -ref_graph_node_attrs = { - 'placeholder_real': {'shape': int64_array([3, 100, 67]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_imag': {'shape': int64_array([3, 100, 67]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'complex': {'kind': 'op', 'op': 'Complex'}, - 'pad': {'type': 'Pad', 'kind': 'op', 'op': 'Pad', 'mode': 'constant'}, - **const('pad_begin', int64_array([1, 3, 5])), - **const('pad_end', int64_array([2, 4, 6])), - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - **const('additional_pad_begin', int64_array([0])), - **const('additional_pad_end', int64_array([0])), - 'concat_for_pad_begin': {'kind': 'op', 'op': 'Concat', 'type': 'Concat', 'axis': 0}, - 'concat_for_pad_end': {'kind': 'op', 'op': 'Concat', 'type': 'Concat', 'axis': 0}, -} - -ref_graph_edges = [ - ('placeholder_real', 'complex', {'in': 0}), - ('placeholder_imag', 'complex', {'in': 1}), - ('complex', 'pad', {'in': 0, 'out': 0}), - ('pad_begin', 'concat_for_pad_begin', {'in': 0, 'out': 0}), - ('additional_pad_begin', 'concat_for_pad_begin', {'in': 1, 'out': 0}), - ('pad_end', 'concat_for_pad_end', {'in': 0, 'out': 0}), - ('additional_pad_end', 'concat_for_pad_end', {'in': 1, 'out': 0}), - ('concat_for_pad_begin', 'pad', {'in': 1, 'out': 0}), - ('concat_for_pad_end', 'pad', {'in': 2, 'out': 0}), - ('pad', 'abs'), - ('abs', 'output'), -] - - -class CorrectPaddingsForPadAfterComplexTest(unittest.TestCase): - def test_replacement(self): - graph = build_graph(nodes_attrs=graph_node_attrs, edges=graph_edges) - graph.stage = 'front' - CorrectPaddingsForPadAfterComplex().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=ref_graph_node_attrs, edges=ref_graph_edges) - (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/IteratorGetNextCut_test.py b/tools/mo/unit_tests/mo/front/tf/IteratorGetNextCut_test.py deleted file mode 100644 index 6654f1bee8e6a5..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/IteratorGetNextCut_test.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.tf.IteratorGetNextCut import IteratorGetNextCut -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_edge_attrs - - -class IteratorGetNextAnalysisTest(unittest.TestCase): - - def test_one_output(self): - graph = build_graph_with_edge_attrs( - { - 'iter_get_next': {'kind': 'op', 'op': 'IteratorGetNext', 'shapes': shape_array([[2, 2]]), - 'types': [np.int32]}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - }, - [ - ('iter_get_next', 'sub', {'out': 0, 'in': 0}), - ] - ) - - graph_ref = build_graph_with_edge_attrs( - { - 'parameter_1': {'kind': 'op', 'op': 'Parameter', 'shape': shape_array([2, 2]), 'type': np.int32}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - }, - [ - ('parameter_1', 'sub', {'out': 0, 'in': 0}), - ] - ) - - IteratorGetNextCut().find_and_replace_pattern(graph) - - 
flag, msg = compare_graphs(graph, graph_ref, last_node='sub') - self.assertTrue(flag, msg) - - def test_two_outputs(self): - graph = build_graph_with_edge_attrs( - { - 'iter_get_next': {'kind': 'op', 'op': 'IteratorGetNext', 'shapes': [shape_array([2, 2]), - shape_array([1, 1])], - 'types': [np.int32, np.float32]}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'add': {'kind': 'op', 'op': 'Add'}, - 'concat': {'kind': 'op', 'op': 'Concat'} - }, - [ - ('iter_get_next', 'sub', {'out': 0, 'in': 0}), - ('iter_get_next', 'add', {'out': 1, 'in': 0}), - ('sub', 'concat', {'out': 0, 'in': 0}), - ('add', 'concat', {'out': 0, 'in': 1}) - ] - ) - - graph_ref = build_graph_with_edge_attrs( - { - 'parameter_1': {'kind': 'op', 'op': 'Parameter', 'shape': shape_array([2, 2]), 'data_type': np.int32}, - 'parameter_2': {'kind': 'op', 'op': 'Parameter', 'shape': shape_array([1, 1]), 'data_type': np.float32}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'add': {'kind': 'op', 'op': 'Add'}, - 'concat': {'kind': 'op', 'op': 'Concat'} - }, - [ - ('parameter_1', 'sub', {'out': 0, 'in': 0}), - ('parameter_2', 'add', {'out': 0, 'in': 0}), - ('sub', 'concat', {'out': 0, 'in': 0}), - ('add', 'concat', {'out': 0, 'in': 1}) - ] - ) - - IteratorGetNextCut().find_and_replace_pattern(graph) - - flag, msg = compare_graphs(graph, graph_ref, last_node='concat', check_op_attrs=True) - self.assertTrue(flag, msg) - - def test_unsupported_data_type(self): - graph = build_graph_with_edge_attrs( - { - 'iter_get_next': {'kind': 'op', 'op': 'IteratorGetNext', 'shapes': [shape_array([2, 2])], - 'types': [None]}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'result': {'kind': 'op', 'op': 'Result'} - }, - [ - ('iter_get_next', 'sub', {'out': 0, 'in': 0}), - ('sub', 'result', {'out': 0, 'in': 0}), - ] - ) - - self.assertRaises(Error, IteratorGetNextCut().find_and_replace_pattern, graph) diff --git a/tools/mo/unit_tests/mo/front/tf/NonConstBeginStridedSliceReplacement_test.py b/tools/mo/unit_tests/mo/front/tf/NonConstBeginStridedSliceReplacement_test.py deleted file mode 100644 index a603fdd6af7e9c..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/NonConstBeginStridedSliceReplacement_test.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.tf.NonConstBeginStridedSliceReplacement import NonConstBeginStridedSliceReplacement -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - - -class NonConstBeginStridedSliceReplacementTests(unittest.TestCase): - def test1(self): - nodes_attributes = { - # nodes from original graph - 'input': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'index': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'add': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - **const('slice_size', int64_array(1)), - 'begin': {'type': 'Pack', 'kind': 'op', 'op': 'Pack'}, - **const('begin_1', int64_array(0)), - **const('begin_3', int64_array(0)), - 'end': {'type': 'Pack', 'kind': 'op', 'op': 'Pack'}, - **const('end_1', int64_array(0)), - **const('end_3', int64_array(0)), - **const('step', int64_array([1, 1, 1])), - 'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice', - 'begin_mask': int64_array([0, 1, 0]), 'end_mask': int64_array([0, 1, 0]), - 'shrink_axis_mask': int64_array([0, 1, 0]), 'name': 'non_const_begin_strided_slice'}, - 
'result': {'type': 'Result', 'kind': 'op', 'op': 'Result'}, - - # nodes from the reference graph - 'unsqueeze': {'type': 'Unsqueeze', 'kind': 'op', 'op': 'Unsqueeze'}, - **const('unsqueeze_axis', int64_array(0)), - 'gather': {'type': 'Gather', 'kind': 'op', 'op': 'Gather'}, - **const('gather_axis', int64_array(1)), - 'squeeze': {'type': 'Squeeze', 'kind': 'op', 'op': 'Squeeze'}, - **const('squeeze_axis', int64_array(1)), - } - - graph = build_graph(nodes_attributes, - [('input', 'strided_slice', {'out': 0, 'in': 0}), - ('begin_1', 'begin', {'out': 0, 'in': 0}), - ('index', 'begin', {'out': 0, 'in': 1}), - ('begin_3', 'begin', {'out': 0, 'in': 2}), - ('begin', 'strided_slice', {'out': 0, 'in': 1}), - ('end_1', 'end', {'out': 0, 'in': 0}), - ('index', 'add', {'out': 0, 'in': 0}), - ('slice_size', 'add', {'out': 0, 'in': 1}), - ('add', 'end', {'out': 0, 'in': 1}), - ('end_3', 'end', {'out': 0, 'in': 2}), - ('end', 'strided_slice', {'out': 0, 'in': 2}), - ('step', 'strided_slice', {'out': 0, 'in': 3}), - ('strided_slice', 'result', {'out': 0, 'in': 0}), - ], nodes_with_edges_only=True) - graph.stage = 'front' - NonConstBeginStridedSliceReplacement().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attributes, - [('input', 'gather', {'out': 0, 'in': 0}), - ('gather_axis', 'gather', {'out': 0, 'in': 2}), - ('index', 'unsqueeze', {'out': 0, 'in': 0}), - ('unsqueeze_axis', 'unsqueeze', {'out': 0, 'in': 1}), - ('unsqueeze', 'gather', {'out': 0, 'in': 1}), - ('gather', 'squeeze', {'out': 0, 'in': 0}), - ('squeeze_axis', 'squeeze', {'out': 0, 'in': 1}), - ('squeeze', 'result', {'out': 0, 'in': 0}), - ], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Squeeze')[0]]['name'] == - 'non_const_begin_strided_slice') - - def test2_not_applied_transform(self): - # the transformation is not applied if begin and end are constant - nodes_attributes = { - # nodes from original graph - 'input': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'begin': {'type': 'Pack', 'kind': 'op', 'op': 'Pack'}, - **const('begin_1', int64_array(0)), - **const('begin_2', int64_array(0)), - **const('begin_3', int64_array(0)), - 'end': {'type': 'Pack', 'kind': 'op', 'op': 'Pack'}, - **const('end_1', int64_array(0)), - **const('end_2', int64_array(3)), - **const('end_3', int64_array(0)), - **const('step', int64_array([1, 1, 1])), - 'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice', - 'begin_mask': int64_array([0, 1, 0]), 'end_mask': int64_array([0, 1, 0]), - 'shrink_axis_mask': int64_array([0, 1, 0]), 'name': 'non_const_begin_strided_slice'}, - 'result': {'type': 'Result', 'kind': 'op', 'op': 'Result'}, - } - - graph = build_graph(nodes_attributes, - [('input', 'strided_slice', {'out': 0, 'in': 0}), - ('begin_1', 'begin', {'out': 0, 'in': 0}), - ('begin_2', 'begin', {'out': 0, 'in': 1}), - ('begin_3', 'begin', {'out': 0, 'in': 2}), - ('begin', 'strided_slice', {'out': 0, 'in': 1}), - ('end_1', 'end', {'out': 0, 'in': 0}), - ('end_2', 'end', {'out': 0, 'in': 1}), - ('end_3', 'end', {'out': 0, 'in': 2}), - ('end', 'strided_slice', {'out': 0, 'in': 2}), - ('step', 'strided_slice', {'out': 0, 'in': 3}), - ('strided_slice', 'result', {'out': 0, 'in': 0}), - ], nodes_with_edges_only=True) - graph.stage = 'front' - NonConstBeginStridedSliceReplacement().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attributes, - 
[('input', 'strided_slice', {'out': 0, 'in': 0}), - ('begin_1', 'begin', {'out': 0, 'in': 0}), - ('begin_2', 'begin', {'out': 0, 'in': 1}), - ('begin_3', 'begin', {'out': 0, 'in': 2}), - ('begin', 'strided_slice', {'out': 0, 'in': 1}), - ('end_1', 'end', {'out': 0, 'in': 0}), - ('end_2', 'end', {'out': 0, 'in': 1}), - ('end_3', 'end', {'out': 0, 'in': 2}), - ('end', 'strided_slice', {'out': 0, 'in': 2}), - ('step', 'strided_slice', {'out': 0, 'in': 3}), - ('strided_slice', 'result', {'out': 0, 'in': 0})], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/ObjectDetectionAPI_test.py b/tools/mo/unit_tests/mo/front/tf/ObjectDetectionAPI_test.py deleted file mode 100644 index d8abd85e2a3268..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/ObjectDetectionAPI_test.py +++ /dev/null @@ -1,349 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from argparse import Namespace -from unittest.mock import patch -import os - -import pytest - -from openvino.tools.mo.front.tf.ObjectDetectionAPI import calculate_shape_keeping_aspect_ratio, \ - calculate_placeholder_spatial_shape, ObjectDetectionAPIPreprocessor2Replacement -from openvino.tools.mo.front.common.partial_infer.utils import float32_array -from openvino.tools.mo.front.subgraph_matcher import SubgraphMatch -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.utils.custom_replacement_config import CustomReplacementDescriptor -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.mo.utils.pipeline_config_test import file_content -from unit_tests.utils.graph import const, regular_op, result, build_graph, connect_front -from openvino.runtime import PartialShape - - -class FakePipelineConfig: - def __init__(self, model_params: dict): - self._model_params = model_params - - def get_param(self, param: str): - if param not in self._model_params: - return None - return self._model_params[param] - - -class TestCalculateShape(): - min_size = 600 - max_size = 1024 - - @pytest.mark.parametrize("h, w, th, tw",[(100, 300, 341, 1024), - (100, 600, 171, 1024), - (100, 3000, 34, 1024), - (300, 300, 600, 600), - (300, 400, 600, 800), - (300, 600, 512, 1024), - (1000, 2500, 410, 1024), - (1800, 2000, 600, 667), - (300, 100, 1024, 341), - (600, 100, 1024, 171), - (3000, 100, 1024, 34), - (400, 300, 800, 600), - (600, 300, 1024, 512), - (2500, 1000, 1024, 410), - (2000, 1800, 667, 600), - ]) - def test_calculate_shape(self, h, w, th, tw): - assert calculate_shape_keeping_aspect_ratio(h, w, self.min_size, self.max_size) == (th, tw) - - -class TestCalculatePlaceholderSpatialShape(unittest.TestCase): - def setUp(self): - self.graph = Graph() - self.graph.graph['user_shapes'] = None - self.replacement_desc = CustomReplacementDescriptor('dummy_id', {}) - self.match = SubgraphMatch(self.graph, self.replacement_desc, [], [], [], '') - self.pipeline_config = FakePipelineConfig({}) - - def test_default_fixed_shape_resizer(self): - self.pipeline_config._model_params['resizer_image_height'] = 300 - self.pipeline_config._model_params['resizer_image_width'] = 600 - self.assertTupleEqual((300, 600), - calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config)) - - def test_fixed_shape_resizer_overrided_by_user(self): - 
self.pipeline_config._model_params['resizer_image_height'] = 300 - self.pipeline_config._model_params['resizer_image_width'] = 600 - self.graph.graph['user_shapes'] = {'image_tensor': [{'shape': PartialShape([1, 400, 500, 3])}]} - self.assertTupleEqual((400, 500), - calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config)) - - def test_default_keep_aspect_ratio_resizer(self): - self.pipeline_config._model_params['resizer_min_dimension'] = 600 - self.pipeline_config._model_params['resizer_max_dimension'] = 1024 - self.assertTupleEqual((600, 600), - calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config)) - - def test_keep_aspect_ratio_resizer_overrided_by_user(self): - self.pipeline_config._model_params['resizer_min_dimension'] = 600 - self.pipeline_config._model_params['resizer_max_dimension'] = 1024 - self.graph.graph['user_shapes'] = {'image_tensor': [{'shape': PartialShape([1, 400, 300, 3])}]} - self.assertTupleEqual((800, 600), - calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config)) - - def test_keep_aspect_ratio_resizer_overrided_by_user_pad(self): - self.pipeline_config._model_params['resizer_min_dimension'] = 600 - self.pipeline_config._model_params['resizer_max_dimension'] = 1024 - self.pipeline_config._model_params['pad_to_max_dimension'] = True - self.graph.graph['user_shapes'] = {'image_tensor': [{'shape': PartialShape([1, 400, 300, 3])}]} - self.assertTupleEqual((1024, 1024), - calculate_placeholder_spatial_shape(self.graph, self.match, self.pipeline_config)) - - def test_missing_input_shape_information(self): - self.assertRaises(Error, calculate_placeholder_spatial_shape, self.graph, self.match, self.pipeline_config) - - -@patch('openvino.tools.mo.front.tf.ObjectDetectionAPI.update_parameter_shape') -class TestObjectDetectionAPIPreprocessor2Replacement(unittest.TestCase): - def setUp(self): - self.start_node_name = 'StatefulPartitionedCall/Preprocessor/unstack' - self.end_node_name = 'StatefulPartitionedCall/Preprocessor/stack' - self.end_node_name2 = 'StatefulPartitionedCall/Preprocessor/stack2' - self.loop_start_node_name = 'prefix/map/while/Preprocessor/unstack' - self.loop_end_node_name = 'prefix/map/while/Preprocessor/stack' - self.mul_const = float32_array([0.025, 0.374, -0.45]) - self.sub_const = float32_array([2.0, 3.0, 4.0]) - - self.nodes = { - **regular_op('input', {'op': 'Parameter', 'type': 'Parameter'}), - - **regular_op('mul', {'op': 'Mul', 'type': 'Multiply', 'name': 'my_mul'}), - **regular_op('sub', {'op': 'Sub', 'type': 'Subtract', 'name': 'my_sub'}), - **const('mul_const', self.mul_const), - **const('sub_const', self.sub_const), - - **regular_op(self.start_node_name, {'op': 'Identity'}), - **regular_op(self.end_node_name, {'op': 'Identity'}), - **regular_op(self.end_node_name2, {'op': 'Identity'}), - - **regular_op('loop', {'op': 'Loop', 'body': None}), - - **regular_op('resize', {'type': 'Interpolate'}), - **result('result'), - } - self.replacement_desc = {'start_nodes': [self.start_node_name], - 'end_nodes': [self.end_node_name, self.end_node_name2]} - - def build_ref_graph(self, preprocessing: bool): - if preprocessing: - ref_edges = [*connect_front('input', '0:mul'), - *connect_front('mul_const', '1:mul'), - *connect_front('sub_const', '0:sub'), - *connect_front('mul', '1:sub'), - *connect_front('sub', 'result'), - ] - else: - ref_edges = [*connect_front('input', 'result')] - ref_graph = build_graph(self.nodes, ref_edges, nodes_with_edges_only=True) - ref_graph.stage = 
'front' - return ref_graph - - def test_case_1_pad_to_max_dim(self, update_parameter_shape_mock): - # test for case #1 described in the ObjectDetectionAPIPreprocessor2Replacement - # sub/mul should be removed because they are applied before prep-processing and pad_to_max_dimension is True - update_parameter_shape_mock.return_value = (None, None) - edges = [*connect_front('input', '0:mul'), - *connect_front('mul_const', '1:mul'), - *connect_front('sub_const', '0:sub'), - *connect_front('mul', '1:sub'), - *connect_front('sub', self.start_node_name), - *connect_front(self.start_node_name, 'resize'), - *connect_front('resize', self.end_node_name), - *connect_front(self.end_node_name, 'result'), - ] - graph = build_graph(self.nodes, edges) - graph.stage = 'front' - graph.graph['cmd_params'] = Namespace(tensorflow_object_detection_api_pipeline_config=__file__) - - with unittest.mock.patch('builtins.open', unittest.mock.mock_open(read_data=file_content)): - ObjectDetectionAPIPreprocessor2Replacement().transform_graph(graph, self.replacement_desc) - - (flag, resp) = compare_graphs(graph, self.build_ref_graph(False), 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_case_1_no_pad_to_max_dim(self, update_parameter_shape_mock): - # test for case #1 described in the ObjectDetectionAPIPreprocessor2Replacement - # sub/mul should be kept even though they are applied before prep-processing and pad_to_max_dimension is False - update_parameter_shape_mock.return_value = (None, None) - edges = [*connect_front('input', '0:mul'), - *connect_front('mul_const', '1:mul'), - *connect_front('sub_const', '0:sub'), - *connect_front('mul', '1:sub'), - *connect_front('sub', self.start_node_name), - *connect_front(self.start_node_name, 'resize'), - *connect_front('resize', self.end_node_name), - *connect_front(self.end_node_name, 'result'), - ] - graph = build_graph(self.nodes, edges) - graph.stage = 'front' - graph.graph['cmd_params'] = Namespace(tensorflow_object_detection_api_pipeline_config=__file__) - - updated_pipeline_config_content = file_content.replace('pad_to_max_dimension: true', - 'pad_to_max_dimension: false') - with unittest.mock.patch('builtins.open', unittest.mock.mock_open(read_data=updated_pipeline_config_content)): - ObjectDetectionAPIPreprocessor2Replacement().transform_graph(graph, self.replacement_desc) - - (flag, resp) = compare_graphs(graph, self.build_ref_graph(True), 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_case_2(self, update_parameter_shape_mock): - # test for case #2 described in the ObjectDetectionAPIPreprocessor2Replacement - update_parameter_shape_mock.return_value = (None, None) - - edges = [*connect_front('input', self.start_node_name), - *connect_front(self.start_node_name, 'resize'), - *connect_front('resize', self.end_node_name), - *connect_front(self.end_node_name, '0:mul'), - *connect_front('mul_const', '1:mul'), - *connect_front('sub_const', '0:sub'), - *connect_front('mul', '1:sub'), - *connect_front('sub', 'result'), - ] - graph = build_graph(self.nodes, edges) - graph.stage = 'front' - graph.graph['cmd_params'] = Namespace(tensorflow_object_detection_api_pipeline_config=__file__) - - with unittest.mock.patch('builtins.open', unittest.mock.mock_open(read_data=file_content)): - ObjectDetectionAPIPreprocessor2Replacement().transform_graph(graph, self.replacement_desc) - - (flag, resp) = compare_graphs(graph, self.build_ref_graph(True), 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_case_3(self, 
update_parameter_shape_mock): - # test for case #3 described in the ObjectDetectionAPIPreprocessor2Replacement - update_parameter_shape_mock.return_value = (None, None) - - edges = [*connect_front('input', self.start_node_name), - *connect_front(self.start_node_name, 'resize'), - *connect_front('resize', self.end_node_name), - *connect_front(self.end_node_name, 'result'), - ] - graph = build_graph(self.nodes, edges) - graph.stage = 'front' - graph.graph['cmd_params'] = Namespace(tensorflow_object_detection_api_pipeline_config=__file__) - - with unittest.mock.patch('builtins.open', unittest.mock.mock_open(read_data=file_content)): - ObjectDetectionAPIPreprocessor2Replacement().transform_graph(graph, self.replacement_desc) - - (flag, resp) = compare_graphs(graph, self.build_ref_graph(False), 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def build_main_graph(self, pre_processing: str): - def build_body_graph(pre_processing: str): - nodes = { - **regular_op('input', {'type': 'Parameter', 'op': 'Parameter'}), - - **regular_op('mul', {'op': 'Mul', 'type': 'Multiply', 'name': 'my_body_mul'}), - **regular_op('sub', {'op': 'Sub', 'type': 'Subtract', 'name': 'my_body_sub'}), - **const('body_mul_const', self.mul_const), - **const('body_sub_const', self.sub_const), - - **regular_op(self.loop_start_node_name, {'op': 'Identity'}), - **regular_op(self.loop_end_node_name, {'op': 'Identity'}), - - **regular_op('resize', {'type': 'Interpolate'}), - **result('result'), - } - if pre_processing == 'no': - edges = [*connect_front('input', self.loop_start_node_name), - *connect_front(self.loop_start_node_name, 'resize'), - *connect_front('resize', self.loop_end_node_name), - *connect_front(self.loop_end_node_name, 'result'), - ] - elif pre_processing == 'trailing': - edges = [*connect_front('input', self.loop_start_node_name), - *connect_front(self.loop_start_node_name, 'resize'), - *connect_front('resize', self.loop_end_node_name), - *connect_front(self.loop_end_node_name, '0:mul'), - *connect_front('body_mul_const', '1:mul'), - *connect_front('body_sub_const', '0:sub'), - *connect_front('mul', '1:sub'), - *connect_front('sub', 'result'), - ] - else: - edges = [*connect_front('input', '0:mul'), - *connect_front('body_mul_const', '1:mul'), - *connect_front('body_sub_const', '0:sub'), - *connect_front('mul', '1:sub'), - *connect_front('sub', self.loop_start_node_name), - *connect_front(self.loop_start_node_name, 'resize'), - *connect_front('resize', self.loop_end_node_name), - *connect_front(self.loop_end_node_name, 'result'), - ] - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - graph.stage = 'front' - return graph - - edges = [*connect_front('input', self.start_node_name), - *connect_front(self.start_node_name, 'loop'), - *connect_front('loop:0', self.end_node_name), - *connect_front('loop:1', self.end_node_name2), - *connect_front(self.end_node_name, 'result'), - ] - graph = build_graph(self.nodes, edges, {'loop': {'body': build_body_graph(pre_processing)}}, - nodes_with_edges_only=True) - graph.stage = 'front' - return graph - - def test_case_4(self, update_parameter_shape_mock): - # test for case #4 described in the ObjectDetectionAPIPreprocessor2Replacement - update_parameter_shape_mock.return_value = (None, None) - - graph = self.build_main_graph('leading') - graph.graph['cmd_params'] = Namespace(tensorflow_object_detection_api_pipeline_config=__file__) - - with unittest.mock.patch('builtins.open', unittest.mock.mock_open(read_data=file_content)): - 
ObjectDetectionAPIPreprocessor2Replacement().transform_graph(graph, self.replacement_desc) - - (flag, resp) = compare_graphs(graph, self.build_ref_graph(True), 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_case_5(self, update_parameter_shape_mock): - # test for case #5 described in the ObjectDetectionAPIPreprocessor2Replacement - update_parameter_shape_mock.return_value = (None, None) - - graph = self.build_main_graph('trailing') - graph.graph['cmd_params'] = Namespace(tensorflow_object_detection_api_pipeline_config=__file__) - - with unittest.mock.patch('builtins.open', unittest.mock.mock_open(read_data=file_content)): - ObjectDetectionAPIPreprocessor2Replacement().transform_graph(graph, self.replacement_desc) - - (flag, resp) = compare_graphs(graph, self.build_ref_graph(True), 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_case_6(self, update_parameter_shape_mock): - # test for case #6 described in the ObjectDetectionAPIPreprocessor2Replacement - update_parameter_shape_mock.return_value = (None, None) - - graph = self.build_main_graph('no') - graph.graph['cmd_params'] = Namespace(tensorflow_object_detection_api_pipeline_config=__file__) - - with unittest.mock.patch('builtins.open', unittest.mock.mock_open(read_data=file_content)): - ObjectDetectionAPIPreprocessor2Replacement().transform_graph(graph, self.replacement_desc) - - (flag, resp) = compare_graphs(graph, self.build_ref_graph(False), 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - -class TestPipelineConfig(unittest.TestCase): - def test_pipeline_config_loading(self): - from openvino.tools.mo.utils.pipeline_config import PipelineConfig - pipeline_config = PipelineConfig(os.path.join(os.path.dirname(__file__), "test_configs/config1.config")) - assert pipeline_config.get_param('ssd_anchor_generator_num_layers') == 6 - assert pipeline_config.get_param('num_classes') == 90 - assert pipeline_config.get_param('resizer_image_width') == 300 - assert pipeline_config.get_param('resizer_image_height') == 300 - - pipeline_config = PipelineConfig(os.path.join(os.path.dirname(__file__), "test_configs/config2.config")) - assert pipeline_config.get_param('ssd_anchor_generator_num_layers') is None - assert pipeline_config.get_param('num_classes') == 10 - assert pipeline_config.get_param('resizer_image_width') == 640 - assert pipeline_config.get_param('resizer_image_height') == 640 diff --git a/tools/mo/unit_tests/mo/front/tf/RFFTRealImagToRFFTSplit_test.py b/tools/mo/unit_tests/mo/front/tf/RFFTRealImagToRFFTSplit_test.py deleted file mode 100644 index e3789e8bec24dd..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/RFFTRealImagToRFFTSplit_test.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.RFFTRealImagToRFFTSplit import RFFTRealImagToRDFTSplit -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -graph_node_attrs = { - 'placeholder': {'shape': int64_array([3, 192, 36, 64]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'rfft': {'kind': 'op', 'op': 'TFFFT', 'num_of_dimensions': 2, 'fft_kind': 'RDFT'}, - 'real': {'kind': 'op', 'op': 'Real'}, - 'imag': {'kind': 'op', 'op': 'Imag'}, - 'real_sigmoid': {'type': 'Sigmoid', 'kind': 'op', 'op': 'Sigmoid'}, - 'imag_sigmoid': {'type': 'Sigmoid', 'kind': 
'op', 'op': 'Sigmoid'}, - 'rfft_lengths': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([36, 33]) - }, - 'add': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, -} - -graph_edges = [ - ('placeholder', 'rfft', {'in': 0}), - ('rfft', 'real', {'out': 0, 'in': 0}), - ('rfft', 'imag', {'out': 0, 'in': 0}), - ('real', 'real_sigmoid', {'in': 0}), - ('imag', 'imag_sigmoid', {'in': 0}), - ('real_sigmoid', 'add', {'in': 0}), - ('imag_sigmoid', 'add', {'in': 1}), - ('rfft_lengths', 'rfft', {'in': 1}), - ('add', 'abs'), - ('abs', 'output'), -] - - -ref_graph_node_attrs = { - 'placeholder': {'shape': int64_array([3, 192, 36, 64]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'rfft': {'kind': 'op', 'op': 'TFFFT', 'num_of_dimensions': 2, 'fft_kind': 'RDFT'}, - 'real': {'kind': 'op', 'op': 'Real'}, - 'imag': {'kind': 'op', 'op': 'Imag'}, - 'real_sigmoid': {'type': 'Sigmoid', 'kind': 'op', 'op': 'Sigmoid'}, - 'imag_sigmoid': {'type': 'Sigmoid', 'kind': 'op', 'op': 'Sigmoid'}, - 'rfft_lengths': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([36, 33]) - }, - 'add': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - 'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 2}, - 'split_axis': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array(-1).shape, 'value': int64_array(-1) - }, - 'real_squeeze': {'type': 'Squeeze', 'kind': 'op', 'op': 'Squeeze'}, - 'imag_squeeze': {'type': 'Squeeze', 'kind': 'op', 'op': 'Squeeze'}, - 'real_squeeze_axis': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array(-1).shape, 'value': int64_array(-1) - }, - 'imag_squeeze_axis': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array(-1).shape, 'value': int64_array(-1) - }, -} - -ref_graph_edges = [ - ('placeholder', 'rfft', {'in': 0}), - ('rfft', 'split', {'in': 0, 'out': 0}), - ('split_axis', 'split', {'in': 1}), - ('split', 'real_squeeze', {'in': 0, 'out': 0}), - ('split', 'imag_squeeze', {'in': 0, 'out': 1}), - ('real_squeeze_axis', 'real_squeeze', {'in': 1}), - ('imag_squeeze_axis', 'imag_squeeze', {'in': 1}), - ('rfft_lengths', 'rfft', {'in': 1}), - ('real_squeeze', 'real_sigmoid', {'in': 0}), - ('imag_squeeze', 'imag_sigmoid', {'in': 0}), - ('real_sigmoid', 'add', {'in': 0}), - ('imag_sigmoid', 'add', {'in': 1}), - ('add', 'abs'), - ('abs', 'output'), -] - - -class TestRFFTRealImagToRFFTSplitTest(): - @pytest.mark.parametrize("num_of_dims",[1, 2, 3]) - def test_replacement(self, num_of_dims): - graph = build_graph(nodes_attrs=graph_node_attrs, - edges=graph_edges, - update_attributes={ - 'rfft': {'num_of_dimensions': num_of_dims} - }) - graph.stage = 'front' - RFFTRealImagToRDFTSplit().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=ref_graph_node_attrs, - edges=ref_graph_edges, - update_attributes={ - 'rfft': {'num_of_dimensions': num_of_dims} - }) - (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/tf/RollRealImagPack_test.py b/tools/mo/unit_tests/mo/front/tf/RollRealImagPack_test.py deleted file mode 100644 index 155efb4dbbc787..00000000000000 --- 
a/tools/mo/unit_tests/mo/front/tf/RollRealImagPack_test.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -import unittest - -from openvino.tools.mo.front.tf.RollRealImagPack import RollRealImagPack -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -graph_node_attrs = { - 'placeholder': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'unroll': {'kind': 'op', 'op': 'Roll', 'type': 'Roll'}, - 'real': {'kind': 'op', 'op': 'Real'}, - 'imag': {'kind': 'op', 'op': 'Imag'}, - 'pack': {'kind': 'op', 'op': 'Pack'}, - 'unroll_shift': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([50, 50]) - }, - 'unroll_axes': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([-2, -1]) - }, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, -} - -graph_edges = [ - ('placeholder', 'unroll', {'in': 0}), - ('unroll', 'real', {'out': 0, 'in': 0}), - ('unroll', 'imag', {'out': 0, 'in': 0}), - ('real', 'pack', {'in': 0}), - ('imag', 'pack', {'in': 1}), - ('pack', 'abs'), - ('abs', 'output'), - ('unroll_shift', 'unroll', {'in': 1}), - ('unroll_axes', 'unroll', {'in': 2}), -] - - -ref_graph_node_attrs = { - 'placeholder': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'unroll': {'kind': 'op', 'op': 'Roll', 'type': 'Roll'}, - 'unroll_shift': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([50, 50]) - }, - 'unroll_axes': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([-2, -1]) - }, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - 'add': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - 'mul': {'type': 'Multiply', 'kind': 'op', 'op': 'Mul'}, - 'less': {'type': 'Less', 'kind': 'op', 'op': 'Less'}, - 'zero': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([]), 'value': int64_array(0) - }, - 'minus_one': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([]), 'value': int64_array(-1) - }, -} - -ref_graph_edges = [ - ('placeholder', 'unroll', {'out': 0, 'in': 0}), - ('unroll', 'abs'), - ('abs', 'output'), - ('unroll_shift', 'unroll', {'in': 1}), - ('unroll_axes', 'unroll', {'in': 2}), - - ('mul', 'add', {'in': 1}), - ('add', 'unroll', {'in': 2}), - ('zero', 'less', {'in': 1}), - ('minus_one', 'mul', {'in': 1}), - ('less', 'mul', {'in': 0}), - ('unroll_axes', 'less', {'out': 0, 'in': 0}), - ('unroll_axes', 'add', {'out': 0, 'in': 0}), -] - - -class RollRealImagPackTest(unittest.TestCase): - def test_replacement(self): - graph = build_graph(nodes_attrs=graph_node_attrs, edges=graph_edges) - graph.stage = 'front' - RollRealImagPack().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=ref_graph_node_attrs, edges=ref_graph_edges) - (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/SwitchMergeOptimization_test.py b/tools/mo/unit_tests/mo/front/tf/SwitchMergeOptimization_test.py deleted file mode 100644 index 
376eb62e135058..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/SwitchMergeOptimization_test.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.tf.SwitchMergeOptimization import SwitchMergeOptimization -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class SwitchMergeOptimizationTest(unittest.TestCase): - - def test(self): - nodes_attributes = { - 'switch_2_input': {'shape': int64_array([1, 3]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'switches_input': {'shape': int64_array([1, 3]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - - 'switch_input_0': {'kind': 'op', 'op': 'SomeOp'}, - 'switch_1_input_0': {'kind': 'op', 'op': 'SomeOp'}, - - 'switch': {'kind': 'op', 'op': 'Switch'}, - 'switch_1': {'kind': 'op', 'op': 'Switch'}, - 'switch_2': {'kind': 'op', 'op': 'Switch'}, - - 'some_op': {'kind': 'op', 'op': 'Max'}, - 'identity': {'kind': 'op', 'op': 'Identity'}, - - 'merge': {'kind': 'op', 'op': 'Merge'}, - - 'select': {'kind': 'op', 'op': 'Select'}, - - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - } - - # check two cases when switch_2 goes to 0-th and 1-st input port of the Merge - for merge_input_port in range(2): - graph = build_graph(nodes_attributes, - [('switch_2_input', 'switch_2', {'in': 0}), - ('switch_input_0', 'switch', {'in': 0}), - ('switch_1_input_0', 'switch_1', {'in': 0}), - ('switches_input', 'switch', {'in': 1, 'out': 0}), - ('switches_input', 'switch_1', {'in': 1, 'out': 0}), - ('switches_input', 'switch_2', {'in': 1, 'out': 0}), - ('switch', 'some_op', {'in': 0}), - ('switch_1', 'some_op', {'in': 1}), - ('some_op', 'identity', {'in': 0}), - ('switch_2', 'merge', {'in': merge_input_port}), - ('identity', 'merge', {'in': 1 - merge_input_port}), - ('merge', 'last', {'in': 0}), - ], nodes_with_edges_only=True) - graph.stage = 'front' - SwitchMergeOptimization().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attributes, - [('switches_input', 'select', {'in': 0}), - ('switch_2_input', 'select', {'in': 1}), - ('switch_input_0', 'some_op', {'in': 0}), - ('switch_1_input_0', 'some_op', {'in': 1}), - ('some_op', 'identity', {'in': 0}), - ('identity', 'select', {'in': 2}), - ('select', 'last', {'in': 0}), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/TFFFTToDFT_test.py b/tools/mo/unit_tests/mo/front/tf/TFFFTToDFT_test.py deleted file mode 100644 index 7e301d6f2e591b..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/TFFFTToDFT_test.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - - -import pytest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.TFFFTToDFT import TFFFTToDFT -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -dft_graph_node_attrs = { - 'placeholder': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'fft': {'kind': 'op', 'op': 'TFFFT', 'num_of_dimensions': 2, 'is_inverse': False}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 
'Abs'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, -} - -dft_graph_edges = [ - ('placeholder', 'fft', {'in': 0}), - ('fft', 'abs'), - ('abs', 'output'), -] - - -ref_dft_graph_node_attrs = { - 'placeholder': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'fft': {'kind': 'op', 'op': 'DFT'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - 'fft_axes': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([-2, -1]) - }, -} - -ref_dft_graph_edges = [ - ('placeholder', 'fft', {'in': 0}), - ('fft', 'abs'), - ('abs', 'output'), - ('fft_axes', 'fft', {'in': 1}), -] - - -dft_graph_with_signal_size_node_attrs = { - 'placeholder': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'fft': {'kind': 'op', 'op': 'TFFFT', 'num_of_dimensions': 2, 'is_inverse': False}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - 'signal_size': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([-2, -1]) - }, -} - -dft_graph_with_signal_size_edges = [ - ('placeholder', 'fft', {'in': 0}), - ('signal_size', 'fft', {'in': 1}), - ('fft', 'abs'), - ('abs', 'output'), -] - - -ref_dft_graph_with_signal_size_node_attrs = { - 'placeholder': {'shape': int64_array([3, 100, 100, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'fft': {'kind': 'op', 'op': 'DFT'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'output': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - 'fft_axes': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([-2, -1]) - }, - 'signal_size': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([2]), 'value': int64_array([-2, -1]) - }, -} - -ref_dft_graph_with_signal_size_edges = [ - ('placeholder', 'fft', {'in': 0}), - ('fft', 'abs'), - ('abs', 'output'), - ('fft_axes', 'fft', {'in': 1}), - ('signal_size', 'fft', {'in': 2}), -] - - -class TestTFFFTToDFTTest(): - @pytest.mark.parametrize("num_of_dimensions, dft_type, fft_axes",[(2, 'DFT', int64_array([-2, -1])), - (2, 'IDFT', int64_array([-2, -1])), - (1, 'DFT', int64_array([-1])), - (1, 'IDFT', int64_array([-1])), - (3, 'DFT', int64_array([-3, -2, -1])), - (3, 'IDFT', int64_array([-3, -2, -1])), - (2, 'RDFT', int64_array([-2, -1])), - (2, 'IRDFT', int64_array([-2, -1])), - (1, 'RDFT', int64_array([-1])), - (1, 'IRDFT', int64_array([-1])), - (3, 'RDFT', int64_array([-3, -2, -1])), - (3, 'IRDFT', int64_array([-3, -2, -1]))]) - def test_replacement(self, num_of_dimensions, dft_type, fft_axes): - graph = build_graph(nodes_attrs=dft_graph_node_attrs, - edges=dft_graph_edges, - update_attributes={ - 'fft': {'num_of_dimensions': num_of_dimensions, 'fft_kind': dft_type}, - }) - graph.stage = 'front' - graph.graph['layout'] = 'NHWC' - TFFFTToDFT().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=ref_dft_graph_node_attrs, - edges=ref_dft_graph_edges, - update_attributes={ - 'fft': {'kind': 'op', 'op': dft_type}, - 'fft_axes': {'value': fft_axes, 'shape': int64_array(fft_axes.shape)}, - }) - (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - assert flag, resp - - @pytest.mark.parametrize("num_of_dims, fft_kind, fft_axes, input_shape, signal_size",[ - (2, 
'RDFT', int64_array([-2, -1]), int64_array([3, 100, 100]), int64_array([100, -1])), - (2, 'IRDFT', int64_array([-2, -1]), int64_array([3, 100, 100, 2]), int64_array([100, -1])), - (2, 'RDFT', int64_array([-2, -1]), int64_array([3, 100, 100]), int64_array([95, 116])), - (2, 'IRDFT', int64_array([-2, -1]), int64_array([3, 100, 100, 2]), int64_array([95, 116])), - - (3, 'RDFT', int64_array([-3, -2, -1]), int64_array([3, 100, 100]), int64_array([5, 100, -1])), - (3, 'IRDFT', int64_array([-3, -2, -1]), int64_array([3, 100, 100, 2]), int64_array([5, 100, -1])), - (3, 'RDFT', int64_array([-3, -2, -1]), int64_array([3, 100, 100]), int64_array([5, 95, 116])), - (3, 'IRDFT', int64_array([-3, -2, -1]), int64_array([3, 100, 100, 2]), int64_array([5, 95, 116])), - - (1, 'RDFT', int64_array([-1]), int64_array([3, 100, 100]), int64_array([-1])), - (1, 'IRDFT', int64_array([-1]), int64_array([3, 100, 100, 2]), int64_array([-1])), - (1, 'RDFT', int64_array([-1]), int64_array([3, 100, 100]), int64_array([95])), - (1, 'IRDFT', int64_array([-1]), int64_array([3, 100, 100, 2]), int64_array([95])), - (1, 'RDFT', int64_array([-1]), int64_array([3, 100, 100]), int64_array([116])), - (1, 'IRDFT', int64_array([-1]), int64_array([3, 100, 100, 2]), int64_array([116])), - (1, 'RDFT', int64_array([-1]), int64_array([3, 100, 100]), int64_array([100])), - (1, 'IRDFT', int64_array([-1]), int64_array([3, 100, 100, 2]), int64_array([100])), - ]) - def test_replacement_for_signal_size(self, num_of_dims, fft_kind, fft_axes, input_shape, signal_size): - graph = build_graph(nodes_attrs=dft_graph_with_signal_size_node_attrs, - edges=dft_graph_with_signal_size_edges, - update_attributes={ - 'placeholder': {'shape': input_shape}, - 'signal_size': { - 'shape': signal_size.shape, 'value': signal_size - }, - 'fft': {'num_of_dimensions': num_of_dims, 'fft_kind': fft_kind}, - }) - graph.stage = 'front' - graph.graph['layout'] = 'NHWC' - TFFFTToDFT().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=ref_dft_graph_with_signal_size_node_attrs, - edges=ref_dft_graph_with_signal_size_edges, - update_attributes={ - 'placeholder': {'shape': input_shape}, - 'signal_size': { - 'shape': signal_size.shape, 'value': signal_size - }, - 'fft': {'kind': 'op', 'op': fft_kind}, - 'fft_axes': {'value': fft_axes, 'shape': int64_array(fft_axes.shape)}, - }) - (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/front/tf/TFSliceToSlice_test.py b/tools/mo/unit_tests/mo/front/tf/TFSliceToSlice_test.py deleted file mode 100644 index 96ebbad9f1ebb8..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/TFSliceToSlice_test.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.tf.TFSliceToSlice import TFSliceToSliceReplacer -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, const, connect_front - -nodes = { - **regular_op_with_empty_data('input', {'type': 'Parameter'}), - **regular_op_with_empty_data('tfslice', {'op': 'TFSlice', 'type': None}), - **const('begin', np.array(0)), - **const('size', np.array([-1])), - **regular_op_with_empty_data('john_doe', {'op': 'Sum', 'type': None}), - **result(), - - # nodes after replacement - **const('minus_one', np.array(-1)), - **regular_op_with_empty_data('shapeof', {'op': 
'ShapeOf', 'type': 'ShapeOf'}), - **regular_op_with_empty_data('end_const', {'op': 'Add', 'type': 'Add'}), - **regular_op_with_empty_data('equal', {'op': 'Equal', 'type': 'Equal'}), - **regular_op_with_empty_data('select', {'op': 'Select', 'type': 'Select'}), - **regular_op_with_empty_data('slice', {'op': 'Slice', 'type': None}), - **regular_op_with_empty_data('cast', {'op': 'Cast', 'type': 'Convert'}), -} - - -class SliceReplacerTest(unittest.TestCase): - def test_slice_replacer(self): - graph = build_graph(nodes_attrs=nodes, edges=[ - *connect_front('input:0', '0:tfslice'), - *connect_front('begin:0', '1:tfslice'), - *connect_front('size:0', '2:tfslice'), - *connect_front('tfslice:0', 'output'), - ], nodes_with_edges_only=True) - graph.stage = 'front' - - TFSliceToSliceReplacer().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attrs=nodes, edges=[ - *connect_front('input:0', 'slice'), - *connect_front('input:0', 'shapeof'), - *connect_front('begin:0', '1:slice'), - *connect_front('begin:0', '0:end_const'), - *connect_front('size:0', '1:end_const'), - *connect_front('size:0', '0:equal'), - *connect_front('shapeof:0', '1:select'), - *connect_front('minus_one:0', '1:equal'), - *connect_front('equal:0', '0:select'), - *connect_front('end_const:0', '0:cast'), - *connect_front('cast:0', '2:select'), - *connect_front('select:0', '2:slice'), - *connect_front('slice:0', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/UnpackPackReverseInputChannels_test.py b/tools/mo/unit_tests/mo/front/tf/UnpackPackReverseInputChannels_test.py deleted file mode 100644 index dfa7903fb7f56d..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/UnpackPackReverseInputChannels_test.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.UnpackPackReverseInputChannels import UnpackPackReverseInputChannels -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs - -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect_front - -nodes = { - **regular_op_with_empty_data('input', {'type': 'Parameter'}), - **regular_op_with_empty_data('unpack', {'op': 'AttributedSplit', 'axis': int64_array(0)}), - **regular_op_with_empty_data('pack', {'op': 'Pack', 'axis': int64_array(0)}), - **result(), - - **regular_op_with_empty_data('reverseChannels', - {'op': 'ReverseChannels', 'order': int64_array([2, 1, 0]), 'axis': int64_array(0), 'type': None}), -} - - -class UnpackPackReverseInputChannelsTest(unittest.TestCase): - def test_replace_to_reverse_channel(self): - graph = build_graph(nodes_attrs=nodes, edges=[ - *connect_front('input:0', '0:unpack'), - *connect_front('unpack:0', '2:pack'), - *connect_front('unpack:1', '1:pack'), - *connect_front('unpack:2', '0:pack'), - *connect_front('pack:0', '0:output'), - ], nodes_with_edges_only=True) - graph.stage = 'front' - - UnpackPackReverseInputChannels().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attrs=nodes, edges=[ - *connect_front('input:0', '0:reverseChannels'), - *connect_front('reverseChannels:0', '0:output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, 
resp) diff --git a/tools/mo/unit_tests/mo/front/tf/WhereDecomposition_test.py b/tools/mo/unit_tests/mo/front/tf/WhereDecomposition_test.py deleted file mode 100644 index 9786d6ddccfa85..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/WhereDecomposition_test.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.tf.WhereDecomposition import WhereDecomposition -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -graph_node_attrs = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': None, - 'kind': 'data', - 'data_type': None - }, - 'tf_where': {'op': 'Where', 'kind': 'op'}, - 'tf_where_data': {'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - - -graph_edges = [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'tf_where'), - ('tf_where', 'tf_where_data'), - ('tf_where_data', 'output'), -] - - -ref_graph_node_attrs = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': None, - 'kind': 'data', - 'data_type': None - }, - 'non_zero': {'kind': 'op', 'op': 'NonZero', 'output_type': np.int64}, - 'non_zero_data': {'kind': 'data'}, - 'transpose': {'kind': 'op', 'op': 'Transpose'}, - 'transpose_data': {'kind': 'data'}, - 'perm_const': {'kind': 'op', 'op': 'Const', 'shape': [2], 'value': int64_array([1, 0])}, - 'perm_const_data': {'kind': 'data', 'shape': [2], 'value': int64_array([1, 0])}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -ref_graph_edges = [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'non_zero'), - ('non_zero', 'non_zero_data'), - ('non_zero_data', 'transpose', {'in': 0}), - ('perm_const', 'perm_const_data'), - ('perm_const_data', 'transpose', {'in': 1}), - ('transpose', 'transpose_data'), - ('transpose_data', 'output'), -] - - -class TestTFWhereDecompositionTest(): - @pytest.mark.parametrize("input_shape",[[1, 100, 120, 150], [16, 125, 14]]) - def test_1(self, input_shape): - in_shape = int64_array(input_shape) - graph = build_graph(graph_node_attrs, - graph_edges, - update_attributes={ - 'placeholder_data': {'shape': in_shape} - }) - WhereDecomposition().find_and_replace_pattern(graph) - ref_graph = build_graph(ref_graph_node_attrs, - ref_graph_edges, - update_attributes={ - 'placeholder_data': {'shape': in_shape} - }) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - assert flag,resp diff --git a/tools/mo/unit_tests/mo/front/tf/__init__.py b/tools/mo/unit_tests/mo/front/tf/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/tf/concat_ext_test.py b/tools/mo/unit_tests/mo/front/tf/concat_ext_test.py deleted file mode 100644 index f0b68cbdf8a3aa..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/concat_ext_test.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.tf.concat_ext import ConcatFrontExtractor -from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass - - -class ConcatExtractorTest(BaseExtractorsTestingClass): - def test_concat(self): - node = PB({'pb': PB({'attr': {'N': PB({'i': 4})}})}) - self.expected = { - 
'N': 4, - 'simple_concat': True, - 'type': 'Concat', - 'op': 'Concat', - 'kind': 'op', - 'axis': 1 - } - ConcatFrontExtractor.extract(node) - self.res = node - self.compare() diff --git a/tools/mo/unit_tests/mo/front/tf/concat_test.py b/tools/mo/unit_tests/mo/front/tf/concat_test.py deleted file mode 100644 index 2655f771677e47..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/concat_test.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.tf.concat import Concat -from unit_tests.utils.graph import build_graph_with_edge_attrs - - -class TestConcatEdgesReshuffler(unittest.TestCase): - def test_concat_edges_reshaffle(self): - graph = build_graph_with_edge_attrs( - {'axis': {}, - 'input_1': {}, - 'input_2': {}, - 'input_3': {}, - 'concat': {'op': 'Concat', 'simple_concat': True, 'axis': 1}, - }, - [('axis', 'concat', {'in': 0}), - ('input_1', 'concat', {'in': 1}), - ('input_2', 'concat', {'in': 2}), - ('input_3', 'concat', {'in': 3})], - ) - Concat().find_and_replace_pattern(graph=graph) - for u, v, attrs in graph.in_edges('concat', data=True): - if attrs['in'] == 0: - self.assertEqual(u, 'input_1') - if attrs['in'] == 1: - self.assertEqual(u, 'input_2') - if attrs['in'] == 2: - self.assertEqual(u, 'input_3') - if attrs['in'] == 3: - self.assertEqual(u, 'axis') - self.assertTrue('axis' not in graph.node['concat']) diff --git a/tools/mo/unit_tests/mo/front/tf/conv_ext_test.py b/tools/mo/unit_tests/mo/front/tf/conv_ext_test.py deleted file mode 100644 index d1a02fea225c79..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/conv_ext_test.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.tf.conv_ext import Conv2DFrontExtractor, DepthwiseConv2dNativeFrontExtractor -from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass - - -class ConvExtractorTest(BaseExtractorsTestingClass): - @classmethod - def setUpClass(cls): - cls.strides = [1, 2, 3, 4] - cls.dilations = [1, 1, 1, 1] - - def test_conv_2d_defaults(self): - node = PB({'pb': PB({'attr': { - 'data_format': PB({ - 's': b"NHWC" - }), - 'strides': PB({ - 'list': PB({"i": self.strides}) - }), - 'padding': PB({ - 's': b'VALID' - }), - 'dilations': PB({ - 'list': PB({"i": [1, 1, 1, 1]}) - }) - }})}) - self.expected = { - 'bias_addable': True, - 'dilation': np.array([1, 1, 1, 1], dtype=np.int8), - 'type': 'Convolution', - 'layout': 'NHWC', - } - Conv2DFrontExtractor.extract(node) - self.res = node - self.expected_call_args = (None, False) - self.compare() - - def test_conv2d_nhwc(self): - node = PB({'pb': PB({'attr': { - 'data_format': PB({ - 's': b"NHWC" - }), - 'strides': PB({ - 'list': PB({"i": self.strides}) - }), - 'padding': PB({ - 's': b'VALID' - }), - 'dilations': PB({ - 'list': PB({"i": [1, 1, 1, 1]}) - }) - }})}) - self.expected = { - # spatial_dims = [1, 2] will be detected in infer function - "channel_dims": [3], - "batch_dims": [0], - "input_feature_channel": 2, - "output_feature_channel": 3, - 'dilation': np.array([1, 1, 1, 1], dtype=np.int8), - 'stride': np.array(self.strides, dtype=np.int8), - } - Conv2DFrontExtractor.extract(node) - self.res = node - self.expected_call_args = (None, False) - self.compare() - - def test_conv2d_nchw(self): - node = PB({'pb': PB({'attr': { - 'data_format': PB({ - 's': b"NCHW" - }), - 'strides': PB({ - 'list': PB({"i": self.strides}) - }), - 
'padding': PB({ - 's': b'VALID' - }), - 'dilations': PB({ - 'list': PB({"i": [1, 1, 1, 1]}) - }) - }})}) - self.expected = { - # spatial_dims = [2, 3] will be detected in infer function - "channel_dims": [1], - "batch_dims": [0], - "input_feature_channel": 2, - "output_feature_channel": 3, - 'dilation': np.array([1, 1, 1, 1], dtype=np.int8), - 'stride': np.array(self.strides, dtype=np.int8), - } - Conv2DFrontExtractor.extract(node) - self.res = node - self.expected_call_args = (None, False) - self.compare() - - def test_conv2d_depthwise(self): - node = PB({'pb': PB({'attr': { - 'data_format': PB({ - 's': b"NHWC" - }), - 'strides': PB({ - 'list': PB({"i": self.strides}), - }), - 'dilations': PB({ - 'list': PB({"i": self.dilations}), - }), - 'padding': PB({ - 's': b'VALID' - }) - }})}) - self.expected = { - # spatial_dims = [1, 2] will be detected in infer function - "channel_dims": [3], - "batch_dims": [0], - "input_feature_channel": 2, - "output_feature_channel": 2, - 'dilation': np.array([1, 1, 1, 1], dtype=np.int8), - 'stride': np.array(self.strides, dtype=np.int8), - } - DepthwiseConv2dNativeFrontExtractor.extract(node) - self.res = node - self.expected_call_args = (None, True) - self.compare() diff --git a/tools/mo/unit_tests/mo/front/tf/deconv_ext_test.py b/tools/mo/unit_tests/mo/front/tf/deconv_ext_test.py deleted file mode 100644 index 5b43b0ebb6044b..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/deconv_ext_test.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.front.tf.deconv_ext import Conv2DBackpropInputFrontExtractor -from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass - - -class DeconvolutionExtractorTest(BaseExtractorsTestingClass): - @classmethod - def setUpClass(cls): - cls.strides = [1, 2, 3, 4] - - def test_deconv2d_defaults(self): - node = PB({'pb': PB({'attr': { - 'data_format': PB({ - 's': b"NHWC" - }), - 'strides': PB({ - 'list': PB({"i": self.strides}) - }), - 'padding': PB({ - 's': b'VALID' - }) - }})}) - self.expected = { - 'bias_addable': True, - 'pad': None, # will be inferred when input shape is known - 'pad_spatial_shape': None, - 'output_spatial_shape': None, - 'output_shape': None, - 'group': None, - } - Conv2DBackpropInputFrontExtractor.extract(node) - self.res = node - self.expected_call_args = (None, False) - self.compare() - - def test_deconv2d_nhwc(self): - node = PB({'pb': PB({'attr': { - 'data_format': PB({ - 's': b"NHWC" - }), - 'strides': PB({ - 'list': PB({"i": self.strides}) - }), - 'padding': PB({ - 's': b'VALID' - }) - }})}) - - self.expected = { - "spatial_dims": [1, 2], - "channel_dims": [3], - "batch_dims": [0], - 'stride': np.array(self.strides, dtype=np.int8), - } - - Conv2DBackpropInputFrontExtractor.extract(node) - self.res = node - self.expected_call_args = (None, False) - self.compare() - - def test_deconv2d_nchw(self): - node = PB({'pb': PB({'attr': { - 'data_format': PB({ - 's': b"NCHW" - }), - 'strides': PB({ - 'list': PB({"i": self.strides}) - }), - 'padding': PB({ - 's': b'VALID' - }) - }})}) - self.expected = { - "spatial_dims": [2, 3], - "channel_dims": [1], - "batch_dims": [0], - 'stride': np.array(self.strides, dtype=np.int8), - } - - Conv2DBackpropInputFrontExtractor.extract(node) - self.res = node - self.expected_call_args = (None, False) - self.compare() diff --git a/tools/mo/unit_tests/mo/front/tf/embedding_segments_operation_fusing_test.py 
b/tools/mo/unit_tests/mo/front/tf/embedding_segments_operation_fusing_test.py deleted file mode 100644 index 88ec471742dbcc..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/embedding_segments_operation_fusing_test.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.embedding_segments_operation_fusing import \ - EmbeddingSegmentsOperationMultipleFeaturesFusing, \ - EmbeddingSegmentsOperationSingleFeatureFusing -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - - -class EmbeddingSegmentsOperationFusingTest(unittest.TestCase): - def test1(self): - nodes_attributes = { - 'input_indices': {'shape': int64_array([5, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_values': {'shape': int64_array([5]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_dense_shape': {'shape': int64_array([2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_params_table': {'shape': int64_array([10, 3, 4]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_default_value': {'shape': int64_array([]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - - 'identity_spw': {'kind': 'op', 'op': 'Identity'}, - 'gather0_1': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'gather0_2': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'reshape0': {'kind': 'op', 'op': 'Reshape'}, - 'where0': {'kind': 'op', 'op': 'Where'}, - 'greaterequal0': {'kind': 'op', 'op': 'GreaterEqual'}, - 'sparse_fill_empty_rows': {'kind': 'op', 'op': 'SparseFillEmptyRows'}, - 'unique': {'kind': 'op', 'op': 'Unique'}, - 'strided_slice': {'kind': 'op', 'op': 'StridedSlice'}, - 'cast': {'kind': 'op', 'op': 'Cast'}, - 'gather': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'sparse_segment_sum': {'kind': 'op', 'op': 'SparseSegmentSum'}, - 'reshape': {'kind': 'op', 'op': 'Reshape'}, - 'tile': {'kind': 'op', 'op': 'Tile', 'type': 'Tile'}, - 'select': {'kind': 'op', 'op': 'Select'}, - - 'split_for_indices': {'kind': 'op', 'op': 'Split'}, - 'squeeze_for_indices': {'kind': 'op', 'op': 'Squeeze'}, - 'split_for_dense_shape': {'kind': 'op', 'op': 'Split'}, - 'squeeze_to_scalar': {'kind': 'op', 'op': 'Squeeze'}, - 'cast_indices': {'kind': 'op', 'op': 'Cast'}, - 'cast_segment_ids': {'kind': 'op', 'op': 'Cast'}, - 'cast_default_value': {'kind': 'op', 'op': 'Cast'}, - 'cast_number_segments': {'kind': 'op', 'op': 'Cast'}, - 'embedding_segments_sum': {'kind': 'op', 'op': 'EmbeddingSegmentsSum'}, - - **const('split_for_indices_axis', int64_array(1)), - **const('split_for_dense_shape_axis', int64_array(0)), - **const('squeeze_axis', int64_array([0])), - **const('squeeze_for_indices_axis', int64_array([1])), - - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - } - - graph = build_graph(nodes_attributes, - [('input_indices', 'gather0_1', {'out': 0, 'in': 0}), - ('input_dense_shape', 'identity_spw', {'out': 0, 'in': 0}), - ('input_values', 'greaterequal0', {'out': 0, 'in': 0}), - ('input_values', 'gather0_2', {'out': 0, 'in': 0}), - ('input_params_table', 'gather', {'out': 0, 'in': 0}), - ('input_default_value', 'sparse_fill_empty_rows', {'out': 0, 'in': 3}), - - ('gather0_1', 'sparse_fill_empty_rows', {'out': 0, 'in': 0}), - ('gather0_2', 'sparse_fill_empty_rows', {'out': 0, 'in': 1}), - 
('identity_spw', 'sparse_fill_empty_rows', {'out': 0, 'in': 2}), - ('reshape0', 'gather0_1', {'out': 0, 'in': 1}), - ('reshape0', 'gather0_2', {'out': 0, 'in': 1}), - ('where0', 'reshape0', {'out': 0, 'in': 0}), - ('greaterequal0', 'where0', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'unique', {'out': 1, 'in': 0}), - ('sparse_fill_empty_rows', 'strided_slice', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'reshape', {'out': 2, 'in': 0}), - ('unique', 'sparse_segment_sum', {'out': 1, 'in': 1}), - ('unique', 'gather', {'out': 0, 'in': 1}), - ('strided_slice', 'cast', {'out': 0, 'in': 0}), - ('gather', 'sparse_segment_sum', {'out': 0, 'in': 0}), - ('cast', 'sparse_segment_sum', {'out': 0, 'in': 2}), - ('sparse_segment_sum', 'select', {'out': 0, 'in': 2}), - ('reshape', 'tile', {'out': 0, 'in': 0}), - ('tile', 'select', {'out': 0, 'in': 0}), - ('select', 'last', {'out': 0, 'in': 0}), - ], nodes_with_edges_only=True) - graph.stage = 'front' - EmbeddingSegmentsOperationSingleFeatureFusing().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attributes, - [('input_indices', 'split_for_indices', {'in': 0}), - ('split_for_indices_axis', 'split_for_indices', {'in': 1}), - ('split_for_indices', 'squeeze_for_indices', {'in': 0}), - ('squeeze_for_indices_axis', 'squeeze_for_indices', {'in': 1}), - ('squeeze_for_indices', 'cast_segment_ids', {'in': 0}), - ('cast_segment_ids', 'embedding_segments_sum', {'in': 2, 'out': 0}), - ('input_values', 'cast_indices', {'in': 0}), - ('cast_indices', 'embedding_segments_sum', {'in': 1}), - ('input_dense_shape', 'split_for_dense_shape', {'in': 0}), - ('split_for_dense_shape_axis', 'split_for_dense_shape', {'in': 1}), - ('split_for_dense_shape', 'squeeze_to_scalar', {'in': 0}), - ('squeeze_axis', 'squeeze_to_scalar', {'in': 1}), - ('squeeze_to_scalar', 'cast_number_segments', {'in': 0}), - ('cast_number_segments', 'embedding_segments_sum', {'in': 3, 'out': 0}), - ('input_params_table', 'embedding_segments_sum', {'in': 0}), - ('input_default_value', 'cast_default_value', {'in': 0}), - ('cast_default_value', 'embedding_segments_sum', {'in': 4}), - ('embedding_segments_sum', 'last', {'in': 0}), ], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test2(self): - nodes_attributes = { - 'input_indices': {'shape': int64_array([5, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_values': {'shape': int64_array([5]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_dense_shape': {'shape': int64_array([2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_params_table': {'shape': int64_array([10, 3, 4]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_default_value': {'shape': int64_array([]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - - 'identity_spw': {'kind': 'op', 'op': 'Identity'}, - 'gather0_1': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'gather0_2': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'reshape0': {'kind': 'op', 'op': 'Reshape'}, - 'where0': {'kind': 'op', 'op': 'Where'}, - 'greaterequal0': {'kind': 'op', 'op': 'GreaterEqual'}, - 'sparse_fill_empty_rows': {'kind': 'op', 'op': 'SparseFillEmptyRows'}, - 'unique': {'kind': 'op', 'op': 'Unique'}, - 'strided_slice': {'kind': 'op', 'op': 'StridedSlice'}, - 'cast': {'kind': 'op', 'op': 'Cast'}, - 'gather': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'identity': {'kind': 'op', 'op': 
'Identity'}, - 'identity_1': {'kind': 'op', 'op': 'Identity'}, - 'sparse_segment_mean': {'kind': 'op', 'op': 'SparseSegmentMean'}, - 'reshape': {'kind': 'op', 'op': 'Reshape'}, - 'tile': {'kind': 'op', 'op': 'Tile', 'type': 'Tile'}, - 'select': {'kind': 'op', 'op': 'Select'}, - - 'split_for_indices': {'kind': 'op', 'op': 'Split'}, - 'squeeze_for_indices': {'kind': 'op', 'op': 'Squeeze'}, - 'split_for_dense_shape': {'kind': 'op', 'op': 'Split'}, - 'squeeze_to_scalar': {'kind': 'op', 'op': 'Squeeze'}, - 'cast_indices': {'kind': 'op', 'op': 'Cast'}, - 'cast_segment_ids': {'kind': 'op', 'op': 'Cast'}, - 'cast_default_value': {'kind': 'op', 'op': 'Cast'}, - 'cast_number_segments': {'kind': 'op', 'op': 'Cast'}, - 'embedding_segments_mean': {'kind': 'op', 'op': 'EmbeddingSegmentsMean'}, - - **const('split_for_indices_axis', int64_array(1)), - **const('split_for_dense_shape_axis', int64_array(0)), - **const('squeeze_axis', int64_array([0])), - **const('squeeze_for_indices_axis', int64_array([1])), - - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - } - - graph = build_graph(nodes_attributes, - [('input_indices', 'gather0_1', {'out': 0, 'in': 0}), - ('input_dense_shape', 'identity_spw', {'out': 0, 'in': 0}), - ('input_values', 'greaterequal0', {'out': 0, 'in': 0}), - ('input_values', 'gather0_2', {'out': 0, 'in': 0}), - ('input_params_table', 'gather', {'out': 0, 'in': 0}), - ('input_default_value', 'sparse_fill_empty_rows', {'out': 0, 'in': 3}), - - ('identity_spw', 'sparse_fill_empty_rows', {'out': 0, 'in': 2}), - ('gather0_1', 'sparse_fill_empty_rows', {'out': 0, 'in': 0}), - ('gather0_2', 'sparse_fill_empty_rows', {'out': 0, 'in': 1}), - ('reshape0', 'gather0_1', {'out': 0, 'in': 1}), - ('reshape0', 'gather0_2', {'out': 0, 'in': 1}), - ('where0', 'reshape0', {'out': 0, 'in': 0}), - ('greaterequal0', 'where0', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'unique', {'out': 1, 'in': 0}), - ('sparse_fill_empty_rows', 'strided_slice', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'reshape', {'out': 2, 'in': 0}), - ('unique', 'sparse_segment_mean', {'out': 1, 'in': 1}), - ('unique', 'gather', {'out': 0, 'in': 1}), - ('strided_slice', 'cast', {'out': 0, 'in': 0}), - ('gather', 'identity', {'out': 0, 'in': 0}), - ('identity', 'identity_1', {'out': 0, 'in': 0}), - ('identity_1', 'sparse_segment_mean', {'out': 0, 'in': 0}), - ('cast', 'sparse_segment_mean', {'out': 0, 'in': 2}), - ('sparse_segment_mean', 'select', {'out': 0, 'in': 2}), - ('reshape', 'tile', {'out': 0, 'in': 0}), - ('tile', 'select', {'out': 0, 'in': 0}), - ('select', 'last', {'out': 0, 'in': 0})], - nodes_with_edges_only=True) - graph.stage = 'front' - EmbeddingSegmentsOperationMultipleFeaturesFusing().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attributes, - [('input_indices', 'split_for_indices', {'in': 0}), - ('split_for_indices_axis', 'split_for_indices', {'in': 1}), - ('split_for_indices', 'squeeze_for_indices', {'in': 0}), - ('squeeze_for_indices_axis', 'squeeze_for_indices', {'in': 1}), - ('squeeze_for_indices', 'cast_segment_ids', {'in': 0}), - ('cast_segment_ids', 'embedding_segments_mean', {'in': 2, 'out': 0}), - ('input_values', 'cast_indices', {'in': 0}), - ('cast_indices', 'embedding_segments_mean', {'in': 1}), - ('input_dense_shape', 'split_for_dense_shape', {'in': 0}), - ('split_for_dense_shape_axis', 'split_for_dense_shape', {'in': 1}), - ('split_for_dense_shape', 'squeeze_to_scalar', {'in': 0}), - ('squeeze_axis', 'squeeze_to_scalar', {'in': 1}), - 
('squeeze_to_scalar', 'cast_number_segments', {'in': 0}), - ('cast_number_segments', 'embedding_segments_mean', {'in': 3, 'out': 0}), - ('input_params_table', 'embedding_segments_mean', {'in': 0}), - ('input_default_value', 'cast_default_value', {'in': 0}), - ('cast_default_value', 'embedding_segments_mean', {'in': 4}), - ('embedding_segments_mean', 'last', {'in': 0}), ], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/embedding_segments_sum_test.py b/tools/mo/unit_tests/mo/front/tf/embedding_segments_sum_test.py deleted file mode 100644 index 88ec471742dbcc..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/embedding_segments_sum_test.py +++ /dev/null @@ -1,213 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.embedding_segments_operation_fusing import \ - EmbeddingSegmentsOperationMultipleFeaturesFusing, \ - EmbeddingSegmentsOperationSingleFeatureFusing -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - - -class EmbeddingSegmentsOperationFusingTest(unittest.TestCase): - def test1(self): - nodes_attributes = { - 'input_indices': {'shape': int64_array([5, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_values': {'shape': int64_array([5]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_dense_shape': {'shape': int64_array([2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_params_table': {'shape': int64_array([10, 3, 4]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_default_value': {'shape': int64_array([]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - - 'identity_spw': {'kind': 'op', 'op': 'Identity'}, - 'gather0_1': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'gather0_2': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'reshape0': {'kind': 'op', 'op': 'Reshape'}, - 'where0': {'kind': 'op', 'op': 'Where'}, - 'greaterequal0': {'kind': 'op', 'op': 'GreaterEqual'}, - 'sparse_fill_empty_rows': {'kind': 'op', 'op': 'SparseFillEmptyRows'}, - 'unique': {'kind': 'op', 'op': 'Unique'}, - 'strided_slice': {'kind': 'op', 'op': 'StridedSlice'}, - 'cast': {'kind': 'op', 'op': 'Cast'}, - 'gather': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'sparse_segment_sum': {'kind': 'op', 'op': 'SparseSegmentSum'}, - 'reshape': {'kind': 'op', 'op': 'Reshape'}, - 'tile': {'kind': 'op', 'op': 'Tile', 'type': 'Tile'}, - 'select': {'kind': 'op', 'op': 'Select'}, - - 'split_for_indices': {'kind': 'op', 'op': 'Split'}, - 'squeeze_for_indices': {'kind': 'op', 'op': 'Squeeze'}, - 'split_for_dense_shape': {'kind': 'op', 'op': 'Split'}, - 'squeeze_to_scalar': {'kind': 'op', 'op': 'Squeeze'}, - 'cast_indices': {'kind': 'op', 'op': 'Cast'}, - 'cast_segment_ids': {'kind': 'op', 'op': 'Cast'}, - 'cast_default_value': {'kind': 'op', 'op': 'Cast'}, - 'cast_number_segments': {'kind': 'op', 'op': 'Cast'}, - 'embedding_segments_sum': {'kind': 'op', 'op': 'EmbeddingSegmentsSum'}, - - **const('split_for_indices_axis', int64_array(1)), - **const('split_for_dense_shape_axis', int64_array(0)), - **const('squeeze_axis', int64_array([0])), - **const('squeeze_for_indices_axis', int64_array([1])), - - 'last': {'type': None, 'value': None, 'kind': 
'op', 'op': 'Result'}, - } - - graph = build_graph(nodes_attributes, - [('input_indices', 'gather0_1', {'out': 0, 'in': 0}), - ('input_dense_shape', 'identity_spw', {'out': 0, 'in': 0}), - ('input_values', 'greaterequal0', {'out': 0, 'in': 0}), - ('input_values', 'gather0_2', {'out': 0, 'in': 0}), - ('input_params_table', 'gather', {'out': 0, 'in': 0}), - ('input_default_value', 'sparse_fill_empty_rows', {'out': 0, 'in': 3}), - - ('gather0_1', 'sparse_fill_empty_rows', {'out': 0, 'in': 0}), - ('gather0_2', 'sparse_fill_empty_rows', {'out': 0, 'in': 1}), - ('identity_spw', 'sparse_fill_empty_rows', {'out': 0, 'in': 2}), - ('reshape0', 'gather0_1', {'out': 0, 'in': 1}), - ('reshape0', 'gather0_2', {'out': 0, 'in': 1}), - ('where0', 'reshape0', {'out': 0, 'in': 0}), - ('greaterequal0', 'where0', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'unique', {'out': 1, 'in': 0}), - ('sparse_fill_empty_rows', 'strided_slice', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'reshape', {'out': 2, 'in': 0}), - ('unique', 'sparse_segment_sum', {'out': 1, 'in': 1}), - ('unique', 'gather', {'out': 0, 'in': 1}), - ('strided_slice', 'cast', {'out': 0, 'in': 0}), - ('gather', 'sparse_segment_sum', {'out': 0, 'in': 0}), - ('cast', 'sparse_segment_sum', {'out': 0, 'in': 2}), - ('sparse_segment_sum', 'select', {'out': 0, 'in': 2}), - ('reshape', 'tile', {'out': 0, 'in': 0}), - ('tile', 'select', {'out': 0, 'in': 0}), - ('select', 'last', {'out': 0, 'in': 0}), - ], nodes_with_edges_only=True) - graph.stage = 'front' - EmbeddingSegmentsOperationSingleFeatureFusing().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attributes, - [('input_indices', 'split_for_indices', {'in': 0}), - ('split_for_indices_axis', 'split_for_indices', {'in': 1}), - ('split_for_indices', 'squeeze_for_indices', {'in': 0}), - ('squeeze_for_indices_axis', 'squeeze_for_indices', {'in': 1}), - ('squeeze_for_indices', 'cast_segment_ids', {'in': 0}), - ('cast_segment_ids', 'embedding_segments_sum', {'in': 2, 'out': 0}), - ('input_values', 'cast_indices', {'in': 0}), - ('cast_indices', 'embedding_segments_sum', {'in': 1}), - ('input_dense_shape', 'split_for_dense_shape', {'in': 0}), - ('split_for_dense_shape_axis', 'split_for_dense_shape', {'in': 1}), - ('split_for_dense_shape', 'squeeze_to_scalar', {'in': 0}), - ('squeeze_axis', 'squeeze_to_scalar', {'in': 1}), - ('squeeze_to_scalar', 'cast_number_segments', {'in': 0}), - ('cast_number_segments', 'embedding_segments_sum', {'in': 3, 'out': 0}), - ('input_params_table', 'embedding_segments_sum', {'in': 0}), - ('input_default_value', 'cast_default_value', {'in': 0}), - ('cast_default_value', 'embedding_segments_sum', {'in': 4}), - ('embedding_segments_sum', 'last', {'in': 0}), ], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test2(self): - nodes_attributes = { - 'input_indices': {'shape': int64_array([5, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_values': {'shape': int64_array([5]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_dense_shape': {'shape': int64_array([2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_params_table': {'shape': int64_array([10, 3, 4]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_default_value': {'shape': int64_array([]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - - 'identity_spw': {'kind': 'op', 'op': 'Identity'}, - 'gather0_1': {'kind': 'op', 'op': 
'Gather', 'type': 'Gather'}, - 'gather0_2': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'reshape0': {'kind': 'op', 'op': 'Reshape'}, - 'where0': {'kind': 'op', 'op': 'Where'}, - 'greaterequal0': {'kind': 'op', 'op': 'GreaterEqual'}, - 'sparse_fill_empty_rows': {'kind': 'op', 'op': 'SparseFillEmptyRows'}, - 'unique': {'kind': 'op', 'op': 'Unique'}, - 'strided_slice': {'kind': 'op', 'op': 'StridedSlice'}, - 'cast': {'kind': 'op', 'op': 'Cast'}, - 'gather': {'kind': 'op', 'op': 'Gather', 'type': 'Gather'}, - 'identity': {'kind': 'op', 'op': 'Identity'}, - 'identity_1': {'kind': 'op', 'op': 'Identity'}, - 'sparse_segment_mean': {'kind': 'op', 'op': 'SparseSegmentMean'}, - 'reshape': {'kind': 'op', 'op': 'Reshape'}, - 'tile': {'kind': 'op', 'op': 'Tile', 'type': 'Tile'}, - 'select': {'kind': 'op', 'op': 'Select'}, - - 'split_for_indices': {'kind': 'op', 'op': 'Split'}, - 'squeeze_for_indices': {'kind': 'op', 'op': 'Squeeze'}, - 'split_for_dense_shape': {'kind': 'op', 'op': 'Split'}, - 'squeeze_to_scalar': {'kind': 'op', 'op': 'Squeeze'}, - 'cast_indices': {'kind': 'op', 'op': 'Cast'}, - 'cast_segment_ids': {'kind': 'op', 'op': 'Cast'}, - 'cast_default_value': {'kind': 'op', 'op': 'Cast'}, - 'cast_number_segments': {'kind': 'op', 'op': 'Cast'}, - 'embedding_segments_mean': {'kind': 'op', 'op': 'EmbeddingSegmentsMean'}, - - **const('split_for_indices_axis', int64_array(1)), - **const('split_for_dense_shape_axis', int64_array(0)), - **const('squeeze_axis', int64_array([0])), - **const('squeeze_for_indices_axis', int64_array([1])), - - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - } - - graph = build_graph(nodes_attributes, - [('input_indices', 'gather0_1', {'out': 0, 'in': 0}), - ('input_dense_shape', 'identity_spw', {'out': 0, 'in': 0}), - ('input_values', 'greaterequal0', {'out': 0, 'in': 0}), - ('input_values', 'gather0_2', {'out': 0, 'in': 0}), - ('input_params_table', 'gather', {'out': 0, 'in': 0}), - ('input_default_value', 'sparse_fill_empty_rows', {'out': 0, 'in': 3}), - - ('identity_spw', 'sparse_fill_empty_rows', {'out': 0, 'in': 2}), - ('gather0_1', 'sparse_fill_empty_rows', {'out': 0, 'in': 0}), - ('gather0_2', 'sparse_fill_empty_rows', {'out': 0, 'in': 1}), - ('reshape0', 'gather0_1', {'out': 0, 'in': 1}), - ('reshape0', 'gather0_2', {'out': 0, 'in': 1}), - ('where0', 'reshape0', {'out': 0, 'in': 0}), - ('greaterequal0', 'where0', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'unique', {'out': 1, 'in': 0}), - ('sparse_fill_empty_rows', 'strided_slice', {'out': 0, 'in': 0}), - ('sparse_fill_empty_rows', 'reshape', {'out': 2, 'in': 0}), - ('unique', 'sparse_segment_mean', {'out': 1, 'in': 1}), - ('unique', 'gather', {'out': 0, 'in': 1}), - ('strided_slice', 'cast', {'out': 0, 'in': 0}), - ('gather', 'identity', {'out': 0, 'in': 0}), - ('identity', 'identity_1', {'out': 0, 'in': 0}), - ('identity_1', 'sparse_segment_mean', {'out': 0, 'in': 0}), - ('cast', 'sparse_segment_mean', {'out': 0, 'in': 2}), - ('sparse_segment_mean', 'select', {'out': 0, 'in': 2}), - ('reshape', 'tile', {'out': 0, 'in': 0}), - ('tile', 'select', {'out': 0, 'in': 0}), - ('select', 'last', {'out': 0, 'in': 0})], - nodes_with_edges_only=True) - graph.stage = 'front' - EmbeddingSegmentsOperationMultipleFeaturesFusing().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attributes, - [('input_indices', 'split_for_indices', {'in': 0}), - ('split_for_indices_axis', 'split_for_indices', {'in': 1}), - ('split_for_indices', 'squeeze_for_indices', {'in': 0}), - 
('squeeze_for_indices_axis', 'squeeze_for_indices', {'in': 1}), - ('squeeze_for_indices', 'cast_segment_ids', {'in': 0}), - ('cast_segment_ids', 'embedding_segments_mean', {'in': 2, 'out': 0}), - ('input_values', 'cast_indices', {'in': 0}), - ('cast_indices', 'embedding_segments_mean', {'in': 1}), - ('input_dense_shape', 'split_for_dense_shape', {'in': 0}), - ('split_for_dense_shape_axis', 'split_for_dense_shape', {'in': 1}), - ('split_for_dense_shape', 'squeeze_to_scalar', {'in': 0}), - ('squeeze_axis', 'squeeze_to_scalar', {'in': 1}), - ('squeeze_to_scalar', 'cast_number_segments', {'in': 0}), - ('cast_number_segments', 'embedding_segments_mean', {'in': 3, 'out': 0}), - ('input_params_table', 'embedding_segments_mean', {'in': 0}), - ('input_default_value', 'cast_default_value', {'in': 0}), - ('cast_default_value', 'embedding_segments_mean', {'in': 4}), - ('embedding_segments_mean', 'last', {'in': 0}), ], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/extractors/__init__.py b/tools/mo/unit_tests/mo/front/tf/extractors/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/front/tf/extractors/concat_test.py b/tools/mo/unit_tests/mo/front/tf/extractors/concat_test.py deleted file mode 100644 index 90bbff25949711..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/extractors/concat_test.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.tf.extractors.concat import tf_concat_ext -from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass - - -class ConcatExtractorTest(BaseExtractorsTestingClass): - @classmethod - def setUpClass(cls): - cls.patcher = 'openvino.tools.mo.front.tf.extractors.concat.concat_infer' - - def test_concat(self): - pb = PB({'attr': { - 'N': PB({'i': 3}), - }}) - self.expected = { - 'type': 'Concat', - 'N': 3, - } - self.res = tf_concat_ext(pb=pb) - self.res["infer"](None) - self.call_args = self.infer_mock.call_args - self.expected_call_args = (None) - self.compare() diff --git a/tools/mo/unit_tests/mo/front/tf/extractors/identity_test.py b/tools/mo/unit_tests/mo/front/tf/extractors/identity_test.py deleted file mode 100644 index fab7dd045ec225..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/extractors/identity_test.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.tf.extractors.identity import tf_identity_ext -from unit_tests.utils.extractors import BaseExtractorsTestingClass - - -class IdentityExtractorTest(BaseExtractorsTestingClass): - @classmethod - def setUpClass(cls): - cls.patcher = 'openvino.tools.mo.front.tf.extractors.identity.copy_shape_infer' - - def test_identity(self): - self.expected = {} - self.res = tf_identity_ext(pb=None) - self.res["infer"](None) - self.call_args = self.infer_mock.call_args - self.expected_call_args = None - self.compare() diff --git a/tools/mo/unit_tests/mo/front/tf/extractors/utils_test.py b/tools/mo/unit_tests/mo/front/tf/extractors/utils_test.py deleted file mode 100644 index ddb8acfe07e5e9..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/extractors/utils_test.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import unittest - 
-import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.extractors.utils import collect_tf_attrs, tf_tensor_content -from unit_tests.utils.extractors import PB - - -class AttrParsingTest(unittest.TestCase): - def test_simple_check(self): - pb = PB({'attr': { - 'str': PB({'s': "aaaa"}), - 'int': PB({'i': 7}), - 'float': PB({'f': 2.0}), - 'bool': PB({'b': True}), - 'lisint': PB({'list': PB({'i': 5, 'i': 6})})} - }) - - res = collect_tf_attrs(pb.attr) - - # Reference results for given parameters - ref = { - 'str': pb.attr['str'].s, - 'int': pb.attr['int'].i, - 'float': pb.attr['float'].f, - 'bool': pb.attr['bool'].b, - 'lisint': pb.attr['lisint'].list.i - } - for attr in ref: - self.assertEqual(res[attr], ref[attr]) - - -class TensorContentParsing(unittest.TestCase): - def test_list_not_type_no_shape(self): - pb_tensor = PB(dict( - dtype=3, - tensor_content=b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00' - )) - tf_dtype = pb_tensor.dtype - shape = np.array([5]) - ref = [1, 2, 3, 4, 5] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_list_type_no_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 'int_val': np.array([1, 2, 3, 4, 5], dtype=np.int32) - }) - tf_dtype = pb_tensor.dtype - shape = np.array([5]) - ref = [1, 2, 3, 4, 5] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_list_not_type_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 'tensor_content': b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00' - }) - tf_dtype = pb_tensor.dtype - shape = np.array([10]) - ref = [1, 2, 3, 4, 5, 5, 5, 5, 5, 5] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_list_type_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 'int_val': np.array([1, 2, 3, 4, 5], dtype=np.int32) - }) - tf_dtype = pb_tensor.dtype - shape = np.array([10]) - ref = [1, 2, 3, 4, 5, 5, 5, 5, 5, 5] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_0d_not_type_no_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 'tensor_content': b'\x01\x00\x00\x00', - }) - tf_dtype = pb_tensor.dtype - shape = np.array([]) - ref = 1 - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(res == ref) - - def test_0d_type_no_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 'int_val': [5], - }) - tf_dtype = pb_tensor.dtype - shape = np.array([]) - ref = 5 - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(res == ref) - - def test_0d_not_type_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 'tensor_content': b'\x01\x00\x00\x00', - }) - tf_dtype = pb_tensor.dtype - shape = np.array([3]) - ref = [1, 1, 1] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_0d_type_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 'int_val': [5], - }) - tf_dtype = pb_tensor.dtype - shape = np.array([3]) - ref = [5, 5, 5] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_0d_type_shape_1(self): - pb_tensor = PB({ - 'dtype': 3, - 'int_val': [5], - }) - tf_dtype = pb_tensor.dtype - shape = np.array([1]) - ref = [5] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_nd_not_type_no_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 
'tensor_content': - b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00', - }) - tf_dtype = pb_tensor.dtype - shape = np.array([2, 3]) - ref = [[1, 2, 3], [4, 5, 6]] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_nd_type_no_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 'int_val': [[1, 2, 3], [4, 5, 6]], - }) - tf_dtype = pb_tensor.dtype - shape = np.array([2, 3]) - ref = [[1, 2, 3], [4, 5, 6]] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_nd_not_type_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 'tensor_content': - b'\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00', - }) - tf_dtype = pb_tensor.dtype - shape = np.array([2, 5, 2]) - ref = [[[1, 2], [3, 4], [5, 6], [6, 6], [6, 6]], - [[6, 6], [6, 6], [6, 6], [6, 6], [6, 6]]] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_nd_type_shape(self): - pb_tensor = PB({ - 'dtype': 3, - 'int_val': [[1, 2, 3], [4, 5, 6]], - }) - tf_dtype = pb_tensor.dtype - shape = np.array([2, 5, 2]) - ref = [[[1, 2], [3, 4], [5, 6], [6, 6], [6, 6]], - [[6, 6], [6, 6], [6, 6], [6, 6], [6, 6]]] - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertTrue(np.all(res == ref)) - - def test_str_decode(self): - pb_tensor = PB({ - 'dtype': 7, - 'string_val': [b"\037\000\036\000\002\000\303\237\035\000\002"] - }) - tf_dtype = pb_tensor.dtype - shape = int64_array([1]) - warning_message = 'ERROR:root:Failed to parse a tensor with Unicode characters. Note that OpenVINO ' \ - 'does not support string literals, so the string constant should be eliminated from the ' \ - 'graph.' - ref_val = np.array([b'\x1f\x00\x1e\x00\x02\x00\xc3\x9f\x1d\x00\x02']) - with self.assertLogs(log.getLogger(), level="ERROR") as cm: - result = tf_tensor_content(tf_dtype, shape, pb_tensor) - self.assertEqual([warning_message], cm.output) - self.assertEqual(ref_val, result) - - def test_str_decode_list(self): - pb_tensor = PB({ - 'dtype': 7, - 'string_val': [b'\377\330\377\377\330\377'], - }) - shape = int64_array([]) - warning_message = 'ERROR:root:Failed to parse a tensor with Unicode characters. Note that OpenVINO ' \ - 'does not support string literals, so the string constant should be eliminated from the ' \ - 'graph.' 
- with self.assertLogs(log.getLogger(), level="ERROR") as cm: - result = tf_tensor_content(pb_tensor.dtype, shape, pb_tensor) - self.assertEqual([warning_message, warning_message], cm.output) - - def test_empty_value(self): - pb_tensor = PB({ - 'dtype': 1, - 'float_val': [] - }) - - shape = int64_array([1, 1, 0]) - tf_dtype = pb_tensor.dtype - ref = np.array([[[]]], dtype=np.float32) - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - - self.assertEqual(res.shape, ref.shape) - self.assertTrue(np.all(res == ref)) - - def test_scalar_value(self): - pb_tensor = PB({ - 'dtype': 3, - 'int_val': 4 - }) - - shape = int64_array([]) - tf_dtype = pb_tensor.dtype - ref = np.array(4, dtype=np.int32) - res = tf_tensor_content(tf_dtype, shape, pb_tensor) - - self.assertEqual(ref, res) diff --git a/tools/mo/unit_tests/mo/front/tf/fifo_replacer_test.py b/tools/mo/unit_tests/mo/front/tf/fifo_replacer_test.py deleted file mode 100644 index 04d29d4b89acab..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/fifo_replacer_test.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array -from openvino.tools.mo.front.tf.fifo_replacer import FIFOQueue, FIFOQueueDequeueCut -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_edge_attrs - - -def create_fifo_queue_graph(batch_size_shape: np.ndarray): - nodes = { - 'placeholder': {'op': 'Parameter', 'data_type': np.int32, 'kind': 'op', 'shape': batch_size_shape}, - 'batch_join/fifo_queue': {'op': 'FIFOQueueV2', 'name': 'batch_join/fifo_queue', - 'shapes': np.array([[1, 2, 3]]), 'types': np.array([np.float32]), 'kind': 'op'}, - 'batch_join': {'op': 'QueueDequeueUpToV2', 'kind': 'op'}, - 'image_batch': {'op': 'Identity', 'data_type': np.float32, 'kind': 'op'}, - 'label_batch': {'op': 'Identity', 'kind': 'op'}, - 'label_batch_op_output': {'op': 'Result', 'kind': 'op'}, - } - edges = [ - ('placeholder', 'batch_join', {'out': 0, 'in': 0}), - ('batch_join/fifo_queue', 'batch_join', {'out': 0, 'in': 1}), - ('batch_join', 'image_batch', {'out': 0, 'in': 0}), - ('batch_join', 'label_batch', {'out': 1, 'in': 0}), - ('label_batch', 'label_batch_op_output', {'out': 0, 'in': 0}) - ] - graph = build_graph_with_edge_attrs(nodes, edges) - return graph - - -class TestFIFOQueueReplacement(unittest.TestCase): - def test_fifo_with_label_batch(self): - graph = create_fifo_queue_graph(shape_array([1])) - tested_class = FIFOQueue() - tested_class.find_and_replace_pattern(graph=graph) - after_pattern = graph.nodes() - self.assertEqual(2, len(after_pattern)) - try: - new_ph_dict = graph.node[[u for u, v in graph.in_edges('image_batch')][0]] - except Exception as e: - self.fail("Can't get new placeholder. Broken edge. Additional information: {}".format(e)) - self.assertEqual(new_ph_dict['name'], 'batch_join/fifo_queue') - self.assertTrue(np.array_equal(new_ph_dict['shape'], [1, 2, 3])) - - def test_fifo_with_undefined_batch_size(self): - graph = create_fifo_queue_graph(None) - tested_class = FIFOQueue() - tested_class.find_and_replace_pattern(graph=graph) - after_pattern = graph.nodes() - self.assertEqual(2, len(after_pattern)) - try: - new_ph_dict = graph.node[[u for u, v in graph.in_edges('image_batch')][0]] - except Exception as e: - self.fail("Can't get new placeholder. Broken edge. 
Additional information: {}".format(e)) - self.assertEqual(new_ph_dict['name'], 'batch_join/fifo_queue') - self.assertTrue(np.array_equal(new_ph_dict['shape'], [1, 2, 3])) - - def test_fifo_with_out_label_batch(self): - nodes_no_label = { - 'placeholder': {'op': 'Parameter', 'data_type': np.int32, 'kind': 'op', 'shape': np.array(0)}, - 'batch_join/fifo_queue': {'op': 'FIFOQueueV2', 'name': 'batch_join/fifo_queue', - 'shapes': np.array([[1, 2, 3]]), 'types': np.array([np.float32]), 'kind': 'op'}, - 'batch_join': {'op': 'QueueDequeueUpToV2', 'kind': 'op'}, - 'image_batch': {'op': 'Identity', 'data_type': np.float32, 'kind': 'op'}, - } - edges_no_label = [ - ('placeholder', 'batch_join', {'out': 0, 'in': 0}), - ('batch_join/fifo_queue', 'batch_join', {'out': 0, 'in': 1}), - ('batch_join', 'image_batch', {'out': 0, 'in': 0}) - ] - - graph = build_graph_with_edge_attrs(nodes_no_label, edges_no_label) - tested_class = FIFOQueue() - tested_class.find_and_replace_pattern(graph=graph) - after_pattern = graph.nodes() - self.assertEqual(2, len(after_pattern)) - try: - new_ph_dict = graph.node[[u for u, v in graph.in_edges('image_batch')][0]] - except Exception as e: - self.fail("Can't get new placeholder. Broken edge. Additional information: {}".format(e)) - self.assertEqual(new_ph_dict['name'], 'batch_join/fifo_queue') - self.assertTrue(np.array_equal(new_ph_dict['shape'], np.array([1, 2, 3]))) - - -class FIFOQueueDequeueCutTest(unittest.TestCase): - def test_one_output_v1(self): - graph = build_graph_with_edge_attrs( - { - 'queue_dequeue': {'kind': 'op', 'op': 'QueueDequeue', 'shapes': shape_array([[2, 2]]), - 'types': [np.int32]}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - }, - [ - ('queue_dequeue', 'sub', {'out': 0, 'in': 0}), - ] - ) - - graph_ref = build_graph_with_edge_attrs( - { - 'parameter_1': {'kind': 'op', 'op': 'Parameter', 'shape': shape_array([2, 2]), 'type': np.int32}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - }, - [ - ('parameter_1', 'sub', {'out': 0, 'in': 0}), - ] - ) - - FIFOQueueDequeueCut().find_and_replace_pattern(graph) - - flag, msg = compare_graphs(graph, graph_ref, last_node='sub') - self.assertTrue(flag, msg) - - def test_one_output_v2(self): - graph = build_graph_with_edge_attrs( - { - 'queue_dequeue': {'kind': 'op', 'op': 'QueueDequeueV2', 'shapes': shape_array([[2, 2]]), - 'types': [np.int32]}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - }, - [ - ('queue_dequeue', 'sub', {'out': 0, 'in': 0}), - ] - ) - - graph_ref = build_graph_with_edge_attrs( - { - 'parameter_1': {'kind': 'op', 'op': 'Parameter', 'shape': shape_array([2, 2]), 'type': np.int32}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - }, - [ - ('parameter_1', 'sub', {'out': 0, 'in': 0}), - ] - ) - - FIFOQueueDequeueCut().find_and_replace_pattern(graph) - - flag, msg = compare_graphs(graph, graph_ref, last_node='sub') - self.assertTrue(flag, msg) - - def test_two_outputs_v1(self): - graph = build_graph_with_edge_attrs( - { - 'queue_dequeue': {'kind': 'op', 'op': 'QueueDequeue', 'shapes': [shape_array([2, 2]), - shape_array([1, 1])], - 'types': [np.int32, np.float32]}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'add': {'kind': 'op', 'op': 'Add'}, - 'concat': {'kind': 'op', 'op': 'Concat'} - }, - [ - ('queue_dequeue', 'sub', {'out': 0, 'in': 0}), - ('queue_dequeue', 'add', {'out': 1, 'in': 0}), - ('sub', 'concat', {'out': 0, 'in': 0}), - ('add', 'concat', {'out': 0, 'in': 1}) - ] - ) - - graph_ref = build_graph_with_edge_attrs( - { - 'parameter_1': {'kind': 'op', 'op': 'Parameter', 'shape': shape_array([2, 2]), 'data_type': np.int32}, 
- 'parameter_2': {'kind': 'op', 'op': 'Parameter', 'shape': shape_array([1, 1]), 'data_type': np.float32}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'add': {'kind': 'op', 'op': 'Add'}, - 'concat': {'kind': 'op', 'op': 'Concat'} - }, - [ - ('parameter_1', 'sub', {'out': 0, 'in': 0}), - ('parameter_2', 'add', {'out': 0, 'in': 0}), - ('sub', 'concat', {'out': 0, 'in': 0}), - ('add', 'concat', {'out': 0, 'in': 1}) - ] - ) - - FIFOQueueDequeueCut().find_and_replace_pattern(graph) - - flag, msg = compare_graphs(graph, graph_ref, last_node='concat', check_op_attrs=True) - self.assertTrue(flag, msg) - - def test_two_outputs_v2(self): - graph = build_graph_with_edge_attrs( - { - 'queue_dequeue': {'kind': 'op', 'op': 'QueueDequeueV2', 'shapes': [shape_array([2, 2]), - shape_array([1, 1])], - 'types': [np.int32, np.float32]}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'add': {'kind': 'op', 'op': 'Add'}, - 'concat': {'kind': 'op', 'op': 'Concat'} - }, - [ - ('queue_dequeue', 'sub', {'out': 0, 'in': 0}), - ('queue_dequeue', 'add', {'out': 1, 'in': 0}), - ('sub', 'concat', {'out': 0, 'in': 0}), - ('add', 'concat', {'out': 0, 'in': 1}) - ] - ) - - graph_ref = build_graph_with_edge_attrs( - { - 'parameter_1': {'kind': 'op', 'op': 'Parameter', 'shape': shape_array([2, 2]), 'data_type': np.int32}, - 'parameter_2': {'kind': 'op', 'op': 'Parameter', 'shape': shape_array([1, 1]), 'data_type': np.float32}, - 'sub': {'kind': 'op', 'op': 'Sub'}, - 'add': {'kind': 'op', 'op': 'Add'}, - 'concat': {'kind': 'op', 'op': 'Concat'} - }, - [ - ('parameter_1', 'sub', {'out': 0, 'in': 0}), - ('parameter_2', 'add', {'out': 0, 'in': 0}), - ('sub', 'concat', {'out': 0, 'in': 0}), - ('add', 'concat', {'out': 0, 'in': 1}) - ] - ) - - FIFOQueueDequeueCut().find_and_replace_pattern(graph) - - flag, msg = compare_graphs(graph, graph_ref, last_node='concat', check_op_attrs=True) - self.assertTrue(flag, msg) diff --git a/tools/mo/unit_tests/mo/front/tf/floor_div_test.py b/tools/mo/unit_tests/mo/front/tf/floor_div_test.py deleted file mode 100644 index 9c9a2c43f40d29..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/floor_div_test.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.tf.floor_div_decomposition import FloorDivDecomposition -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, connect, \ - connect_data, regular_op_with_empty_data - -nodes = { - **regular_op_with_empty_data('placeholder_1', {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_empty_data('placeholder_2', {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_empty_data('floor_div', {'op': 'FloorDiv', 'type': None, 'name': 'my_floor_div'}), - - **regular_op_with_empty_data('div', {'type': 'Divide', 'op': 'Div'}), - **regular_op_with_empty_data('floor', {'type': 'Floor', 'op': 'Floor'}), - - **result(), -} - - -class TestFloorDiv(unittest.TestCase): - def test_floor_div_test_1(self): - # Test with two different inputs from two placeholders - graph = build_graph(nodes, [ - *connect('placeholder_1:0', '0:floor_div'), - *connect('placeholder_2:0', '1:floor_div'), - *connect('floor_div:0', '0:output'), - ], nodes_with_edges_only=True) - FloorDivDecomposition().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [ - *connect('placeholder_1:0', '0:div'), - *connect('placeholder_2:0', '1:div'), - *connect('div:0', '0:floor'), - 
*connect('floor:0', '0:output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Floor')[0]]['name'] == 'my_floor_div') - - def test_floor_div_test_2(self): - # Test with two same inputs from one placeholder - graph = build_graph(nodes, [ - *connect('placeholder_1:0', '0:floor_div'), - *connect_data('placeholder_1:0', '1:floor_div'), - *connect('floor_div', 'output'), - ], nodes_with_edges_only=True) - FloorDivDecomposition().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [ - *connect('placeholder_1:0', '0:div'), - *connect_data('placeholder_1:0', '1:div'), - *connect('div', 'floor'), - *connect('floor', 'output'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Floor')[0]]['name'] == 'my_floor_div') diff --git a/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py b/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py deleted file mode 100644 index 4f67edce9da954..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/identityN_to_identity_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.tf.identityN_to_identity import IdentityN_to_Identity -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import result, regular_op_with_shaped_data, \ - regular_op_with_empty_data, build_graph, connect, empty_data - -nodes = { - **regular_op_with_shaped_data('placeholder_0', [1, 227, 227, 3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('placeholder_1', [1, 227, 227, 3], {'type': 'Parameter'}), - - **regular_op_with_empty_data('identityN', {'op': 'IdentityN', 'type': None, 'data_types': [np.int32, np.float32], - 'name': 'my_identity'}), - **empty_data('identityN_1_d'), - **regular_op_with_empty_data('identity0', {'op': 'Identity', 'type': None, 'data_type': np.int32, - 'name': 'my_identity/0_port'}), - **regular_op_with_empty_data('identity1', {'op': 'Identity', 'type': None, 'data_type': np.float32, - 'name': 'my_identity/1_port'}), - - **result('output0'), - **result('output1'), -} - - -class TestIdentityN(unittest.TestCase): - def test_identityN(self): - graph = build_graph(nodes, [ - *connect('placeholder_0', '0:identityN'), - *connect('placeholder_1', '1:identityN'), - *connect('identityN:0', 'output0'), - ('identityN', 'identityN_1_d', {'out': 1}), - ('identityN_1_d', 'output1', {'out': 1}), - ], nodes_with_edges_only=True) - - IdentityN_to_Identity().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, [ - *connect('placeholder_0', 'identity0'), - *connect('placeholder_1', 'identity1'), - *connect('identity0', 'output0'), - *connect('identity1', 'output1'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_identityN_unused_ports(self): - graph = build_graph(nodes, [ - *connect('placeholder_0', '0:identityN'), - *connect('placeholder_1', '1:identityN'), - *connect('identityN:0', 'output0'), - ], nodes_with_edges_only=True) - - IdentityN_to_Identity().find_and_replace_pattern(graph) - - graph_ref = 
build_graph(nodes, [ - *connect('placeholder_0', 'identity0'), - *connect('identity0', 'output0'), - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output0', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/loader_test.py b/tools/mo/unit_tests/mo/front/tf/loader_test.py deleted file mode 100644 index dd02b83e6a02d3..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/loader_test.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest.mock -from io import StringIO -from openvino.tools.mo.front.tf.loader import load_tf_graph_def -from unittest.mock import Mock, MagicMock - - -class TestLoader(unittest.TestCase): - @unittest.mock.patch('sys.stdout', new_callable=StringIO) - def test_helper_print_ckpt(self, out): - for path in ['/path/to/somewhere/my_checkpoint.ckpt', '/path/to/somewhere/my_meta_graph.meta']: - mock = Mock(__bool__=MagicMock(side_effect=Exception())) - self.assertRaises(Exception, load_tf_graph_def, path, meta_graph_file=mock) - self.assertRegex(out.getvalue(), - r'\[ WARNING ] The value for the --input_model command line parameter ends with "\.ckpt"') diff --git a/tools/mo/unit_tests/mo/front/tf/mvn_unrolled_test.py b/tools/mo/unit_tests/mo/front/tf/mvn_unrolled_test.py deleted file mode 100644 index 92e6f39782b92b..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/mvn_unrolled_test.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.tf.mvn_unrolled import MVNUnrolled -from openvino.tools.mo.ops.mvn import MVN -from openvino.tools.mo.ops.op import Op -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_attrs - - -class MVNUnrolledMatchingTests(unittest.TestCase): - @classmethod - def setUpClass(cls): - Op.registered_ops['MVN'] = MVN - - def test(self): - pattern_matcher = MVNUnrolled() - pattern = pattern_matcher.pattern() - graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], update_edge_attrs=None, - new_nodes_with_attrs=[('reduction_indicies', {'kind': 'data'}), - ('conv2d', {'kind': 'op'}), - ('variance_reduction', {'kind': 'data'}), - ('pow2', {'kind': 'data'}), - ('eps', {'kind': 'data'}), - ('next_op', {'kind': 'op'})], - new_edges_with_attrs=[('reduction_indicies', 'mean', {'in': 1}), - ('conv2d', 'mean',{'in': 0, 'out': 1}), - ('variance_reduction', 'variance', {'in': 1}), - ('pow2', 'pow', {'in': 1}), - ('eps', 'add'), ('truediv', 'next_op')]) - graph.graph['layout'] = 'NHWC' - pattern_matcher.find_and_replace_pattern(graph) - - graph_ref = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'][:-1], - edges_with_attrs=pattern['edges'][:-2], update_edge_attrs=None, - new_nodes_with_attrs=[('reduction_indicies', {'kind':'data'}), - ('conv2d', {'kind':'op'}), - ('variance_reduction', {'kind':'data'}), - ('pow2', {'kind': 'data'}), - ('eps', {'kind': 'data'}), - ('mvn', {'kind': 'op', 'op': 'MVN'}), - ('next_op', {'kind': 'op'})], - new_edges_with_attrs=[('reduction_indicies', 'mean', {'in':1}), - ('conv2d', 'mean', {'in': 0}), - ('variance_reduction', 'variance',{'in': 1}), - ('pow2', 'pow', {'in': 1}), - ('eps', 'add'), - ('conv2d', 'mvn',{'in': 0}), - ('reduction_indicies', 'mvn', {'in': 1}), - ('variance_reduction', 'mvn',{'in': 2}), - ('pow2', 'mvn', {'in': 
3}), - ('eps', 'mvn',{'in': 4}), - ('mvn', 'next_op')]) - - (flag, resp) = compare_graphs(graph, graph_ref, 'next_op', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/next_iteration_ext_test.py b/tools/mo/unit_tests/mo/front/tf/next_iteration_ext_test.py deleted file mode 100644 index abbb6bfc6c2cb8..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/next_iteration_ext_test.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.front.tf.next_iteration_ext import NextIterationExtractor -from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass - - -class TestNextIteration(BaseExtractorsTestingClass): - def test_is_cyclic(self): - pb = PB({}) - node = PB({'pb': pb}) - NextIterationExtractor.extract(node) - self.expected = { - 'is_cyclic': True, - } - self.res = node - self.compare() diff --git a/tools/mo/unit_tests/mo/front/tf/pad_tf_to_pad_test.py b/tools/mo/unit_tests/mo/front/tf/pad_tf_to_pad_test.py deleted file mode 100644 index 9c3179a1ea2c55..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/pad_tf_to_pad_test.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.tf.pad_tf_to_pad import PadTFToPad -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - -nodes_attributes = { - 'placeholder': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'tfpad': {'type': None, 'kind': 'op', 'op': 'TFPad', 'mode': 'constant', 'name': 'tfpad_name'}, - **const('paddings', int64_array([1, 2, 3, 4, 5, 6]).reshape([3, 2])), - **const('fill', float_array(5.75)), - 'result': {'type': 'Result', 'value': None, 'kind': 'op', 'op': 'Result'}, - - # new Pad layer and sub-graph - 'pad': {'type': 'Pad', 'kind': 'op', 'op': 'Pad', 'mode': 'constant'}, - 'transpose': {'type': 'Transpose', 'kind': 'op', 'op': 'Transpose'}, - **const('transpose_order', int64_array([1, 0])), - 'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 2}, - **const('split_axis', int64_array(0)), - 'squeeze_1': {'type': 'Squeeze', 'kind': 'op', 'op': 'Squeeze'}, - **const('squeeze_1_axis', int64_array([0])), - 'squeeze_2': {'type': 'Squeeze', 'kind': 'op', 'op': 'Squeeze'}, - **const('squeeze_2_axis', int64_array([0])), - 'convert_like': {'type': 'ConvertLike', 'kind': 'op', 'op': 'ConvertLike'}, - - **const('pad_fill', np.array(0.0)), -} - -common_edges = [('placeholder', 'pad', {'in': 0, 'out': 0}), - - ('paddings', 'transpose', {'in': 0, 'out': 0}), - ('transpose_order', 'transpose', {'in': 1, 'out': 0}), - - ('transpose', 'split', {'in': 0, 'out': 0}), - ('split_axis', 'split', {'in': 1, 'out': 0}), - - ('split', 'squeeze_1', {'in': 0, 'out': 0}), - ('squeeze_1_axis', 'squeeze_1', {'in': 1, 'out': 0}), - - ('split', 'squeeze_2', {'in': 0, 'out': 1}), - ('squeeze_2_axis', 'squeeze_2', {'in': 1, 'out': 0}), - - ('squeeze_1', 'pad', {'in': 1, 'out': 0}), - ('squeeze_2', 'pad', {'in': 2, 'out': 0}), - - ('pad', 'result') - ] - - -class PadTFToPadTest(unittest.TestCase): - def _run_test(self, graph, graph_ref): - graph.graph['layout'] = 'NHWC' - graph.stage = 'front' - - replacer = PadTFToPad() - replacer.find_and_replace_pattern(graph) - - (flag, 
resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(graph.node[graph.get_nodes_with_attributes(op='Pad')[0]]['name'] == 'tfpad_name') - self.assertTrue(flag, resp) - - def test_2_inputs(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'tfpad', {'in': 0, 'out': 0}), - ('paddings', 'tfpad', {'in': 1, 'out': 0}), - ('tfpad', 'result', {'in': 0, 'out': 0}), - ], - {}, nodes_with_edges_only=True) - graph.get_op_nodes(op='TFPad')[0].add_input_port(2) - - graph_ref = build_graph(nodes_attributes, common_edges, - {}, nodes_with_edges_only=True) - self._run_test(graph, graph_ref) - - def test_3_inputs(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'tfpad', {'in': 0, 'out': 0}), - ('paddings', 'tfpad', {'in': 1, 'out': 0}), - ('fill', 'tfpad', {'in': 2, 'out': 0}), - ('tfpad', 'result', {'in': 0, 'out': 0}), - ], - {}, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, common_edges + [('fill', 'pad', {'in': 3, 'out': 0})], - {}, nodes_with_edges_only=True) - - self._run_test(graph, graph_ref) - - def test_3_inputs_with_non_constant_pad(self): - updated_paddings_attrs = {'type': 'Parameter', 'op': 'Parameter', 'value': None} - graph = build_graph(nodes_attributes, - [('placeholder', 'tfpad', {'in': 0, 'out': 0}), - ('paddings', 'tfpad', {'in': 1, 'out': 0}), - ('fill', 'tfpad', {'in': 2, 'out': 0}), - ('tfpad', 'result', {'in': 0, 'out': 0}), - ], - {'paddings': updated_paddings_attrs}, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, common_edges + [('fill', 'pad', {'in': 3, 'out': 0})], - {'paddings': updated_paddings_attrs}, nodes_with_edges_only=True) - - self._run_test(graph, graph_ref) - - def test_2_inputs_non_constant_mode(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'tfpad', {'in': 0, 'out': 0}), - ('paddings', 'tfpad', {'in': 1, 'out': 0}), - ('tfpad', 'result', {'in': 0, 'out': 0}), - ], - {'tfpad': {'mode': 'reflect'}}, nodes_with_edges_only=True) - graph.get_op_nodes(op='TFPad')[0].add_input_port(2) - - graph_ref = build_graph(nodes_attributes, common_edges, - {'pad': {'mode': 'reflect'}}, nodes_with_edges_only=True) - self._run_test(graph, graph_ref) diff --git a/tools/mo/unit_tests/mo/front/tf/sparse_to_dense_replacer_test.py b/tools/mo/unit_tests/mo/front/tf/sparse_to_dense_replacer_test.py deleted file mode 100644 index 1a340b2b92d84b..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/sparse_to_dense_replacer_test.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.tf.sparse_to_dense_replacer import SparseToDenseReplacer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, const - - -class SparseToDenseFrontReplacersTest(unittest.TestCase): - def test1(self): - nodes_attributes = { - 'input_indices': {'shape': int64_array([5, 2]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'input_values': {'shape': int64_array([5]), 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - - 'sparse_to_dense': {'kind': 'op', 'op': 'SparseToDense'}, - 'broadcast': {'kind': 'op', 'op': 'Broadcast'}, - 'scatternd': {'kind': 'op', 'op': 'ScatterNDUpdate'}, - - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': 'Result'}, - - **const('input_dense_shape', int64_array([50, 40])), - 
**const('input_default_value', int64_array(0))} - - graph = build_graph(nodes_attributes, - [('input_indices', 'sparse_to_dense', {'out': 0, 'in': 0}), - ('input_dense_shape', 'sparse_to_dense', {'out': 0, 'in': 1}), - ('input_values', 'sparse_to_dense', {'out': 0, 'in': 2}), - ('input_default_value', 'sparse_to_dense', {'out': 0, 'in': 3}), - ('sparse_to_dense', 'last', {'out': 0, 'in': 0})], - nodes_with_edges_only=True) - graph.stage = 'front' - SparseToDenseReplacer().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes_attributes, - [('input_default_value', 'broadcast', {'in': 0}), - ('input_dense_shape', 'broadcast', {'in': 1}), - ('broadcast', 'scatternd', {'in': 0}), - ('input_indices', 'scatternd', {'in': 1}), - ('input_values', 'scatternd', {'in': 2}), - ('scatternd', 'last', {'in': 0})], - nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/front/tf/test_configs/config1.config b/tools/mo/unit_tests/mo/front/tf/test_configs/config1.config deleted file mode 100755 index 22a47732c22de9..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/test_configs/config1.config +++ /dev/null @@ -1,17 +0,0 @@ -model { - ssd { - num_classes: 90 - image_resizer { - fixed_shape_resizer { - height: 300 - width: 300 - } - } - anchor_generator { - ssd_anchor_generator { - num_layers: 6 - } - } - } -} - diff --git a/tools/mo/unit_tests/mo/front/tf/test_configs/config2.config b/tools/mo/unit_tests/mo/front/tf/test_configs/config2.config deleted file mode 100755 index 0e837a7139129c..00000000000000 --- a/tools/mo/unit_tests/mo/front/tf/test_configs/config2.config +++ /dev/null @@ -1,11 +0,0 @@ -model { - ssd { - num_classes: 10 - image_resizer { - fixed_shape_resizer { - height: 640 - width: 640 - } - } - } -} diff --git a/tools/mo/unit_tests/mo/frontend_ngraph_test.py b/tools/mo/unit_tests/mo/frontend_ngraph_test.py deleted file mode 100644 index d7571bc36c7169..00000000000000 --- a/tools/mo/unit_tests/mo/frontend_ngraph_test.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import os -import subprocess -import sys -import unittest -from unittest.mock import patch - -from openvino.tools.mo.subprocess_main import setup_env, subprocess_main - -import pytest - - -class TestNoInferenceEngine(unittest.TestCase): - @patch('openvino.tools.mo.utils.find_ie_version.find_ie_version') - def test_no_ie_ngraph(self, mock_find): - mock_find.return_value = False - with pytest.raises(SystemExit) as e, self.assertLogs(log.getLogger(), level="ERROR") as cm: - subprocess_main() - assert e.value.code == 1 - res = [i for i in cm.output if - 'Consider building the OpenVINO and Python APIs from sources' in i] - assert res - -@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") == 'true', reason="Ticket - 113358") -def test_frontends(): - setup_env() - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(__file__), 'frontend_ngraph_test_actual.py'), '-s'] - - status = subprocess.run(args, env=os.environ) - assert not status.returncode - -@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") == 'true', reason="Ticket - 113358") -def test_moc_extractor(): - setup_env() - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(__file__), 'moc_frontend/moc_extractor_test_actual.py'), '-s'] - - status = subprocess.run(args, env=os.environ) - assert not status.returncode - - -def 
test_moc_preprocessing(): - setup_env() - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(__file__), 'back/moc_preprocessing_test_actual.py'), '-s'] - - status = subprocess.run(args, env=os.environ) - assert not status.returncode - - -def test_main_test(): - setup_env() - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(__file__), 'main_test_actual.py'), '-s'] - - status = subprocess.run(args, env=os.environ) - assert not status.returncode - - -@pytest.mark.xfail(reason="Mismatched error messages due to namespace redesign.") -def test_main_error_log(): - setup_env() - args = [sys.executable, - os.path.join(os.path.dirname(__file__), 'main_test_error_log.py')] - - status = subprocess.run(args, env=os.environ, capture_output=True) - test_log = status.stderr.decode("utf-8").replace("\r\n", "\n") - - # Check that log has exactly one warning from parse_args and - # exactly one error message "FW ERROR" - ref_log = "[ WARNING ] warning\n[ FRAMEWORK ERROR ] FW ERROR MESSAGE\n" - - assert test_log == ref_log - - -def test_mo_convert_logger(): - setup_env() - args = [sys.executable, - os.path.join(os.path.dirname(__file__), 'convert/logger_test_actual.py')] - - status = subprocess.run(args, env=os.environ, capture_output=True) - test_log = status.stdout.decode("utf-8").replace("\r\n", "\n") - - assert "test message 1" in test_log - assert "test message 2" in test_log - assert "test message 3" in test_log - - assert test_log.count("[ SUCCESS ] Total execution time") == 2 - - -@pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") == 'true', reason="Ticket - 115084") -def test_rt_info(): - setup_env() - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(__file__), 'convert/meta_data_test_actual.py'), '-s'] - - status = subprocess.run(args, env=os.environ, capture_output=True) - assert not status.returncode - - -def test_mo_extensions_test(): - setup_env() - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(__file__), 'extensions_test_actual.py'), '-s'] - - status = subprocess.run(args, env=os.environ) - assert not status.returncode - - -@pytest.mark.skipif(sys.version_info > (3, 10), reason="Ticket: 95904") -def test_mo_fallback_test(): - setup_env() - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(__file__), 'utils/mo_fallback_test_actual.py'), '-s'] - - status = subprocess.run(args, env=os.environ) - assert not status.returncode - - -def test_mo_model_analysis(): - setup_env() - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(__file__), 'utils/test_mo_model_analysis_actual.py'), '-s'] - - status = subprocess.run(args, env=os.environ) - assert not status.returncode - - -def test_convert_impl_tmp_irs_cleanup(): - setup_env() - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(__file__), 'utils', 'convert_impl_tmp_irs_cleanup_test_actual.py')] - - status = subprocess.run(args, env=os.environ) - assert not status.returncode diff --git a/tools/mo/unit_tests/mo/frontend_ngraph_test_actual.py b/tools/mo/unit_tests/mo/frontend_ngraph_test_actual.py deleted file mode 100644 index 759acb3984f883..00000000000000 --- a/tools/mo/unit_tests/mo/frontend_ngraph_test_actual.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import io -import re -import unittest -from contextlib import redirect_stdout -from unittest.mock import patch - -import numpy as np - -import pytest - - 
-mock_available = True -try: - # pylint: disable=no-name-in-module,import-error - from openvino.tools.mo.main import main - - # pylint: disable=no-name-in-module,import-error - from mock_mo_python_api import get_frontend_statistic, get_model_statistic, \ - clear_frontend_statistic, clear_model_statistic, clear_place_statistic, \ - mock_return_partial_shape - - # pylint: disable=no-name-in-module,import-error - from openvino.runtime import PartialShape - from openvino.frontend import FrontEndManager - from openvino.runtime.utils.types import get_element_type - -except Exception: - print("No mock frontend API available, " - "ensure to use -DENABLE_TESTS=ON option when running these tests") - mock_available = False - -# FrontEndManager shall be initialized and destroyed after all tests finished -# This is because destroy of FrontEndManager will unload all plugins, -# no objects shall exist after this -if mock_available: - fem = FrontEndManager() - -mock_needed = pytest.mark.skipif(not mock_available, - reason="mock MO fe is not available") - - -def replaceArgsHelper(log_level='DEBUG', - silent=False, - model_name='abc', - input_model='abc.test_mo_mock_mdl', - transform=[], - scale=None, - output=None, - _input=None, - input_shape=None, - batch=None, - mean_values=None, - scale_values=None, - layout=None, - source_layout=None, - target_layout=None, - output_dir='.', - freeze_placeholder_with_value=None): - return argparse.Namespace( - log_level=log_level, - silent=silent, - model_name=model_name, - input_model=input_model, - transform=transform, - scale=scale, - output=output, - input=_input, - input_shape=input_shape, - batch=batch, - mean_values=mean_values, - scale_values=scale_values, - layout=layout, - source_layout=source_layout, - target_layout=target_layout, - output_dir=output_dir, - freeze_placeholder_with_value=freeze_placeholder_with_value, - use_legacy_frontend=None, - use_new_frontend=None, - framework=None) - - -class TestMainFrontend(unittest.TestCase): - def setUp(self): - clear_frontend_statistic() - clear_model_statistic() - clear_place_statistic() - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper()) - def test_simple_convert(self, mock_argparse): - f = io.StringIO() - with redirect_stdout(f): - main(argparse.ArgumentParser(), fem, 'openvino_mock_mo_frontend') - out = f.getvalue() - - xml_file = re.search(r'\[ SUCCESS \] XML file: (.*)', out).\ - group(1).replace("\r", "") - bin_file = re.search(r'\[ SUCCESS \] BIN file: (.*)', out).\ - group(1).replace("\r", "") - assert xml_file and bin_file - - # verify that 'convert' was called, and 'supported' was not - stat = get_frontend_statistic() - assert stat.convert_model == 1 - assert stat.supported == 0 - # verify that meta info is added to XML file - with open(xml_file) as file: - assert 'openvino_mock_mo_frontend' in file.read() - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper()) - def test_convert_framework_discover(self, mock_argparse): - f = io.StringIO() - with redirect_stdout(f): - main(argparse.ArgumentParser(), fem, None) - out = f.getvalue() - - xml_file = re.search(r'\[ SUCCESS \] XML file: (.*)', out). \ - group(1).replace("\r", "") - bin_file = re.search(r'\[ SUCCESS \] BIN file: (.*)', out). 
\ - group(1).replace("\r", "") - assert xml_file and bin_file - - # verify that 'convert', 'supported' and 'get_name' were called - stat = get_frontend_statistic() - assert stat.convert_model == 1 - assert stat.supported == 1 - assert stat.get_name > 0 - - # verify that meta info is added to XML file - with open(xml_file) as file: - assert 'openvino_mock_mo_frontend' in file.read() - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper(_input='newInput1,mock_input2')) - def test_override_inputs(self, mock_argparse): - - main(argparse.ArgumentParser(), fem, 'openvino_mock_mo_frontend') - stat = get_model_statistic() - - # verify that 'override_all_inputs' was called - assert stat.override_all_inputs == 1 - assert stat.override_all_outputs == 0 - assert stat.extract_subgraph == 0 - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper(output='newOut1,mock_output2')) - def test_override_outputs(self, mock_argparse): - main(argparse.ArgumentParser(), fem, 'openvino_mock_mo_frontend') - stat = get_model_statistic() - - # verify that 'override_all_outputs' was called - assert stat.override_all_inputs == 0 - assert stat.override_all_outputs == 1 - assert stat.extract_subgraph == 0 - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper(_input='newIn1,newIn2', - output='newOut1,newOut2')) - def test_extract_subgraph(self, mock_argparse): - main(argparse.ArgumentParser(), fem, 'openvino_mock_mo_frontend') - stat = get_model_statistic() - - # verify that 'extract_subgraph' was called - assert stat.override_all_inputs == 0 - assert stat.override_all_outputs == 0 - assert stat.extract_subgraph == 1 - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper(_input='mock_input2,mock_input1', - output='new_output2,mock_output1')) - def test_override_same_inputs(self, mock_argparse): - - main(argparse.ArgumentParser(), fem, 'openvino_mock_mo_frontend') - stat = get_model_statistic() - - # verify that 'override_all_outputs' was called - # because inputs were not changed - assert stat.override_all_inputs == 0 - assert stat.override_all_outputs == 1 - assert stat.extract_subgraph == 0 - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper(_input='newInput1,mock_input2', - output='mock_output2,mock_output1')) - def test_override_same_outputs(self, mock_argparse): - - main(argparse.ArgumentParser(), fem, 'openvino_mock_mo_frontend') - stat = get_model_statistic() - - # verify that 'override_all_inputs' was called - # because outputs were not changed - assert stat.override_all_inputs == 1 - assert stat.override_all_outputs == 0 - assert stat.extract_subgraph == 0 - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper(_input='newIn1', - input_shape='[1,2,3,4]')) - @pytest.mark.skip(reason="Unskip as 8301 will be merged") - def test_input_shape(self, mock_argparse): - main(argparse.ArgumentParser(), fem, 'openvino_mock_mo_frontend') - stat = get_model_statistic() - - # verify that 'set_partial_shape' was called - assert stat.set_partial_shape == 1 - assert stat.lastArgPartialShape == PartialShape([1, 2, 3, 4]) - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper(_input='newIn1{i8}')) - @pytest.mark.skip(reason="Unskip as 8301 will be merged") - def test_element_type(self, mock_argparse): - main(argparse.ArgumentParser(), fem, 
'openvino_mock_mo_frontend') - stat = get_model_statistic() - - # verify that 'set_element_type' was called - assert stat.set_element_type == 1 - assert stat.lastArgElementType == get_element_type(np.int8) - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper(batch=123)) - @pytest.mark.skip(reason="Unskip as 8301 will be merged") - def test_set_batch_size(self, mock_argparse): - mock_return_partial_shape(PartialShape([-1, 2, 3, 4])) - main(argparse.ArgumentParser(), fem, 'openvino_mock_mo_frontend') - stat = get_model_statistic() - - # verify that 'set_element_type' was called - # 2 is because mock model has 2 inputs - assert stat.get_partial_shape == 2 - assert stat.set_partial_shape == 2 - assert stat.lastArgPartialShape == PartialShape([123, 2, 3, 4]) - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper(batch=123)) - def test_error_batch(self, mock_argparse): - # First dimension doesn't look like a batch, - # so MO shall not convert anything and produce specified error - mock_return_partial_shape(PartialShape([122, 2, 3, 4])) - with self.assertLogs() as logger: - main(argparse.ArgumentParser(), fem, 'openvino_mock_mo_frontend') - - stat = get_model_statistic() - - assert [s for s in logger.output if 'question=39' in s] - - # verify that 'get_element_type' was called - assert stat.get_partial_shape == 1 - # verify that 'set_element_type' was not called - assert stat.set_partial_shape == 0 - - @mock_needed - @patch('argparse.ArgumentParser.parse_args', - return_value=replaceArgsHelper(input_model='abc.qwerty')) - def test_error_input_model_no_framework(self, mock_argparse): - # Framework is not specified and 'abc.qwerty' is not supported - # so MO shall not convert anything and produce specified error - with self.assertLogs() as logger: - main(argparse.ArgumentParser(), fem, None) - - stat = get_frontend_statistic() - - assert [s for s in logger.output if 'can not be deduced' in s] - - # verify that 'supported' was called - assert stat.supported == 1 diff --git a/tools/mo/unit_tests/mo/graph/__init__.py b/tools/mo/unit_tests/mo/graph/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/graph/connection_test.py b/tools/mo/unit_tests/mo/graph/connection_test.py deleted file mode 100644 index 03c94cacd6833a..00000000000000 --- a/tools/mo/unit_tests/mo/graph/connection_test.py +++ /dev/null @@ -1,621 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op - -nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('Op1', {'type': 'Op1', 'kind': 'op', 'op': 'Op1'}), - **regular_op('Op2', {'type': 'Op2', 'kind': 'op', 'op': 'Op2'}), - **regular_op('NewOp', {'type': 'NewOp', 'kind': 'op', 'op': 'NewOp'}), - - 'input_data': {'kind': 'data', 'fw_tensor_debug_info': [('input', 'input')]}, - 'Op1_data': {'kind': 'data', 'fw_tensor_debug_info': [('Op1', 'Op1')]}, - 'Op2_data': {'kind': 'data', 'fw_tensor_debug_info': [('Op2', 'Op2')]}, - 'NewOp_data': {'kind': 'data'}, -} - - -class TestsFront(unittest.TestCase): - def check_graph_attrs_front(self, graph: Graph, graph_ref: Graph): - for node in graph_ref.get_op_nodes(): - if len(node.out_edges()) > 0: - out_edge_ref = node.out_edge(0) - out_edge = 
Node(graph, node.id).out_edge(0) - if 'fw_tensor_debug_info' in out_edge_ref: - self.assertTrue(out_edge['fw_tensor_debug_info'] == out_edge_ref['fw_tensor_debug_info']) - else: - self.assertFalse('fw_tensor_debug_info' in out_edge) - - def test_case1_merge(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - graph_ref = build_graph(nodes, [ - ('input', 'NewOp', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - - input_node = Node(graph, 'input') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - - graph.stage = 'front' - new_node.in_port(0).get_connection().set_source(input_node.out_port(0), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case1_source(self): - graph = build_graph(nodes, [ - ('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - graph_ref = build_graph(nodes, [ - ('input', 'NewOp', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - - input_node = Node(graph, 'input') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - - graph.stage = 'front' - new_node.in_port(0).get_connection().set_source(input_node.out_port(0), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case1_dest(self): - graph = build_graph(nodes, [ - ('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - graph_ref = build_graph(nodes, [ - ('input', 'NewOp', {'in': 0, 'out': 0})]) - - input_node = Node(graph, 'input') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - - graph.stage = 'front' - new_node.in_port(0).get_connection().set_source(input_node.out_port(0), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case2_merge(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - graph_ref = build_graph(nodes, [ - ('input', 'NewOp', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - - graph.stage = 'front' - op1_node.in_port(0).get_connection().set_destination(new_node.in_port(0), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case2_source(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - graph_ref = build_graph(nodes, [ - ('input', 'NewOp', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - - graph.stage = 'front' - op1_node.in_port(0).get_connection().set_destination(new_node.in_port(0), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case2_dest(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 
'input')]})]) - graph_ref = build_graph(nodes, [('input', 'NewOp', {'in': 0, 'out': 0})]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - - graph.stage = 'front' - op1_node.in_port(0).get_connection().set_destination(new_node.in_port(0), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case3_merge(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - graph_ref = build_graph(nodes, [ - ('NewOp', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - - graph.stage = 'front' - op1_node.in_port(0).get_connection().set_source(new_node.out_port(0), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case3_source(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - graph_ref = build_graph(nodes, [('NewOp', 'Op1', {'in': 0, 'out': 0})]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - - graph.stage = 'front' - op1_node.in_port(0).get_connection().set_source(new_node.out_port(0), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case3_dest(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - graph_ref = build_graph(nodes, [ - ('NewOp', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - - graph.stage = 'front' - op1_node.in_port(0).get_connection().set_source(new_node.out_port(0), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case4_merge(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - graph_ref = build_graph(nodes, [ - ('NewOp', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input')]})]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - - graph.stage = 'front' - new_node.out_port(0).get_connection().set_destination(op1_node.in_port(0), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case4_source(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]})]) - graph_ref = build_graph(nodes, [('NewOp', 'Op1', {'in': 0, 'out': 0})]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - - graph.stage = 'front' - new_node.out_port(0).get_connection().set_destination(op1_node.in_port(0), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, 
graph_ref) - - def test_case4_dest(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]})]) - graph_ref = build_graph(nodes, [ - ('NewOp', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]})]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - - graph.stage = 'front' - new_node.out_port(0).get_connection().set_destination(op1_node.in_port(0), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case5_merge(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]}), - ('Op1', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op1', 0, 'Op1')]})]) - graph_ref = build_graph(nodes, [ - ('input', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input'), ('Op1', 0, 'Op1')]})]) - op1_node = Node(graph, 'Op1') - - inp_node = Node(graph, 'input') - op2_node = Node(graph, 'Op2') - graph.stage = 'front' - op1_node.out_port(0).get_connection().set_source(op1_node.in_port(0).get_source(), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case5_source(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]}), - ('Op1', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op1', 0, 'Op1')]})]) - graph_ref = build_graph(nodes, [ - ('input', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]})]) - op1_node = Node(graph, 'Op1') - - graph.stage = 'front' - op1_node.out_port(0).get_connection().set_source(op1_node.in_port(0).get_source(), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case5_dest(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]}), - ('Op1', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op1', 0, 'Op1')]})]) - graph_ref = build_graph(nodes, - [('input', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op1', 0, 'Op1')]})]) - op1_node = Node(graph, 'Op1') - - graph.stage = 'front' - op1_node.out_port(0).get_connection().set_source(op1_node.in_port(0).get_source(), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case6_merge(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]}), - ('Op1', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op1', 0, 'Op1')]})]) - graph_ref = build_graph(nodes, [ - ('input', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input'), ('Op1', 0, 'Op1')]})]) - op1_node = Node(graph, 'Op1') - - graph.stage = 'front' - op1_node.in_port(0).get_connection().set_destination(op1_node.out_port(0).get_destination(), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case6_source(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 
'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]}), - ('Op1', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op1', 0, 'Op1')]})]) - graph_ref = build_graph(nodes, [ - ('input', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]})]) - op1_node = Node(graph, 'Op1') - - graph.stage = 'front' - op1_node.in_port(0).get_connection().set_destination(op1_node.out_port(0).get_destination(), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - def test_case6_dest(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 0, 'input')]}), - ('Op1', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op1', 0, 'Op1')]})]) - graph_ref = build_graph(nodes, - [('input', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op1', 0, 'Op1')]})]) - op1_node = Node(graph, 'Op1') - - graph.stage = 'front' - op1_node.in_port(0).get_connection().set_destination(op1_node.out_port(0).get_destination(), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_front(graph, graph_ref) - - -class TestsMiddle(unittest.TestCase): - def check_graph_attrs_middle(self, graph: Graph, graph_ref: Graph): - for node in graph_ref.get_op_nodes(): - if len(node.out_nodes()) > 0: - data_node_ref = node.out_node(0) - data_node = Node(graph, node.id).out_node(0) - if 'fw_tensor_debug_info' in data_node_ref: - self.assertTrue(data_node_ref['fw_tensor_debug_info'] == data_node['fw_tensor_debug_info']) - else: - self.assertFalse('fw_tensor_debug_info' in data_node) - - def test_case1_merge(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('input_data', 'NewOp')]) - input_node = Node(graph, 'input') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - new_node.in_port(0).get_connection().set_source(input_node.out_port(0), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case1_source(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('input_data', 'NewOp')]) - input_node = Node(graph, 'input') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - new_node.in_port(0).get_connection().set_source(input_node.out_port(0), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case1_dest(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('input_data', 'NewOp')]) - - input_node_data = Node(graph_ref, 'input_data') - del input_node_data['fw_tensor_debug_info'] - - input_node = Node(graph, 'input') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - new_node.in_port(0).get_connection().set_source(input_node.out_port(0), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def 
test_case2_merge(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), - ('input_data', 'NewOp')]) - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - op1_node.in_port(0).get_connection().set_destination(new_node.in_port(0), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case2_source(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), - ('input_data', 'NewOp')]) - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - op1_node.in_port(0).get_connection().set_destination(new_node.in_port(0), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case2_dest(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), - ('input_data', 'NewOp')]) - - input_node_data = Node(graph_ref, 'input_data') - del input_node_data['fw_tensor_debug_info'] - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_input_port(0) - op1_node.in_port(0).get_connection().set_destination(new_node.in_port(0), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'NewOp', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case3_merge(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('NewOp', 'NewOp_data'), ('NewOp_data', 'Op1')]) - - new_op_data = Node(graph_ref, 'NewOp_data') - new_op_data['fw_tensor_debug_info'] = [('input', 'input')] - - input_data = Node(graph_ref, 'input_data') - del input_data['fw_tensor_debug_info'] - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - op1_node.in_port(0).get_connection().set_source(new_node.out_port(0), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case3_source(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('NewOp', 'NewOp_data'), ('NewOp_data', 'Op1')]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - op1_node.in_port(0).get_connection().set_source(new_node.out_port(0), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case3_dest(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('NewOp', 'NewOp_data'), ('NewOp_data', 'Op1')]) - - new_op_data = Node(graph_ref, 'NewOp_data') - new_op_data['fw_tensor_debug_info'] = [('input', 'input')] - - input_data = Node(graph_ref, 'input_data') - del input_data['fw_tensor_debug_info'] - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - 
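# (As the reference graphs in these cases indicate, the optional second argument
#  of set_source()/set_destination() selects which side's 'fw_tensor_debug_info'
#  survives the rewiring: "source" keeps the attributes of the new source, "dest"
#  keeps the attributes the destination previously received, and "merge"
#  concatenates both lists.)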
op1_node.in_port(0).get_connection().set_source(new_node.out_port(0), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case4_merge(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('NewOp', 'NewOp_data'), ('NewOp_data', 'Op1')]) - - new_op_data = Node(graph_ref, 'NewOp_data') - new_op_data['fw_tensor_debug_info'] = [('input', 'input')] - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - new_node.out_port(0).get_connection().set_destination(op1_node.in_port(0), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case4_source(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('NewOp', 'NewOp_data'), ('NewOp_data', 'Op1')]) - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - new_node.out_port(0).get_connection().set_destination(op1_node.in_port(0), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case4_dest(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('NewOp', 'NewOp_data'), ('NewOp_data', 'Op1')]) - - new_op_data = Node(graph_ref, 'NewOp_data') - new_op_data['fw_tensor_debug_info'] = [('input', 'input')] - - op1_node = Node(graph, 'Op1') - new_node = Node(graph, 'NewOp') - new_node.add_output_port(0) - new_node.out_port(0).get_connection().set_destination(op1_node.in_port(0), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op1', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case5_merge(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data'), ('Op1_data', 'Op2')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data'), ('input_data', 'Op2')]) - - input_data = Node(graph_ref, 'input_data') - input_data['fw_tensor_debug_info'] = [('input', 'input'), ('Op1', 'Op1')] - - op1_data = Node(graph_ref, 'Op1_data') - del op1_data['fw_tensor_debug_info'] - - op1_node = Node(graph, 'Op1') - op1_node.out_port(0).get_connection().set_source(op1_node.in_port(0).get_source(), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case5_source(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data'), ('Op1_data', 'Op2')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data'), ('input_data', 'Op2')]) - - input_data = Node(graph_ref, 'input_data') - input_data['fw_tensor_debug_info'] = [('input', 'input')] - - op1_node = Node(graph, 'Op1') - op1_node.out_port(0).get_connection().set_source(op1_node.in_port(0).get_source(), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - 
self.check_graph_attrs_middle(graph, graph_ref) - - def test_case5_dest(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data'), ('Op1_data', 'Op2')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data'), ('input_data', 'Op2')]) - - input_data = Node(graph_ref, 'input_data') - input_data['fw_tensor_debug_info'] = [('Op1', 'Op1')] - - op1_data = Node(graph_ref, 'Op1_data') - del op1_data['fw_tensor_debug_info'] - - op1_node = Node(graph, 'Op1') - op1_node.out_port(0).get_connection().set_source(op1_node.in_port(0).get_source(), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case6_merge(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data'), ('Op1_data', 'Op2')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op2'), - ('Op1', 'Op1_data')]) - - input_data = Node(graph_ref, 'input_data') - input_data['fw_tensor_debug_info'] = [('input', 'input'), ('Op1', 'Op1')] - - op1_node = Node(graph, 'Op1') - op1_node.in_port(0).get_connection().set_destination(op1_node.out_port(0).get_destination(), "merge") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case6_source(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data'), ('Op1_data', 'Op2')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op2'), - ('Op1', 'Op1_data')]) - - input_data = Node(graph_ref, 'input_data') - input_data['fw_tensor_debug_info'] = [('input', 'input')] - - op1_node = Node(graph, 'Op1') - op1_node.in_port(0).get_connection().set_destination(op1_node.out_port(0).get_destination(), "source") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) - - def test_case6_dest(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data'), ('Op1_data', 'Op2')]) - graph_ref = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op2'), - ('Op1', 'Op1_data')]) - - input_data = Node(graph_ref, 'input_data') - input_data['fw_tensor_debug_info'] = [('Op1', 'Op1')] - - op1_node = Node(graph, 'Op1') - op1_node.in_port(0).get_connection().set_destination(op1_node.out_port(0).get_destination(), "dest") - - (flag, resp) = compare_graphs(graph, graph_ref, 'Op2', check_op_attrs=True) - self.assertTrue(flag, resp) - self.check_graph_attrs_middle(graph, graph_ref) diff --git a/tools/mo/unit_tests/mo/graph/graph_test.py b/tools/mo/unit_tests/mo/graph/graph_test.py deleted file mode 100644 index 6aa770b5994ed3..00000000000000 --- a/tools/mo/unit_tests/mo/graph/graph_test.py +++ /dev/null @@ -1,1867 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import pytest - -from openvino.tools.mo.graph.graph import Node, Graph, add_opoutput, dict_includes_compare_attrs, get_edge_attribute_between_nodes, \ - set_edge_attribute_between_nodes -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from 
unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from unit_tests.utils.graph import build_graph, build_graph_with_edge_attrs - -nodes = { - '0': {'name': 'input1', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '1': {'name': 'input2', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '2': {'name': 'node_1', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'NotPlaceholder'}, - '3': {'name': 'node_2', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'NotPlaceholder'}, - '4': {'name': 'node_3', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'NotPlaceholder'}, - '5': {'name': 'node_4', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'NotPlaceholder'}, - '6': {'name': 'output', 'value': None, 'kind': 'op', 'op': 'Result'}, - 'input_3': {'name': 'input_3', 'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Parameter'} -} -edges = { - ('0', '2'), - ('2', '3'), - ('4', '6'), - ('1', '5'), - ('5', '6'), - ('input_3', '6') -} - - -class TestGetNodeById(UnitTestWithMockedTelemetry): - def setUp(self): - super().setUp() - self.graph = build_graph(nodes, edges) - - def test_get_node_id_by_name(self): - self.assertEqual(self.graph.get_node_id_by_name('input1'), '0') - - def test_get_node_id_by_name_1(self): - self.assertEqual(self.graph.get_node_id_by_name('input2'), '1') - - def test_get_node_id_by_name_2(self): - self.assertEqual(self.graph.get_node_id_by_name('node_1'), '2') - - def test_get_node_id_by_name_3(self): - self.assertEqual(self.graph.get_node_id_by_name('node_2'), '3') - - def test_get_node_id_by_name_4(self): - self.assertEqual(self.graph.get_node_id_by_name('node_3'), '4') - - def test_get_node_id_by_name_5(self): - self.assertEqual(self.graph.get_node_id_by_name('node_4'), '5') - - def test_get_node_id_by_name_6(self): - self.assertEqual(self.graph.get_node_id_by_name('output'), '6') - - def test_get_node_id_by_name_7(self): - self.assertEqual(self.graph.get_node_id_by_name('input_3'), 'input_3') - - def test_get_node_id_by_name_8(self): - self.assertRaises(Error, self.graph.get_node_id_by_name, '1') - - -class TestEraseNode(unittest.TestCase): - def test_remove_noop_nodes_middle(self): - graph = build_graph( - { - 'input': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'}, - 'output': {'type': 'Identity', 'value': None, 'kind': 'op'}, - }, - [('input', 'noop'), - ('noop', 'output')]) - - self.assertEqual(len(graph.nodes()), 3) - self.assertEqual(len(graph.edges()), 2) - self.assertListEqual(list(graph.out_edges('input')), [('input', 'noop')]) - - graph.erase_node(Node(graph, 'noop')) - - self.assertEqual(len(graph.nodes()), 2) - self.assertEqual(len(graph.edges()), 1) - self.assertListEqual(list(graph.out_edges('input')), [('input', 'output')]) - - def test_remove_noop_nodes_middle_2(self): - graph = build_graph( - { - 'input': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'}, - 'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'}, - }, - [('input', 'noop'), - ('noop', 'output_1', {'in': 4, 'out': 0}), - ('noop', 'output_2', {'in': 2, 'out': 0}), - ('noop', 'output_3', {'in': 10, 'out': 0})]) - - ref_graph = build_graph( - { - 'input': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'output_1': {'type': 'Identity', 
'value': None, 'kind': 'op'}, - 'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'}, - }, - [('input', 'output_1', {'in': 4, 'out': 0}), - ('input', 'output_2', {'in': 2, 'out': 0}), - ('input', 'output_3', {'in': 10, 'out': 0})], - nodes_with_edges_only=True) - - graph.erase_node(Node(graph, 'noop')) - - compare_graphs(graph, ref_graph, 'output_1') - - def test_remove_noop_nodes_check_out_port(self): - graph = build_graph( - { - 'input': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'}, - 'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'}, - }, - [('input', 'noop'), - ('noop', 'output_1', {'in': 4, 'out': 1}), - ('noop', 'output_2', {'in': 2, 'out': 1}), - ('noop', 'output_3', {'in': 10, 'out': 1})]) - - ref_graph = build_graph( - { - 'input': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'}, - }, - [('input', 'output_1', {'in': 4, 'out': 0}), - ('input', 'output_2', {'in': 2, 'out': 0}), - ('input', 'output_3', {'in': 10, 'out': 0})], - nodes_with_edges_only=True) - - graph.erase_node(Node(graph, 'noop')) - - compare_graphs(graph, ref_graph, 'output_1') - - def test_remove_noop_nodes_too_many_outputs(self): - graph = build_graph( - { - 'input': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'}, - 'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'}, - }, - [('input', 'noop'), - ('noop', 'output_1', {'in': 4, 'out': 0}), - ('noop', 'output_2', {'in': 2, 'out': 1}), - ('noop', 'output_3', {'in': 10, 'out': 0})]) - - self.assertRaises(AssertionError, graph.erase_node, Node(graph, 'noop')) - - def test_remove_noop_nodes_front(self): - graph = build_graph( - { - 'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'}, - 'output': {'type': 'Identity', 'value': None, 'kind': 'op'} - }, - [('noop', 'output')] - ) - - self.assertEqual(len(graph.nodes()), 2) - self.assertEqual(len(graph.edges()), 1) - self.assertListEqual(list(graph.out_edges('noop')), [('noop', 'output')]) - - graph.erase_node(Node(graph, 'noop')) - - self.assertEqual(len(graph.nodes()), 1) - self.assertEqual(len(graph.edges()), 0) - self.assertEqual(len(graph.in_edges('output')), 0) - - def test_remove_noop_nodes_back(self): - graph = build_graph( - { - 'input': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'} - }, - [('input', 'noop')] - ) - - self.assertEqual(len(graph.nodes()), 2) - self.assertEqual(len(graph.edges()), 1) - self.assertListEqual(list(graph.in_edges('noop')), [('input', 'noop')]) - - graph.erase_node(Node(graph, 'noop')) - - self.assertEqual(len(graph.nodes()), 1) - self.assertEqual(len(graph.edges()), 0) - self.assertEqual(len(graph.in_edges('input')), 0) - - def test_remove_noop_nodes_noop_only(self): - graph = Graph() - graph.add_node('noop', **{'type': 'NoOp', 'value': None, 'kind': 'op'}) - - self.assertEqual(len(graph.nodes()), 1) - self.assertEqual(len(graph.edges()), 0) - - 
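# A condensed, standalone form of the erase_node() scenario covered by
# test_remove_noop_nodes_middle above (a sketch only, assuming the
# openvino.tools.mo package removed by this patch and its unit_tests helpers
# are still importable): erasing a pass-through node reconnects its producer
# directly to every former consumer.
from openvino.tools.mo.graph.graph import Node
from unit_tests.utils.graph import build_graph

sketch = build_graph(
    {'input':  {'type': 'Parameter', 'value': None, 'kind': 'op'},
     'noop':   {'type': 'NoOp',      'value': None, 'kind': 'op'},
     'output': {'type': 'Identity',  'value': None, 'kind': 'op'}},
    [('input', 'noop'), ('noop', 'output')])
sketch.erase_node(Node(sketch, 'noop'))
assert list(sketch.out_edges('input')) == [('input', 'output')]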
graph.erase_node(Node(graph, 'noop')) - - self.assertEqual(len(graph.nodes()), 0) - self.assertEqual(len(graph.edges()), 0) - - def test_remove_noop_error(self): - graph = build_graph( - { - 'input_1': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'input_2': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'input_3': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'noop': {'type': 'NoOp', 'value': None, 'kind': 'op'}, - 'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'}, - }, - [('input_1', 'noop'), - ('input_2', 'noop'), - ('input_3', 'noop'), - ('noop', 'output_1'), - ('noop', 'output_2'), - ('noop', 'output_3')]) - self.assertRaises(AssertionError, graph.erase_node, Node(graph, 'noop')) - - -class TestReplaceNode(unittest.TestCase): - def test_replace_node_one_consumer(self): - graph = build_graph( - { - 'input_1': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'input_2': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'old': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output': {'op': 'Result', 'value': None, 'kind': 'op'}, - }, - [('input_1', 'old'), - ('input_2', 'old'), - ('old', 'output')]) - - new_node = Const(graph, {'name': 'new'}).create_node([Node(graph, 'input_1'), Node(graph, 'input_2')]) - - old_node = Node(graph, 'old') - old_node.replace_node(new_node) - - self.assertEqual(len(graph.nodes()), 4) - self.assertEqual(len(graph.edges()), 3) - self.assertEqual(new_node.out_node().op, 'Result') - self.assertEqual(len(graph.out_edges('new')), 1) - - def test_replace_node_several_consumers(self): - graph = build_graph( - { - 'input_1': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'input_2': {'type': 'Parameter', 'value': None, 'kind': 'op'}, - 'old': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_1': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_2': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'output_3': {'type': 'Identity', 'value': None, 'kind': 'op'}, - }, - [('input_1', 'old'), - ('input_2', 'old'), - ('old', 'output_3'), - ('old', 'output_2'), - ('old', 'output_1'), - ]) - - new_node = Const(graph, {'name': 'new'}).create_node([Node(graph, 'input_1'), Node(graph, 'input_2')]) - Node(graph, 'old').replace_node(new_node) - - self.assertEqual(len(graph.nodes()), 6) - self.assertEqual(len(graph.edges()), 5) - self.assertListEqual(sorted(graph.out_edges('new')), [('new', 'output_1'), ('new', 'output_2'), - ('new', 'output_3')]) - expected_result = [('new', 'output_1', {'in': 0, 'out': 2, 'name': 'old'}), - ('new', 'output_2', {'in': 0, 'out': 1, 'name': 'old'}), - ('new', 'output_3', {'in': 0, 'out': 0, 'name': 'old'})] - self.assertListEqual(sorted(graph.out_edges('new', data=True)), expected_result) - - -class GetNodesWithPorts(unittest.TestCase): - def test_get_nodes_with_ports(self): - nodes = { - 'one': {}, - 'two': {}, - 'three': {}, - 'four': {}, - 'five': {} - } - edges = [ - ('one', 'two', {'in': 0, 'out': 0}), - ('two', 'three', {'in': 0, 'out': 0}), - ('two', 'four', {'in': 0, 'out': 1}), - ('two', 'five', {'in': 0, 'out': 2}), - ('three', 'five', {'in': 1, 'out': 0}) - ] - graph = build_graph(nodes, edges) - match = { - 'one': Node(graph, 'one'), - 'two': Node(graph, 'two'), - 'three': Node(graph, 'three'), - 'four': Node(graph, 'four'), - 'five': Node(graph, 'five'), - - } - input_names_in_pattern = ['one', 'three'] - result = 
graph.get_inputs_with_ports(match=match, pattern_edges=edges, - input_names_in_pattern=input_names_in_pattern) - self.assertListEqual([(match['one'], 0), (match['three'], 0)], result) - - -class TestGraphShapeChecker(unittest.TestCase): - nodes = { - '0': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '0_data': {'value': None, 'shape': None, 'kind': 'data'}, - - '1': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '1_data': {'value': None, 'shape': None, 'kind': 'data'}, - - '2': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '2_data': {'value': None, 'shape': None, 'kind': 'data'}, - } - - def test_check_shape_consistency_1(self): - # No shape attr in data node - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - - ('0_data', '1'), - ('0_data', '2') - ]) - - del graph.node['2_data']['shape'] - - with self.assertRaisesRegex(Error, r"Graph contains data nodes \(1\) with inconsistent shapes:.*"): - graph.check_shapes_consistency() - - def test_check_shape_consistency_2(self): - # No shape attr in data node - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - - ('0_data', '1'), - ('0_data', '2') - ]) - - graph.node['1_data']['shape'] = (1, 2, 3) - graph.node['2_data']['shape'] = (1, 2, 3) - - with self.assertRaisesRegex(Error, r"Graph contains data nodes \(2\) with inconsistent shapes:.*"): - graph.check_shapes_consistency() - - -class TestGraphPortsChecker(): - nodes = { - '0': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '0_data': {'value': None, 'shape': None, 'kind': 'data'}, - - '1': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '1_data': {'value': None, 'shape': None, 'kind': 'data'}, - - '2': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '2_data': {'value': None, 'shape': None, 'kind': 'data'}, - - '3': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '3_data': {'value': None, 'shape': None, 'kind': 'data'}, - } - - @pytest.mark.parametrize("node_id, port_type, port_idx",[('0', 'in', 1), ('0', 'out', 2), ('1', 'in', 2), ('3', 'out', 2)]) - def test_check_shape_consistency_1(self, node_id: str, port_type: str, port_idx: int): - # - # ,->2-->2_data---,->3-->3_data - # 0-->0_data-/-->1-->1_data--/ - # - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('3', '3_data'), - - ('0_data', '1'), - ('0_data', '2'), - ('1_data', '3'), - ('2_data', '3'), - ]) - - node = Node(graph, node_id) - if port_type == 'in': - node.add_input_port(idx=port_idx) - else: - node.add_output_port(idx=port_idx) - - with pytest.raises (Error, match= "Node {} has not consecutive {} ports indexes:.*".format(node_id, - port_type)): - graph.check_nodes_ports_are_consecutive() - - -class TestNewGraphAPIMiddle(unittest.TestCase): - - nodes = { - '0': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '0_data': {'value': None, 'shape': None, 'kind': 'data'}, - - '1': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '1_data': {'value': None, 'shape': None, 'kind': 'data'}, - - '2': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '2_data': {'value': None, 'shape': None, 'kind': 'data'}, - - '3': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '3_data': {'value': None, 'shape': None, 'kind': 'data'}, - - '4': {'type': 
'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '4_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'const_1': {'type': 'Const', 'value': None, 'kind': 'op', 'op': 'Const'}, - 'const_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - } - - nodes_10_in_10_out = { - 'op_concat': {'type': 'Concat', 'value': None, 'kind': 'op', 'op': 'Concat'}, - 'op_concat_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'op_split': {'type': 'Split', 'value': None, 'kind': 'op', 'op': 'Split'}, - } - - # Filling nodes list - for idx in range(11): - nodes_10_in_10_out.update({'in_{}'.format(idx): {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}}) - nodes_10_in_10_out.update({'in_{}_data'.format(idx): {'value': None, 'shape': None, 'kind': 'data'}}) - nodes_10_in_10_out.update({'out_{}'.format(idx): {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}}) - nodes_10_in_10_out.update({'op_split_{}_data'.format(idx): {'value': None, 'shape': None, 'kind': 'data'}}) - - ########################################### - ###### TESTS FOR PORT CLASS METHODS ####### - ########################################### - - def test_port_get_destinations_1(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - - ('0_data', '1'), - ('0_data', '2') - ]) - graph.__setattr__('stage', 'middle') - - node_0_out_port = Node(graph, '0').out_port(0) - - node_1_in_port = Node(graph, '1').in_port(0) - node_2_in_port = Node(graph, '2').in_port(0) - - ports = node_0_out_port.get_destinations() - - self.assertTrue(len(ports) == 2) - for port in ports: - self.assertTrue(port in [node_1_in_port, node_2_in_port]) - - def test_port_get_destination_1(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - - ('0_data', '1'), - ('0_data', '2') - ]) - graph.__setattr__('stage', 'middle') - - node_0_out_port = Node(graph, '0').out_port(0) - - node_1_in_port = Node(graph, '1').in_port(0) - node_2_in_port = Node(graph, '2').in_port(0) - - with self.assertRaises(Error): - node_0_out_port.get_destination() - - def test_port_get_destination_2(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('0_data', '1'), - ]) - graph.__setattr__('stage', 'middle') - - node_0_out_port = Node(graph, '0').out_port(0) - - node_1_in_port = Node(graph, '1').in_port(0) - - self.assertEqual(node_0_out_port.get_destination(), node_1_in_port) - - def test_port_get_source_1(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('0_data', '1'), - ]) - graph.__setattr__('stage', 'middle') - - node_0_out_port = Node(graph, '0').out_port(0) - - node_1_in_port = Node(graph, '1').in_port(0) - - self.assertEqual(node_1_in_port.get_source(), node_0_out_port) - - def test_port_get_source_2(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('0_data', '1'), - ('2_data', '1') - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - self.assertEqual(node_1.in_port(0).get_source(), node_0.out_port(0)) - self.assertEqual(node_1.in_port(1).get_source(), node_2.out_port(0)) - - def test_port_get_source_3(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - 
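# A standalone recap of middle-phase port navigation (a sketch under the same
# assumption that the removed openvino.tools.mo package and the unit_tests
# helpers are importable): after partial inference every producer/consumer pair
# is separated by a data node, but the port API hides that indirection.
from openvino.tools.mo.graph.graph import Node
from unit_tests.utils.graph import build_graph

mid_nodes = {
    'p':      {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'},
    'p_data': {'value': None, 'shape': None, 'kind': 'data'},
    'c':      {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'},
    'c_data': {'value': None, 'shape': None, 'kind': 'data'},
}
mid_graph = build_graph(mid_nodes, [('p', 'p_data'), ('c', 'c_data'), ('p_data', 'c')])
mid_graph.stage = 'middle'
assert Node(mid_graph, 'c').in_port(0).get_source() == Node(mid_graph, 'p').out_port(0)
assert Node(mid_graph, 'p').out_port(0).get_destination() == Node(mid_graph, 'c').in_port(0)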
node_0.add_input_port(0) - node_1.add_input_port(0) - node_2.add_input_port(0) - - self.assertEqual(node_0.in_port(0).get_source(), None) - self.assertEqual(node_1.in_port(0).get_source(), None) - self.assertEqual(node_2.in_port(0).get_source(), None) - - def test_port_disconnect_1(self): - # ,-->1-->1_data 0-->0_data - # 0-->0_data/--->2-->2_data ==> 0-->0_data 1-->1_data - # 2-->2_data - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('0_data', '1'), - ('0_data', '2') - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_0.out_port(0).disconnect() - - self.assertEqual(node_1.in_port(0).get_source(), None) - self.assertEqual(node_2.in_port(0).get_source(), None) - - self.assertTrue(len(node_1.in_nodes()) == 0) - self.assertTrue(len(node_2.in_nodes()) == 0) - - def test_port_disconnect_2(self): - # ,-->1-->1_data ,-->1-->1_data - # 0-->0_data/--->2-->2_data ==> 0-->0_data/ 2-->2_data - # - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('0_data', '1'), - ('0_data', '2') - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_2.in_port(0).disconnect() - - self.assertEqual(node_0.out_port(0).get_destination(), node_1.in_port(0)) - self.assertEqual(node_1.in_port(0).get_source(), node_0.out_port(0)) - self.assertEqual(node_2.out_port(0).get_destination(), None) - self.assertEqual(node_2.in_port(0).get_source(), None) - - self.assertTrue(len(node_0.out_nodes()) == 1) - self.assertTrue(len(node_1.in_nodes()) == 1) - self.assertTrue(len(node_2.in_nodes()) == 0) - - def test_port_disconnect_3(self): - # 1-->1_data---\ 1-->1_data - # 0-->0_data---->2-->2_data ==> 0-->0_data-->2-->2_data - # - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('0_data', '2'), - ('1_data', '2') - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_2.in_port(1).disconnect() - - self.assertEqual(node_0.out_port(0).get_destination(), node_2.in_port(0)) - self.assertEqual(node_2.in_port(0).get_source(), node_0.out_port(0)) - self.assertEqual(node_1.out_port(0).get_destination(), None) - - self.assertTrue(len(node_0.out_nodes()) == 1) - self.assertTrue(len(node_1.in_nodes()) == 0) - self.assertTrue(len(node_2.in_nodes()) == 1) - - def test_port_disconnect_4(self): - # 1-->1_data---\ 0-->0_data - # 0-->0_data---->2-->2_data ==> 1-->1_data-->2-->2_data - # - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('0_data', '2'), - ('1_data', '2') - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_2.in_port(0).disconnect() - - self.assertEqual(node_1.out_port(0).get_destination(), node_2.in_port(1)) - self.assertEqual(node_2.in_port(1).get_source(), node_1.out_port(0)) - self.assertEqual(node_2.in_port(0).get_source(), None) - self.assertEqual(node_0.out_port(0).get_destination(), None) - # - # self.assertTrue(len(node_0.out_nodes()) == 1) - # self.assertTrue(len(node_1.in_nodes()) == 0) - # self.assertTrue(len(node_2.in_nodes()) == 1) - - ########################################### - ### TESTS FOR CONNECTION CLASS METHODS #### - ########################################### - - def test_connection_set_source_1(self): - 
graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('3', '3_data'), - ('4', '4_data'), - - ('0_data', '1'), - ('0_data', '2'), - ('3_data', '4'), - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - node_3 = Node(graph, '3') - node_4 = Node(graph, '4') - - c = node_0.out_port(0).get_connection() - c.set_source(node_3.out_port(0)) - - self.assertTrue(node_0.out_node().kind == 'data') - - self.assertEqual(node_0.out_port(0).get_destinations(), []) - destinations = node_3.out_port(0).get_destinations() - for port in destinations: - self.assertTrue(port in [node_1.in_port(0), node_2.in_port(0), node_4.in_port(0)]) - - def test_connection_set_source_2(self): - # 2-->2_data ,->2-->2_data - # 0-->0_data-->1-->1_data ==> 0-->0_data/-->1-->1_data - # - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - - ('0_data', '1'), - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_2 = Node(graph, '2') - node_2.add_input_port(0) - - node_2.in_port(0).get_connection().set_source(node_0.out_port(0)) - - graph_ref = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - - ('0_data', '1'), - ('0_data', '2'), - ]) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_set_source_3(self): - # ,->2-->2_data 0-->0_data-->1-->1_data - # 0-->0_data/-->1-->1_data => 3-->3_data-->2-->2_data - # 3-->3_data - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('3', '3_data'), - - ('0_data', '1'), - ('0_data', '2'), - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_2 = Node(graph, '2') - node_3 = Node(graph, '3') - - node_2.in_port(0).get_connection().set_source(node_3.out_port(0)) - - graph_ref = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('3', '3_data'), - - ('0_data', '1'), - ('3_data', '2'), - ]) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - (flag, resp) = compare_graphs(graph, graph_ref, '2', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_set_source_4(self): - # 0 1 ==> 0-->1 - graph = build_graph(self.nodes, []) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - - node_0.add_output_port(0) - node_1.add_input_port(0) - - node_1.in_port(0).get_connection().set_source(node_0.out_port(0)) - - graph_ref = build_graph(self.nodes, [ - ('0', '0_data'), - ('0_data', '1'), - ]) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_set_destination(self): - # ,->2-->2_data-->3-->3_data ,->2-->2_data - # 0-->0_data/-->1-->1_data ==> 0-->0_data/-->3-->3_data - # - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('3', '3_data'), - - ('0_data', '1'), - ('0_data', '2'), - ('2_data', '3'), - ]) - graph.__setattr__('stage', 'middle') - - graph_ref = build_graph(self.nodes, [ - ('0', '0_data'), - ('2', '2_data'), - ('3', '3_data'), - - ('0_data', '3'), - ('0_data', '2'), - ]) - - node_1 = Node(graph, '1') - node_3 = Node(graph, '3') - - node_3.in_port(0).disconnect() - node_1.in_port(0).get_connection().set_destination(node_3.in_port(0)) - - (flag, resp) = 
compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_set_destination_1(self): - # 2 - # ,->1-->1_data ,->2 - # 0-->0_data/-->1-->1_data ==> 0-->0_data/-->1 - # - graph = build_graph(self.nodes, [ - ('0', '0_data'), - - ('0_data', '1'), - ('0_data', '1'), - ]) - graph.__setattr__('stage', 'middle') - - graph_ref = build_graph(self.nodes, [ - ('0', '0_data'), - - ('0_data', '1'), - ('0_data', '2'), - ]) - - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - node_2.add_input_port(0) - - node_1.in_port(1).get_connection().set_destination(node_2.in_port(0)) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_set_destination_2(self): - # 2 - # ,->1 ,->1 - # 0-->0_data/-->1 ==> 0-->0_data/-->2 - # - graph = build_graph(self.nodes, [ - ('0', '0_data'), - - ('0_data', '1'), - ('0_data', '1'), - ]) - graph.__setattr__('stage', 'middle') - - graph_ref = build_graph(self.nodes, [ - ('0', '0_data'), - - ('0_data', '1', {'in': 1}), - ('0_data', '2'), - ]) - - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - node_2.add_input_port(0) - - node_1.in_port(0).get_connection().set_destination(node_2.in_port(0)) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_add_destination_1(self): - # 3-->3_data ,-->3-->3_data - # ,->2-->2_data ,-->2-->2_data - # 0-->0_data/-->1-->1_data ==> 0-->0_data/-->1-->1_data - # - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('3', '3_data'), - - ('0_data', '1'), - ('0_data', '2'), - ]) - graph.__setattr__('stage', 'middle') - - graph_ref = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('3', '3_data'), - - ('0_data', '1'), - ('0_data', '2'), - ('0_data', '3'), - ]) - - node_0 = Node(graph, '0') - node_3 = Node(graph, '3') - node_3.add_input_port(idx=0) - - node_0.out_port(0).get_connection().add_destination(node_3.in_port(0)) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_add_destination_2(self): - # 0 - # 1-->1_data ==> 0-->0_data-->1-->1_data - graph = build_graph(self.nodes, [ - ('1', '1_data'), - ]) - graph.__setattr__('stage', 'middle') - - graph_ref = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('0_data', '1'), - ]) - - node_0 = Node(graph, '0') - node_0.add_output_port(idx=0) - - node_1 = Node(graph, '1') - node_1.add_input_port(idx=0) - - node_0.out_port(0).get_connection().add_destination(node_1.in_port(0)) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_get_source_destinations_1(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - - ('0_data', '1'), - ('0_data', '2') - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - source = node_0.out_port(0).get_connection().get_source() - destinations = node_0.out_port(0).get_connection().get_destinations() - - self.assertEqual(source, node_0.out_port(0)) - for port in destinations: - self.assertTrue(port in [node_1.in_port(0), node_2.in_port(0)]) - - self.assertEqual(node_1.out_port(0).get_connection().get_destination(), None) - self.assertEqual(node_1.out_port(0).get_destination(), None) - - 
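# (A Connection, as exercised here, pairs one source port with all of its
#  destination ports: get_source()/get_destinations() inspect it, while
#  set_source(), set_destination(), add_destination() and remove() rewire the
#  underlying op -> data -> op edges in a single call.)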
self.assertEqual(node_2.out_port(0).get_connection().get_destination(), None) - self.assertEqual(node_2.out_port(0).get_destination(), None) - - def test_connection_remove_1(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - - ('0_data', '1'), - ('0_data', '2') - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_1.in_port(0).get_connection().remove() - - self.assertEqual(node_0.out_port(0).get_destinations(), [node_2.in_port(0)]) - self.assertEqual(node_1.in_port(0).get_source(), None) - self.assertEqual(node_2.in_port(0).get_source(), node_0.out_port(0)) - - def test_connection_remove_2(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - - ('0_data', '1'), - ('0_data', '2') - ]) - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_0.out_port(0).get_connection().remove() - - self.assertEqual(node_0.out_port(0).get_destinations(), []) - self.assertEqual(node_1.out_port(0).get_destinations(), []) - self.assertEqual(node_2.out_port(0).get_destinations(), []) - - def test_connection_data_1(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - - ('0_data', '1'), - ('0_data', '2') - ], {'0_data': {'value': np.ones((1,3,64,64)), 'shape': np.array([1, 3, 64, 64])}}) - - graph.__setattr__('stage', 'middle') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - self.assertTrue(np.array_equal(node_0.out_port(0).get_connection().data.get_shape(), (1, 3, 64, 64))) - self.assertTrue(np.array_equal(node_0.out_port(0).get_connection().data.get_value(), np.ones((1, 3, 64, 64)))) - - self.assertEqual(node_1.out_port(0).get_connection().data.get_shape(), None) - self.assertEqual(node_1.out_port(0).get_connection().data.get_value(), None) - - self.assertEqual(node_2.out_port(0).get_connection().data.get_shape(), None) - self.assertEqual(node_2.out_port(0).get_connection().data.get_value(), None) - - ########################################### - ################## OTHER ################## - ########################################### - - def test_graph_cleanup_that_restores_const_operations(self): - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('3', '3_data'), - - ('0_data', '1'), - ('2_data', '1'), - ('3_data', '2'), - ], { - '3': {'shape': np.array([1, 227, 227, 3]), 'value': np.ones((1, 227, 227, 3))}, - '3_data': {'shape': np.array([1, 227, 227, 3]), 'value': np.ones((1, 227, 227, 3))}, - '2': {'shape': np.array([1, 227, 227, 3]), 'value': np.ones((1, 227, 227, 3))}, - '2_data': {'shape': np.array([1, 227, 227, 3]), 'value': np.ones((1, 227, 227, 3))}, - }, nodes_with_edges_only=True) - add_opoutput(graph, '1_data', 0, False) - - graph_ref = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('const_1', '2_data'), - - ('0_data', '1'), - ('2_data', '1'), - ], { - 'const_1': {'shape': np.array([1, 227, 227, 3]), 'value': np.ones((1, 227, 227, 3))}, - '2_data': {'shape': np.array([1, 227, 227, 3]), 'value': np.ones((1, 227, 227, 3))}, - }, nodes_with_edges_only=True) - add_opoutput(graph_ref, '1_data', 0, False) - - graph.clean_up() - graph_ref.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, '1_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - def 
test_node_in_out_ports_order(self): - # - # ,->2-->2_data---,->3-->3_data - # 0-->0_data-/-->1-->1_data--/ - # - graph = build_graph(self.nodes, [ - ('0', '0_data'), - ('1', '1_data'), - ('2', '2_data'), - ('3', '3_data'), - - ('0_data', '1'), - ('0_data', '2'), - ('1_data', '3'), - ('2_data', '3'), - ]) - - for id in ['0', '1', '2', '3']: - node = Node(graph, id) - for idx in range(len(node.in_ports())): - self.assertEqual(node.in_port(idx), node.in_ports()[idx]) - for idx in range(len(node.out_ports())): - self.assertEqual(node.out_port(idx), node.out_ports()[idx]) - - def test_node_in_ports_order_10_inputs(self): - edges = [('op_concat', 'op_concat_data'), - ('op_concat_data', 'op_split'), - ] - - # Filling edges list - for idx in range(11): - edges.append(('in_{}'.format(idx), 'in_{}_data'.format(idx))) - edges.append(('in_{}_data'.format(idx), 'op_concat', {'in': idx})) - edges.append(('op_split', 'op_split_{}_data'.format(idx), {'out': idx})) - edges.append(('op_split_{}_data'.format(idx), 'out_{}'.format(idx))) - - graph = build_graph(self.nodes_10_in_10_out, edges) - - node_concat = Node(graph, 'op_concat') - node_split = Node(graph, 'op_split') - - self.assertEqual(len(node_concat.in_ports()), len(node_concat.in_nodes())) - - l1 = [node_concat.in_port(idx).get_source().node.name for idx in node_concat.in_ports()] - l2 = [node_concat.in_node(idx).in_node(0).name for idx in node_concat.in_nodes()] - - self.assertEqual(l1, l2) - - l1 = [node_split.out_port(idx).get_destination().node.name for idx in node_split.out_ports()] - l2 = [node_split.out_node(idx).out_node(0).name for idx in node_split.out_nodes()] - - self.assertEqual(l1, l2) - - def test_node_in_ports_order_10_inputs_control_flow(self): - edges = [('op_concat', 'op_concat_data', {'out': 'control_flow_0', 'control_flow_edge': True}), - ('op_concat_data', 'op_split', {'in': 'control_flow_0', 'control_flow_edge': True}), - ] - - # Filling edges list - for idx in range(11): - edges.append(('in_{}'.format(idx), 'in_{}_data'.format(idx), - {'out': 'control_flow_0', 'control_flow_edge': True})) - edges.append(('in_{}_data'.format(idx), 'op_concat', - {'in': 'control_flow_{}'.format(idx), 'control_flow_edge': True})) - edges.append(('op_split', 'op_split_{}_data'.format(idx), - {'out': 'control_flow_{}'.format(idx), 'control_flow_edge': True})) - edges.append(('op_split_{}_data'.format(idx), 'out_{}'.format(idx), - {'in': 'control_flow_0', 'control_flow_edge': True})) - - graph = build_graph(self.nodes_10_in_10_out, edges) - - node_concat = Node(graph, 'op_concat') - node_split = Node(graph, 'op_split') - - self.assertEqual(len(node_concat.in_ports()), len(node_concat.in_nodes())) - - l1 = [node_concat.in_port(idx, control_flow=True).get_source().node.name - for idx in node_concat.in_ports(control_flow=True)] - l2 = [node_concat.in_node(idx, control_flow=True).in_node(0, control_flow=True).name - for idx in node_concat.in_nodes(control_flow=True)] - - self.assertEqual(l1, l2) - - l1 = [node_split.out_port(idx, control_flow=True).get_destination().node.name - for idx in node_split.out_ports(control_flow=True)] - l2 = [node_split.out_node(idx, control_flow=True).out_node(0, control_flow=True).name for idx in - node_split.out_nodes(control_flow=True)] - - self.assertEqual(l1, l2) - - def test_node_in_ports_order_10_inputs_mixed(self): - edges = [('op_concat', 'op_concat_data', {'out': 'control_flow_0', 'control_flow_edge': True}), - ('op_concat_data', 'op_split', {'in': 'control_flow_0', 'control_flow_edge': True}), - ] - graph = 
build_graph(self.nodes_10_in_10_out, edges) - - # Filling edges list - for idx in range(5): - edges.append(('in_{}'.format(idx), 'in_{}_data'.format(idx))) - edges.append(('in_{}_data'.format(idx), 'op_concat')) - edges.append(('op_split', 'op_split_{}_data'.format(idx))) - edges.append(('op_split_{}_data'.format(idx), 'out_{}'.format(idx))) - for idx in range(5, 11): - edges.append(('in_{}'.format(idx), 'in_{}_data'.format(idx), - {'out': 'control_flow_0', 'control_flow_edge': True})) - edges.append(('in_{}_data'.format(idx), 'op_concat', - {'in': 'control_flow_{}', 'control_flow_edge': True})) - edges.append(('op_split', 'op_split_{}_data'.format(idx), - {'out': 'control_flow_{}', 'control_flow_edge': True})) - edges.append(('op_split_{}_data'.format(idx), 'out_{}'.format(idx), - {'in': 'control_flow_0', 'control_flow_edge': True})) - - node_concat = Node(graph, 'op_concat') - node_split = Node(graph, 'op_split') - - self.assertEqual(len(node_concat.in_ports()), len(node_concat.in_nodes())) - - l1 = [node_concat.in_port(idx, control_flow=True).get_source().node.name - for idx in node_concat.in_ports(control_flow=True)] - l2 = [node_concat.in_node(idx, control_flow=True).in_node(0, control_flow=True).name - for idx in node_concat.in_nodes(control_flow=True)] - - self.assertEqual(l1, l2) - - l1 = [node_split.out_port(idx, control_flow=True).get_destination().node.name - for idx in node_split.out_ports(control_flow=True)] - l2 = [node_split.out_node(idx, control_flow=True).out_node(0, control_flow=True).name for idx in - node_split.out_nodes(control_flow=True)] - - self.assertEqual(l1, l2) - - -class TestNewGraphAPIFront(unittest.TestCase): - nodes = { - '0': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '1': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '2': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '3': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - '4': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'}, - 'const_1': {'type': 'Const', 'value': None, 'kind': 'op', 'op': 'Const'}, - } - - ########################################### - ###### TESTS FOR PORT CLASS METHODS ####### - ########################################### - - def test_port_get_destinations_1(self): - # ,->2 - # /-->1 - # 0 - graph = build_graph(self.nodes, [ - ('0', '1', {'out': 0}), - ('0', '2', {'out': 0}), - ]) - graph.__setattr__('stage', 'front') - - node_0_out_port = Node(graph, '0').out_port(0) - - node_1_in_port = Node(graph, '1').in_port(0) - node_2_in_port = Node(graph, '2').in_port(0) - - ports = node_0_out_port.get_destinations() - - self.assertTrue(len(ports) == 2) - for port in ports: - self.assertTrue(port in [node_1_in_port, node_2_in_port]) - - def test_port_get_destination_1(self): - # ,->2 - # /-->1 - # 0 - graph = build_graph(self.nodes, [ - ('0', '1', {'out': 0}), - ('0', '2', {'out': 0}), - ]) - graph.__setattr__('stage', 'front') - - node_0_out_port = Node(graph, '0').out_port(0) - - node_1_in_port = Node(graph, '1').in_port(0) - node_2_in_port = Node(graph, '2').in_port(0) - - with self.assertRaises(Error): - node_0_out_port.get_destination() - - def test_port_get_destination_2(self): - graph = build_graph(self.nodes, [ - ('0', '1'), - ]) - graph.__setattr__('stage', 'front') - - node_0_out_port = Node(graph, '0').out_port(0) - - node_1_in_port = Node(graph, '1').in_port(0) - - self.assertEqual(node_0_out_port.get_destination(), node_1_in_port) - - def 
test_port_get_destination_3(self): - graph = build_graph(self.nodes, [ - ('0', '1', {'out': 0, 'in': 0}), - ('0', '2', {'out': 1, 'in': 0}), - ('0', '3', {'out': 1, 'in': 0}), - ]) - graph.__setattr__('stage', 'front') - - node_0_out_port_1 = Node(graph, '0').out_port(1) - node_2_in_port = Node(graph, '2').in_port(0) - node_3_in_port = Node(graph, '3').in_port(0) - - destinations = node_0_out_port_1.get_destinations() - - self.assertTrue((destinations[0] == node_2_in_port and destinations[1] == node_3_in_port) or - (destinations[1] == node_2_in_port and destinations[0] == node_3_in_port)) - - def test_port_get_source_1(self): - graph = build_graph(self.nodes, [ - ('0', '1'), - ]) - graph.__setattr__('stage', 'front') - - node_0_out_port = Node(graph, '0').out_port(0) - - node_1_in_port = Node(graph, '1').in_port(0) - - self.assertEqual(node_1_in_port.get_source(), node_0_out_port) - - def test_port_get_source_2(self): - graph = build_graph(self.nodes, [ - ('0', '1'), - ('2', '1') - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - self.assertEqual(node_1.in_port(0).get_source(), node_0.out_port(0)) - self.assertEqual(node_1.in_port(1).get_source(), node_2.out_port(0)) - - def test_port_get_source_3(self): - graph = build_graph(self.nodes, []) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_0.add_input_port(0) - node_1.add_input_port(0) - node_2.add_input_port(0) - - self.assertEqual(node_0.in_port(0).get_source(), None) - self.assertEqual(node_1.in_port(0).get_source(), None) - self.assertEqual(node_2.in_port(0).get_source(), None) - - def test_port_disconnect_1(self): - # ,-->1-->1_data 0-->0_data - # 0-->0_data/--->2-->2_data ==> 0-->0_data 1-->1_data - # 2-->2_data - graph = build_graph(self.nodes, [ - ('0', '1', {'out': 0}), - ('0', '2', {'out': 0}) - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_0.out_port(0).disconnect() - - self.assertEqual(node_1.in_port(0).get_source(), None) - self.assertEqual(node_2.in_port(0).get_source(), None) - - self.assertTrue(len(node_1.in_nodes()) == 0) - self.assertTrue(len(node_2.in_nodes()) == 0) - - def test_port_disconnect_2(self): - # ,-->1 ,-->1 - # 0-->/--->2 ==> 0-->/ 2 - # - graph = build_graph(self.nodes, [ - ('0', '1', {'out': 0}), - ('0', '2', {'out': 0}) - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_2.in_port(0).disconnect() - - self.assertEqual(node_0.out_port(0).get_destination(), node_1.in_port(0)) - self.assertEqual(node_1.in_port(0).get_source(), node_0.out_port(0)) - self.assertEqual(node_2.in_port(0).get_source(), None) - - self.assertTrue(len(node_0.out_nodes()) == 1) - self.assertTrue(len(node_1.in_nodes()) == 1) - self.assertTrue(len(node_2.in_nodes()) == 0) - - def test_port_disconnect_3(self): - # 1---\ 1 - # 0---->2 ==> 0-->2 - # - graph = build_graph(self.nodes, [ - ('0', '2'), - ('1', '2') - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_2.in_port(1).disconnect() - - self.assertEqual(node_0.out_port(0).get_destination(), node_2.in_port(0)) - self.assertEqual(node_2.in_port(0).get_source(), node_0.out_port(0)) - self.assertEqual(node_1.out_port(0).get_destination(), None) - - 
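# Front-phase counterpart of the middle-phase sketch above (same assumptions):
# before partial inference there are no data nodes, so ops connect directly and
# the 'in'/'out' edge attributes carry the port indices.
from openvino.tools.mo.graph.graph import Node
from unit_tests.utils.graph import build_graph

front_nodes = {
    'a': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'},
    'b': {'type': 'Parameter', 'value': None, 'kind': 'op', 'op': 'Parameter'},
}
front_graph = build_graph(front_nodes, [('a', 'b', {'out': 0, 'in': 0})])
front_graph.stage = 'front'
assert Node(front_graph, 'b').in_port(0).get_source() == Node(front_graph, 'a').out_port(0)
Node(front_graph, 'b').in_port(0).disconnect()
assert Node(front_graph, 'b').in_port(0).get_source() is None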
self.assertTrue(len(node_0.out_nodes()) == 1) - self.assertTrue(len(node_1.in_nodes()) == 0) - self.assertTrue(len(node_2.in_nodes()) == 1) - - def test_port_disconnect_4(self): - # 1-----\ 0 - # 0------>2 ==> 1--->2 - # - graph = build_graph(self.nodes, [ - ('0', '2', {'out': 0}), - ('1', '2', {'out': 0}) - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_2.in_port(0).disconnect() - - self.assertEqual(node_1.out_port(0).get_destination(), node_2.in_port(1)) - self.assertEqual(node_2.in_port(1).get_source(), node_1.out_port(0)) - self.assertEqual(node_2.in_port(0).get_source(), None) - self.assertEqual(node_0.out_port(0).get_destination(), None) - - def test_port_disconnected_1(self): - graph = build_graph(self.nodes, [ - ('0', '1'), - ('1', '2') - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - node_2.add_output_port(0) - node_0.add_input_port(0) - - self.assertTrue(not node_0.out_port(0).disconnected()) - self.assertTrue(not node_1.out_port(0).disconnected()) - self.assertTrue(not node_1.in_port(0).disconnected()) - self.assertTrue(node_2.out_port(0).disconnected()) - self.assertTrue(node_0.in_port(0).disconnected()) - - def test_port_get_connection_1(self): - graph = build_graph(self.nodes, [ - ('0', '1'), - ('1', '2', {'out': 0}), - ('1', '3', {'out': 0}), - ]) - graph.__setattr__('stage', 'front') - - node_1 = Node(graph, '1') - node_2 = Node(graph, '3') - node_3 = Node(graph, '2') - - c = node_1.out_port(0).get_connection() - - self.assertTrue(c.get_source() == node_1.out_port(0)) - for port in c.get_destinations(): - self.assertTrue(port in [node_2.in_port(0), node_3.in_port(0)]) - - ########################################### - ### TESTS FOR CONNECTION CLASS METHODS #### - ########################################### - - def test_connection_set_source_1(self): - graph = build_graph(self.nodes, [ - ('0', '1'), - ('0', '2'), - ('3', '4'), - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - node_3 = Node(graph, '3') - node_4 = Node(graph, '4') - - c = node_0.out_port(0).get_connection() - c.set_source(node_3.out_port(0)) - - self.assertEqual(node_0.out_port(0).get_destinations(), []) - destinations = node_3.out_port(0).get_destinations() - for port in destinations: - self.assertTrue(port in [node_1.in_port(0), node_2.in_port(0), node_4.in_port(0)]) - - def test_connection_set_source_2(self): - # 2 ,->2 - # 0-->1 ==> 0/-->1 - # - graph = build_graph(self.nodes, [ - ('0', '1'), - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_2 = Node(graph, '2') - node_2.add_input_port(0) - - node_2.in_port(0).get_connection().set_source(node_0.out_port(0)) - - graph_ref = build_graph(self.nodes, [ - ('0', '1', {'out': 0, 'in': 0}), - ('0', '2', {'out': 0, 'in': 0}), - ]) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_set_source_3(self): - # 0 1 ==> 0-->1 - graph = build_graph(self.nodes, []) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - - node_0.add_output_port(0) - node_1.add_input_port(0) - - node_1.in_port(0).get_connection().set_source(node_0.out_port(0)) - - graph_ref = build_graph(self.nodes, [ - ('0', '1', {'out': 0, 'in': 0}), - ]) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', 
check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_set_destination(self): - # ,->2-->2_data-->3-->3_data ,->2-->2_data - # 0-->0_data/-->1-->1_data ==> 0-->0_data/-->3-->3_data - # - graph = build_graph(self.nodes, [ - ('0', '1'), - ('0', '2'), - ('2', '3'), - ]) - graph.__setattr__('stage', 'front') - - graph_ref = build_graph(self.nodes, [ - ('0', '3'), - ('0', '2'), - ]) - - node_1 = Node(graph, '1') - node_3 = Node(graph, '3') - - node_3.in_port(0).disconnect() - node_1.in_port(0).get_connection().set_destination(node_3.in_port(0)) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_set_destination_1(self): - # 2 - # ,->1 ,->2 - # 0-->1 ==> 0-->1 - # - graph = build_graph(self.nodes, [ - ('0', '1', {'out': 0, 'in': 0}), - ('0', '1', {'out': 0, 'in': 1}), - ]) - graph.__setattr__('stage', 'front') - - graph_ref = build_graph(self.nodes, [ - ('0', '1', {'out': 0, 'in': 0}), - ('0', '2', {'out': 0, 'in': 0}), - ]) - - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - node_2.add_input_port(0) - - node_1.in_port(1).get_connection().set_destination(node_2.in_port(0)) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_set_destination_2(self): - # 2 - # ,->1 ,->1 - # 0-->1 ==> 0-->2 - # - graph = build_graph(self.nodes, [ - ('0', '1', {'out': 0, 'in': 0}), - ('0', '1', {'out': 0, 'in': 1}), - ]) - graph.__setattr__('stage', 'front') - - graph_ref = build_graph(self.nodes, [ - ('0', '1', {'out': 0, 'in': 1}), - ('0', '2', {'out': 0, 'in': 0}), - ]) - - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - node_2.add_input_port(0) - - node_1.in_port(0).get_connection().set_destination(node_2.in_port(0)) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_add_destination_1(self): - # 3 ,-->3 - # ,->2 ,-->2 - # 0--/-->1 ==> 0--/-->1 - # - graph = build_graph(self.nodes, [ - ('0', '1', {'in': 0, 'out': 0}), - ('0', '2', {'in': 0, 'out': 0}), - ]) - graph.__setattr__('stage', 'front') - - graph_ref = build_graph(self.nodes, [ - ('0', '1', {'in': 0, 'out': 0}), - ('0', '2', {'in': 0, 'out': 0}), - ('0', '3', {'in': 0, 'out': 0}), - ]) - - node_0 = Node(graph, '0') - node_3 = Node(graph, '3') - node_3.add_input_port(idx=0) - - node_0.out_port(0).get_connection().add_destination(node_3.in_port(0)) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_add_destination_2(self): - # 0 - # 1 ==> 0-->1 - graph = build_graph(self.nodes, []) - graph.__setattr__('stage', 'front') - - graph_ref = build_graph(self.nodes, [ - ('0', '1'), - ]) - - node_0 = Node(graph, '0') - node_0.add_output_port(idx=0) - - node_1 = Node(graph, '1') - node_1.add_input_port(idx=0) - - node_0.out_port(0).get_connection().add_destination(node_1.in_port(0)) - - (flag, resp) = compare_graphs(graph, graph_ref, '0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_connection_get_source_destinations_1(self): - graph = build_graph(self.nodes, [ - ('0', '1'), - ('0', '2') - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - node_1.add_output_port(idx=0) - node_2.add_output_port(idx=0) - - source = node_0.out_port(0).get_connection().get_source() - destinations = 
node_0.out_port(0).get_connection().get_destinations() - - self.assertEqual(source, node_0.out_port(0)) - for port in destinations: - self.assertTrue(port in [node_1.in_port(0), node_2.in_port(0)]) - - self.assertEqual(node_1.out_port(0).get_connection().get_destination(), None) - self.assertEqual(node_1.out_port(0).get_destination(), None) - - self.assertEqual(node_2.out_port(0).get_connection().get_destination(), None) - self.assertEqual(node_2.out_port(0).get_destination(), None) - - def test_connection_remove_1(self): - graph = build_graph(self.nodes, [ - ('0', '1', {'in': 0, 'out': 0}), - ('0', '2', {'in': 0, 'out': 0}) - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_1.in_port(0).get_connection().remove() - - self.assertEqual(node_0.out_port(0).get_destinations(), [node_2.in_port(0)]) - self.assertEqual(node_1.in_port(0).get_source(), None) - self.assertEqual(node_2.in_port(0).get_source(), node_0.out_port(0)) - - def test_connection_remove_2(self): - graph = build_graph(self.nodes, [ - ('0', '1', {'in': 0, 'out': 0}), - ('0', '2', {'in': 0, 'out': 0}) - ]) - graph.__setattr__('stage', 'front') - - node_0 = Node(graph, '0') - node_1 = Node(graph, '1') - node_2 = Node(graph, '2') - - node_0.out_port(0).get_connection().remove() - - self.assertEqual(node_0.out_port(0).get_destinations(), []) - self.assertEqual(node_1.in_port(0).get_source(), None) - self.assertEqual(node_2.in_port(0).get_source(), None) - - -class TestDictIncludesCompareAttrs(unittest.TestCase): - def test_numpy_scalar(self): - self.assertTrue(dict_includes_compare_attrs(2.0, np.array(2.0))) - self.assertTrue(dict_includes_compare_attrs(2, np.array(2.0))) - self.assertTrue(dict_includes_compare_attrs(np.array(2.0), 2.0)) - self.assertTrue(dict_includes_compare_attrs(np.array(2.0), 2)) - - self.assertFalse(dict_includes_compare_attrs(2.01, np.array(2.0))) - self.assertFalse(dict_includes_compare_attrs(2, np.array(2.1))) - self.assertFalse(dict_includes_compare_attrs(np.array(2.0), 2.01)) - self.assertFalse(dict_includes_compare_attrs(np.array(2.1), 2)) - - def test_regular_scalars(self): - self.assertTrue(dict_includes_compare_attrs(2.0, 2)) - self.assertFalse(dict_includes_compare_attrs(2, 1.99999999999999)) - - def test_lists_numpy(self): - self.assertTrue(dict_includes_compare_attrs([4, 2, 3], np.array([4, 2, 3]))) - self.assertFalse(dict_includes_compare_attrs([4, 2, 3], np.array([1, 2, 3]))) - - def test_regular_lists(self): - self.assertTrue(dict_includes_compare_attrs([4, 2, 3], [4, 2, 3])) - self.assertFalse(dict_includes_compare_attrs([4, 2, 3], [1, 2, 3])) - self.assertFalse(dict_includes_compare_attrs([4, 2, 3], [4, 2, 3, 5])) - - def test_regular_string(self): - self.assertTrue(dict_includes_compare_attrs("abc", "abc")) - self.assertFalse(dict_includes_compare_attrs("abc", "abd")) - - -class TestGetSetAttributeBetweenNodes(unittest.TestCase): - nodes = { - 'A': {'id': 0, 'kind': 'op'}, - 'B': {'id': 1, 'kind': 'op'}, - 'C': {'id': 2, 'kind': 'op'}, - 'D': {'id': 3, 'kind': 'op'}, - 'E': {'id': 4, 'kind': 'op'}, - 'F': {'id': 5, 'kind': 'op'}, - } - - def build_test_graph(self): - graph = build_graph(self.nodes, [ - ('A', 'D', {'in': 0, 'out': 0, 'Attr': "A-D"}), - ('A', 'E', {'in': 0, 'out': 1, 'Attr': "A-E"}), - ('A', 'F', {'in': 0, 'out': 2, 'Attr': "A-F"}), - ('B', 'D', {'in': 1, 'out': 0, 'Attr': "B-D"}), - ('B', 'F', {'in': 2, 'out': 1, 'Attr': "B-F"}), - ]) - return graph - - def 
test_get_attribute_between_nodes(self): - graph = self.build_test_graph() - a_node = Node(graph, 'A') - b_node = Node(graph, 'B') - d_node = Node(graph, 'D') - e_node = Node(graph, 'E') - f_node = Node(graph, 'F') - self.assertTrue(get_edge_attribute_between_nodes(a_node, d_node, 'Attr') == "A-D") - self.assertTrue(get_edge_attribute_between_nodes(a_node, e_node, 'Attr') == "A-E") - self.assertTrue(get_edge_attribute_between_nodes(a_node, f_node, 'Attr') == "A-F") - self.assertTrue(get_edge_attribute_between_nodes(b_node, d_node, 'Attr') == "B-D") - self.assertTrue(get_edge_attribute_between_nodes(b_node, f_node, 'Attr') == "B-F") - - def test_set_attribute_between_nodes(self): - graph = self.build_test_graph() - a_node = Node(graph, 'A') - b_node = Node(graph, 'B') - d_node = Node(graph, 'D') - e_node = Node(graph, 'E') - f_node = Node(graph, 'F') - - set_edge_attribute_between_nodes(a_node, d_node, 'Attr', 'new_value_1') - set_edge_attribute_between_nodes(a_node, e_node, 'Attr', 'new_value_2') - set_edge_attribute_between_nodes(a_node, f_node, 'Attr', 'new_value_3') - set_edge_attribute_between_nodes(b_node, d_node, 'Attr', 'new_value_4') - set_edge_attribute_between_nodes(b_node, f_node, 'Attr', 'new_value_5') - - self.assertTrue(get_edge_attribute_between_nodes(a_node, d_node, 'Attr') == "new_value_1") - self.assertTrue(get_edge_attribute_between_nodes(a_node, e_node, 'Attr') == "new_value_2") - self.assertTrue(get_edge_attribute_between_nodes(a_node, f_node, 'Attr') == "new_value_3") - self.assertTrue(get_edge_attribute_between_nodes(b_node, d_node, 'Attr') == "new_value_4") - self.assertTrue(get_edge_attribute_between_nodes(b_node, f_node, 'Attr') == "new_value_5") - - -class TestTopologicalSort(unittest.TestCase): - nodes = { - 'A': {'id': 0, 'kind': 'op'}, - 'B': {'id': 1, 'kind': 'op'}, - 'C': {'id': 2, 'kind': 'op'}, - 'D': {'id': 3, 'kind': 'op'}, - 'E': {'id': 4, 'kind': 'op'}, - } - - def build_test_graph(self): - graph = build_graph(self.nodes, [ - ('A', 'B', {'in': 0, 'out': 0}), - ('A', 'C', {'in': 0, 'out': 1}), - ('A', 'D', {'in': 0, 'out': 2}), - ('A', 'E', {'in': 0, 'out': 3}), - ('B', 'D', {'in': 1, 'out': 0}), - ('C', 'D', {'in': 2, 'out': 0}), - ('C', 'E', {'in': 1, 'out': 1}), - ('D', 'E', {'in': 2, 'out': 0}), - ]) - return graph - - def test_sort_with_start_node(self): - graph = self.build_test_graph() - - stat_node = Node(graph, "A") - nodes_names = [node.name for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node)] - assert nodes_names == ['A', 'C', 'B', 'D', 'E'] - - stat_node = Node(graph, "B") - nodes_names = [node.name for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node)] - assert nodes_names == ['B', 'D', 'E'] - - stat_node = Node(graph, "C") - nodes_names = [node.name for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node)] - assert nodes_names == ['C', 'D', 'E'] - - stat_node = Node(graph, "D") - nodes_names = [node.name for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node)] - assert nodes_names == ['D', 'E'] - - stat_node = Node(graph, "E") - nodes_names = [node.name for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node)] - assert nodes_names == ['E'] - - # reverse order - stat_node = Node(graph, "A") - nodes_names = [node.name for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node, - reverse=True)] - assert nodes_names == ['E', 'D', 'B', 'C', 'A'] - - stat_node = Node(graph, "B") - nodes_names = [node.name 
for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node, - reverse=True)] - assert nodes_names == ['E', 'D', 'B'] - - stat_node = Node(graph, "C") - nodes_names = [node.name for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node, - reverse=True)] - assert nodes_names == ['E', 'D', 'C'] - - stat_node = Node(graph, "D") - nodes_names = [node.name for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node, - reverse=True)] - assert nodes_names == ['E', 'D'] - - stat_node = Node(graph, "E") - nodes_names = [node.name for node in graph.pseudo_topological_sort_with_start_node(start_node=stat_node, - reverse=True)] - assert nodes_names == ['E'] - \ No newline at end of file diff --git a/tools/mo/unit_tests/mo/graph/port_test.py b/tools/mo/unit_tests/mo/graph/port_test.py deleted file mode 100644 index 8787bada84597d..00000000000000 --- a/tools/mo/unit_tests/mo/graph/port_test.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, regular_op, valued_const_with_data, result, connect - -nodes = { - **regular_op('input', {'type': 'Parameter'}), - **regular_op('Op1', {'type': 'Op1', 'kind': 'op', 'op': 'Op1'}), - **regular_op('Op2', {'type': 'Op2', 'kind': 'op', 'op': 'Op2'}), - **regular_op('Op3', {'type': 'Op3', 'kind': 'op', 'op': 'Op3'}), - - 'input_data': {'kind': 'data', 'fw_tensor_debug_info': [('input', 'input'), ('Op1', 'Op1,Op2')]}, - 'Op1_data': {'kind': 'data', 'fw_tensor_debug_info': [('Op1', 'Op1,Op2')]}, - 'Op2_data': {'kind': 'data'}, - 'Op3_data': {'kind': 'data', 'fw_tensor_debug_info': [('Op3', 'Op3')]}, -} - - -class TestsGetTensorNames(unittest.TestCase): - def test_front(self): - graph = build_graph(nodes, - [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input'), - ('Op1', 'Op1,Op2')]})]) - graph.stage = 'front' - input_node = Node(graph, 'input') - self.assertTrue(input_node.out_port(0).get_tensor_names() == ['Op1\\,Op2', 'input']) - - op1_node = Node(graph, 'Op1') - op1_node.add_output_port(0) - self.assertTrue(op1_node.out_port(0).get_tensor_names() == []) - - input_node.out_port(0).add_tensor_names(["A:0", "B:0", "B:1", "B:2", "C:0"]) - self.assertTrue(input_node.out_port(0).get_tensor_debug_info() == - [('input', 'input'), ('Op1', 'Op1,Op2'), ("input", "A:0"), ("input", "B:0"), - ("input", "B:1"), ("input", "B:2"), ("input", "C:0")]) - self.assertTrue(input_node.out_port(0).get_tensor_names() == - ['A:0', 'B:0', 'B:1', 'B:2', 'C:0', 'Op1\\,Op2', 'input']) - input_node.out_port(0).remove_tensor_names() - self.assertTrue(input_node.out_port(0).get_tensor_debug_info() == []) - self.assertTrue(input_node.out_port(0).get_tensor_names() == []) - - def test_middle(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('input_data', 'Op2')]) - - input_node = Node(graph, 'input') - self.assertTrue(input_node.out_port(0).get_tensor_names() == ['Op1\\,Op2', 'input']) - - op1_node = Node(graph, 'Op1') - op1_node.add_output_port(0) - self.assertTrue(op1_node.out_port(0).get_tensor_names() == []) - - op2_node = Node(graph, 'Op2') - op2_node.add_output_port(0) - self.assertTrue(op2_node.out_port(0).get_tensor_names() == []) - - input_node.out_port(0).add_tensor_names(["A:0", "B:0", "B:1", "B:2", "C:0"]) - self.assertTrue(input_node.out_port(0).get_tensor_debug_info() == - [('input', 
'input'), ('Op1', 'Op1,Op2'), ("input", "A:0"), ("input", "B:0"), - ("input", "B:1"), ("input", "B:2"), ("input", "C:0")]) - self.assertTrue(input_node.out_port(0).get_tensor_names() == - ['A:0', 'B:0', 'B:1', 'B:2', 'C:0', 'Op1\\,Op2', 'input']) - input_node.out_port(0).remove_tensor_names() - self.assertTrue(input_node.out_port(0).get_tensor_debug_info() == []) - self.assertTrue(input_node.out_port(0).get_tensor_names() == []) - - def test_port_renumber(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data', {'out': 1}), ('Op1_data', 'Op2')]) - input_node = Node(graph, 'input') - self.assertTrue(input_node.out_port(0).get_tensor_names(port_renumber=True) == ['Op1\\,Op2', 'input']) - - op1_node = Node(graph, 'Op1') - op1_node.add_output_port(0) - - self.assertTrue(op1_node.out_port(0).get_tensor_names(port_renumber=True) == ['Op1\\,Op2']) - - input_node.out_port(0).add_tensor_names(["A:0", "B:0", "B:1", "B:2", "C:0"]) - self.assertTrue(input_node.out_port(0).get_tensor_debug_info() == - [('input', 'input'), ('Op1', 'Op1,Op2'), ("input", "A:0"), ("input", "B:0"), - ("input", "B:1"), ("input", "B:2"), ("input", "C:0")]) - self.assertTrue(input_node.out_port(0).get_tensor_names() == - ['A:0', 'B:0', 'B:1', 'B:2', 'C:0', 'Op1\\,Op2', 'input']) - input_node.out_port(0).remove_tensor_names(port_renumber=True) - self.assertTrue(input_node.out_port(0).get_tensor_debug_info() == []) - self.assertTrue(input_node.out_port(0).get_tensor_names() == []) - - def test_reconnect_middle_case1(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), ('Op3', 'Op3_data')]) - input_node = Node(graph, 'input') - - input_node_out_port = input_node.out_port(0) - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - - op3_node = Node(graph, 'Op3') - input_node_out_port.get_connection().set_source(op3_node.out_port(0), "merge") - - self.assertTrue(input_node_out_port.get_tensor_names() is None) - self.assertTrue(op3_node.out_port(0).get_tensor_names() == ['Op1\\,Op2', 'Op3', 'input']) - - def test_reconnect_middle_case1_parameter(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), ('Op3', 'Op3_data')]) - input_node = Node(graph, 'input') - - input_node_out_port = input_node.out_port(0) - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - - op3_node = Node(graph, 'Op3') - input_node_out_port.get_connection().set_source(op3_node.out_port(0)) - - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - self.assertTrue(op3_node.out_port(0).get_tensor_names() == ['Op3']) - - def test_reconnect_front_case1(self): - graph = build_graph(nodes, [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input'), - ('Op1', 'Op1,Op2')]}), - ('Op3', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op3', 'Op3')]})]) - graph.stage = 'front' - input_node = Node(graph, 'input') - - input_node_out_port = input_node.out_port(0) - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - - op3_node = Node(graph, 'Op3') - input_node_out_port.get_connection().set_source(op3_node.out_port(0), "merge") - - self.assertTrue(input_node_out_port.get_tensor_names() == []) - self.assertTrue(op3_node.out_port(0).get_tensor_names() == ['Op1\\,Op2', 'Op3', 'input']) - - def test_reconnect_front_case1_parameter(self): - graph = build_graph(nodes, [('input', 'Op1', {'in': 0, 'out': 0, 
'fw_tensor_debug_info': [('input', 'input'), - ('Op1', 'Op1,Op2')]}), - ('Op3', 'Op2', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op3', 'Op3')]})]) - graph.stage = 'front' - input_node = Node(graph, 'input') - - input_node_out_port = input_node.out_port(0) - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - - op3_node = Node(graph, 'Op3') - input_node_out_port.get_connection().set_source(op3_node.out_port(0)) - - self.assertTrue(input_node_out_port.get_tensor_names() == []) - self.assertTrue(op3_node.out_port(0).get_tensor_names() == ['Op3']) - - def test_reconnect_middle_case1(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), ('Op3', 'Op3_data')]) - input_node = Node(graph, 'input') - - input_node_out_port = input_node.out_port(0) - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - - op3_node = Node(graph, 'Op3') - input_node_out_port.get_connection().set_source(op3_node.out_port(0), "merge") - - self.assertTrue(input_node_out_port.get_tensor_names() == []) - self.assertTrue(op3_node.out_port(0).get_tensor_names() == ['Op1\\,Op2', 'Op3', 'input']) - - def test_reconnect_middle_case1_parameter(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), ('Op3', 'Op3_data')]) - input_node = Node(graph, 'input') - - input_node_out_port = input_node.out_port(0) - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - - op3_node = Node(graph, 'Op3') - input_node_out_port.get_connection().set_source(op3_node.out_port(0)) - - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - self.assertTrue(op3_node.out_port(0).get_tensor_names() == ['Op3']) - - def test_reconnect_middle_case2(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1', {'out': 0}), - ('input_data', 'Op1', {'out': 1}), ('Op3', 'Op3_data')]) - input_node = Node(graph, 'input') - - input_node_out_port = input_node.out_port(0) - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - - op3_node = Node(graph, 'Op3') - input_node_out_port.get_connection().set_source(op3_node.out_port(0), "merge") - - self.assertTrue(input_node_out_port.get_tensor_names() == []) - self.assertTrue(op3_node.out_port(0).get_tensor_names() == ['Op1\\,Op2', 'Op3', 'input']) - - def test_reconnect_middle_case2(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1', {'out': 0}), - ('input_data', 'Op1', {'out': 1}), ('Op3', 'Op3_data')]) - input_node = Node(graph, 'input') - - input_node_out_port = input_node.out_port(0) - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - - op3_node = Node(graph, 'Op3') - input_node_out_port.get_connection().set_source(op3_node.out_port(0)) - - self.assertTrue(input_node_out_port.get_tensor_names() == ['Op1\\,Op2', 'input']) - self.assertTrue(op3_node.out_port(0).get_tensor_names() == ['Op3']) - - -class TestPortMethods(unittest.TestCase): - - def test_middle_disconnect_several_edges_between_two_nodes(self): - graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'), - ('Op1', 'Op1_data'), ('Op1_data', 'Op2', {'in': 0}), ('Op1_data', 'Op2', {'in': 1}), - ('Op1_data', 'Op2', {'in': 2})], - nodes_with_edges_only=True) - op1_node = Node(graph, 'Op1') - op1_node.out_port(0).disconnect() - self.assertTrue(op1_node.out_port(0).disconnected()) - - -class TestForceShape(unittest.TestCase): - def 
test_set_value_and_shape_with_force_shape_attribute_in_op(self): - import numpy as np - graph = build_graph({**valued_const_with_data('const', np.array([1, 2, 3])), **result()}, - [*connect('const', 'output')]) - - node = Node(graph, 'const') - node['force_shape'] = np.array([2, 5, 7], dtype=np.int64) - node.out_port(0).data.set_value(np.zeros(35)) - self.assertTrue(np.array_equal(node.out_port(0).data.get_shape(), np.array([2, 5, 7], dtype=np.int64)), - "node.out_port(0).data.get_shape()={} != [2, 5, 7]".format(node.out_port(0).data.get_shape())) - - def test_set_value_and_shape_with_force_shape_attribute_in_data(self): - import numpy as np - graph = build_graph({**valued_const_with_data('const', np.array([1, 2, 3])), **result()}, - [*connect('const', 'output')]) - - node = Node(graph, 'const') - Node(graph, 'const_d')['force_shape'] = np.array([2, 5, 7], dtype=np.int64) - node.out_port(0).data.set_value(np.zeros(30)) - self.assertTrue(np.array_equal(node.out_port(0).data.get_shape(), np.array([2, 5, 7], dtype=np.int64)), - "node.out_port(0).data.get_shape()={} != [2, 5, 7]".format( - node.out_port(0).data.get_shape())) - diff --git a/tools/mo/unit_tests/mo/load/loader_test.py b/tools/mo/unit_tests/mo/load/loader_test.py deleted file mode 100644 index 4134400daec933..00000000000000 --- a/tools/mo/unit_tests/mo/load/loader_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.load.tf.loader import graph_or_sub_graph_has_nhwc_ops -from unit_tests.utils.graph import build_graph, result, regular_op, const, connect_front - - -class TFLoaderTest(unittest.TestCase): - @staticmethod - def build_conv_graph(): - nodes = { - **const('weights', np.random.randn(1, 1, 1, 1)), - **regular_op('input', {'op': 'Parameter'}), - **regular_op('conv', {'op': 'Conv2D', 'layout': 'NHWC'}), - **result('result'), - } - edges = [*connect_front('input', '0:conv'), - *connect_front('weights', '1:conv'), - *connect_front('conv:0', 'result'), - ] - graph = build_graph(nodes, edges) - - graph.stage = 'front' - return graph - - @staticmethod - def build_parameter_result_graph(): - nodes = { - **regular_op('input', {'op': 'Parameter'}), - **result('result'), - } - edges = [*connect_front('input', '0:result'), - ] - graph = build_graph(nodes, edges) - graph.stage = 'front' - return graph - - @staticmethod - def build_loop_graph(body_graph): - # create fake Loop operation - nodes = { - **regular_op('input', {'op': 'Parameter'}), - **regular_op('loop', {'op': 'Loop', 'body': body_graph, 'sub_graphs': ['body']}), - **result('result'), - } - edges = [*connect_front('input', '0:loop'), - *connect_front('loop:0', 'result'), - ] - graph = build_graph(nodes, edges) - graph.stage = 'front' - return graph - - def test_convolution_main_graph(self): - self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_conv_graph())) - - def test_convolution_loop_body_graph(self): - self.assertTrue(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_conv_graph()))) - - def test_no_convolution_main_graph(self): - self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_parameter_result_graph())) - - def test_no_convolution_main_and_sub_graph(self): - self.assertFalse(graph_or_sub_graph_has_nhwc_ops(self.build_loop_graph(self.build_parameter_result_graph()))) diff --git a/tools/mo/unit_tests/mo/main_test_actual.py b/tools/mo/unit_tests/mo/main_test_actual.py deleted file mode 100644 index 
947130272ccc32..00000000000000 --- a/tools/mo/unit_tests/mo/main_test_actual.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import unittest -from unittest.mock import patch - -import pytest - -from openvino.tools.mo.utils.error import FrameworkError - -ngraph_available = True -try: - from openvino.tools.mo.main import main -except Exception: - ngraph_available = False - -ngraph_needed = pytest.mark.skipif(not ngraph_available, - reason="mock MO fe is not available") - - -class TestMainErrors(unittest.TestCase): - @patch('argparse.ArgumentParser.parse_args', return_value=argparse.Namespace( - use_legacy_frontend=False, - use_new_frontend=False, - framework=None, - input_model="abc.pbtxt" - )) - @patch('openvino.tools.mo.convert_impl.driver', side_effect=FrameworkError('FW ERROR MESSAGE')) - @ngraph_needed - def test_FrameworkError(self, mock_argparse, mock_driver): - with self.assertLogs() as logger: - main(argparse.ArgumentParser()) - self.assertEqual(logger.output, ['ERROR:root:FW ERROR MESSAGE']) diff --git a/tools/mo/unit_tests/mo/main_test_error_log.py b/tools/mo/unit_tests/mo/main_test_error_log.py deleted file mode 100644 index 34534ac3e750c5..00000000000000 --- a/tools/mo/unit_tests/mo/main_test_error_log.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -from unittest.mock import patch - -from openvino.tools.mo.utils.error import FrameworkError - - -def mocked_parse_args(*argv): - # Mock parse_args method which generates warning - import logging as log - log.error("warning", extra={'is_warning': True}) - argv = argparse.Namespace(use_legacy_frontend=False, - use_new_frontend=False, - framework=None, - input_model="abc.pbtxt") - return argv - - -@patch('argparse.ArgumentParser.parse_args', mocked_parse_args) -@patch('openvino.tools.mo.convert_impl.driver', side_effect=FrameworkError('FW ERROR MESSAGE')) -def run_main(mock_driver): - from openvino.tools.mo.main import main - # runs main() method where driver() raises FrameworkError - main(argparse.ArgumentParser()) - - -if __name__ == "__main__": - run_main() diff --git a/tools/mo/unit_tests/mo/middle/AddIsCyclicAttribute_test.py b/tools/mo/unit_tests/mo/middle/AddIsCyclicAttribute_test.py deleted file mode 100644 index aa9fa1caf15773..00000000000000 --- a/tools/mo/unit_tests/mo/middle/AddIsCyclicAttribute_test.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.AddIsCyclicAttribute import AddIsCyclicAttribute -from unit_tests.utils.graph import build_graph_with_attrs - - -class AddIsCyclicAttributeTest(unittest.TestCase): - nodes = [('node_1', {}), - ('node_2', {})] - edges = [('node_1', 'node_2')] - - def test_1(self): - """ - Acyclic case => graph.graph['is_cyclic'] should be False. - """ - graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges) - tested_pass = AddIsCyclicAttribute() - tested_pass.find_and_replace_pattern(graph) - - assert graph.graph['is_cyclic'] is False - - def test_2(self): - """ - Cyclic case => graph.graph['is_cyclic'] should be True. 
- :return: - """ - graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges, - new_edges_with_attrs=[('node_2', 'node_1')]) - tested_pass = AddIsCyclicAttribute() - tested_pass.find_and_replace_pattern(graph) - - assert graph.graph['is_cyclic'] is True diff --git a/tools/mo/unit_tests/mo/middle/ArgOpsToTopK_test.py b/tools/mo/unit_tests/mo/middle/ArgOpsToTopK_test.py deleted file mode 100644 index db139419a979cb..00000000000000 --- a/tools/mo/unit_tests/mo/middle/ArgOpsToTopK_test.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.ArgOpsToTopK import ArgOpsToTopK -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import regular_op_with_empty_data, result, build_graph, connect, \ - valued_const_with_data, regular_op, empty_data, connect_front - -nodes_attributes = { - **regular_op_with_empty_data('input', {'op': 'Parameter', 'type': 'Parameter'}), - **regular_op_with_empty_data('argmax', {'op': 'ArgMax', 'type': None, 'out_max_val': 0, 'top_k': 1, 'axis': 0, - 'output_type': np.int32, 'remove_values_output': True}), - **regular_op_with_empty_data('argmin', {'op': 'ArgMin', 'type': None, 'top_k': 1, 'axis': 0, - 'output_type': np.int32, 'remove_values_output': True}), - **result('result'), - **valued_const_with_data('axis_const', int64_array([1])), - - **regular_op('topk', {'op': 'TopK', 'type': 'TopK', 'sort': 'index', 'index_element_type': np.int32}), - **empty_data('topk_out_0_data'), - **empty_data('topk_out_1_data'), - **regular_op_with_empty_data('topk_scalar', {'op': 'Const', 'type': 'Const', 'value': int64_array([1]), - 'shape': []}), - - - **regular_op_with_empty_data('concat', {'op': 'Concat', 'type': 'Concat', 'axis': 1}) -} - - -class ArgOpsToTopKTest(unittest.TestCase): - - def test_tf_argmax_to_topk(self): - graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', '0:argmax'), - *connect('axis_const', '1:argmax'), - *connect('argmax', 'result') - ], - nodes_with_edges_only=True) - ArgOpsToTopK().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', '0:topk'), - *connect('topk_scalar', '1:topk'), - *connect_front('topk:1', 'topk_out_1_data'), - *connect_front('topk_out_1_data', 'result'), - ], - update_attributes={ - 'topk': {'axis': int64_array([1]), 'mode': 'max', 'remove_values_output': True}, - }, - nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, ref_graph, 'input', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_tf_argmin_to_topk(self): - graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', '0:argmin'), - *connect('axis_const', '1:argmin'), - *connect('argmin', 'result') - ], - nodes_with_edges_only=True) - ArgOpsToTopK().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', '0:topk'), - *connect('topk_scalar', '1:topk'), - *connect_front('topk:1', 'topk_out_1_data'), - *connect_front('topk_out_1_data', 'result') - ], - update_attributes={ - 'topk': {'axis': int64_array([1]), 'mode': 'min', 'remove_values_output': True}, - }, - nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, ref_graph, 'input', check_op_attrs=True) - self.assertTrue(flag, resp) - 
- def test_onnx_argmax_to_topk(self): - graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', 'argmax'), - *connect('argmax', 'result') - ], - nodes_with_edges_only=True) - ArgOpsToTopK().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', '0:topk'), - *connect('topk_scalar', '1:topk'), - *connect_front('topk:1', 'topk_out_1_data'), - *connect_front('topk_out_1_data', 'result') - ], - update_attributes={ - 'topk': {'axis': 0, 'mode': 'max', 'remove_values_output': True}, - }, - nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, ref_graph, 'input', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_onnx_argmin_to_topk(self): - graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', 'argmin'), - *connect('argmin', 'result') - ], - nodes_with_edges_only=True) - ArgOpsToTopK().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', '0:topk'), - *connect('topk_scalar', '1:topk'), - *connect_front('topk:1', 'topk_out_1_data'), - *connect_front('topk_out_1_data', 'result') - ], - update_attributes={ - 'topk': {'axis': 0, 'mode': 'min', 'remove_values_output': True}, - }, - nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, ref_graph, 'input', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_caffe_argmax_to_topk(self): - graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', 'argmax'), - *connect('argmax', 'result') - ], - update_attributes={ - 'argmax': {'out_max_val': 1} - }, - nodes_with_edges_only=True) - ArgOpsToTopK().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', '0:topk'), - *connect('topk_scalar', '1:topk'), - *connect_front('topk:0','topk_out_0_data'), - *connect_front('topk:1', 'topk_out_1_data'), - *connect_front('topk_out_0_data', '1:concat'), - *connect_front('topk_out_1_data', '0:concat'), - *connect('concat', 'result') - ], - update_attributes={ - 'topk': {'axis': 0, 'mode': 'max', 'remove_values_output': True}, - }, - nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, ref_graph, 'input', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/BlockLSTMtoLSTMSequence_test.py b/tools/mo/unit_tests/mo/middle/BlockLSTMtoLSTMSequence_test.py deleted file mode 100644 index 88422254e0917d..00000000000000 --- a/tools/mo/unit_tests/mo/middle/BlockLSTMtoLSTMSequence_test.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.BlockLSTMtoLSTMSequence import BlockLSTMtoLSTMSequenceSingleFirstOutput -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import regular_op_with_empty_data, result, build_graph, connect, \ - valued_const_with_data - -nodes_attributes = { - # hidden_size = 2 - **regular_op_with_empty_data('x', {'op': 'Parameter', 'type': 'Parameter'}), - **valued_const_with_data('weights', np.array([[1, 2, 3, 4, 5, 6, 7, 8], - [9, 10, 11, 12, 13, 14, 15, 16]], dtype=np.float32)), - **valued_const_with_data('bias', np.array([2, 4, 6, 8, 10, 12, 14, 16], dtype=np.float32)), - **regular_op_with_empty_data('h_init_state', {'op': 'Parameter', 'type': 'Parameter'}), - **regular_op_with_empty_data('c_init_state', {'op': 
'Parameter', 'type': 'Parameter'}), - **regular_op_with_empty_data('block_lstm', {'op': 'BlockLSTM', 'type': None, 'forget_bias': 1}), - **result('result'), - - **valued_const_with_data('weights_normalized', np.array( - [[5, 13], [6, 14], [1, 9], [2, 10], [3, 11], - [4, 12], [7, 15], [8, 16]], - dtype=np.float32)), - **valued_const_with_data('bias_normalized', np.array([11, 13, 2, 4, 6, 8, 14, 16], dtype=np.float32)), -} - - -class BlockLSTMtoLSTMSequenceSingleFirstOutputTest(unittest.TestCase): - - def test_tf_block_lstm_to_lstm_seq(self): - graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('x', '0:block_lstm'), - *connect('weights', '1:block_lstm'), - *connect('bias', '2:block_lstm'), - *connect('h_init_state', '3:block_lstm'), - *connect('c_init_state', '4:block_lstm'), - *connect('block_lstm', 'result') - ], - nodes_with_edges_only=True) - BlockLSTMtoLSTMSequenceSingleFirstOutput().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('x', '0:block_lstm'), - *connect('weights_normalized', '1:block_lstm'), - *connect('bias_normalized', '2:block_lstm'), - *connect('h_init_state', '4:block_lstm'), - *connect('c_init_state', '5:block_lstm'), - *connect('block_lstm', 'result'), - ], - update_attributes={ - 'block_lstm': {'sequence_dim': 0, 'batch_dim': 1, 'direction': 'forward', - 'hidden_size': 2, 'format': 'tf', 'type': 'RNNSequence', - 'op': 'LSTM'}, - }, - nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/CheckForCycle_test.py b/tools/mo/unit_tests/mo/middle/CheckForCycle_test.py deleted file mode 100644 index 24629aa5a7ee74..00000000000000 --- a/tools/mo/unit_tests/mo/middle/CheckForCycle_test.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.middle.CheckForCycle import CheckForCycle -from openvino.tools.mo.utils.error import Error -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'node_1_data': {'value': None, 'kind': 'data', 'data_type': None}, - 'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'concat': {'type': 'Concat', 'value': None, 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'node_3_data': {'value': None, 'kind': 'data', 'data_type': None}, - # Placeholders - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'pl_1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'pl_1_data': {'value': None, 'kind': 'data', 'data_type': None}, - 'pl_2': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'pl_2_data': {'value': None, 'kind': 'data', 'data_type': None}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # ScaleShift layer - 'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'}, - 'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 
'data'}, - # Mul op - 'mul_1': {'type': None, 'kind': 'op', 'op': 'Mul'}, - 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result', 'infer': lambda x: None} - } - - -class CycleTest(UnitTestWithMockedTelemetry): - def test_check_for_cycle1(self): - # cyclic case - graph = build_graph(nodes_attributes, - [('node_1', 'node_1_data'), - ('node_1_data', 'node_3'), - ('node_3', 'node_3_data'), - ('node_3_data', 'node_1')], - nodes_with_edges_only=True) - with self.assertRaisesRegex(Error, 'Graph contains a cycle. Can not proceed.*'): - CheckForCycle().find_and_replace_pattern(graph) - - def test_check_for_cycle2(self): - # acyclic case - graph = build_graph(nodes_attributes, - [('node_1', 'node_1_data'), - ('node_1_data', 'node_3'), - ('node_3', 'node_3_data'), - ('node_3_data', 'mul_1'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data') - ], - nodes_with_edges_only=True) - try: - CheckForCycle().find_and_replace_pattern(graph) - except Error: - self.fail("Unexpected Error raised") diff --git a/tools/mo/unit_tests/mo/middle/ConcatOptimization_test.py b/tools/mo/unit_tests/mo/middle/ConcatOptimization_test.py deleted file mode 100644 index 81b1917ec46e28..00000000000000 --- a/tools/mo/unit_tests/mo/middle/ConcatOptimization_test.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.ConcatOptimization import ConcatOdInputEraserAndPortsReconnect -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, shaped_const_with_data, connect - - -class ConcatOdInputEraserAndPortsReconnectTest(unittest.TestCase): - def test_deletion(self): - nodes = { - **shaped_const_with_data('input_0', [1]), - **shaped_const_with_data('input_1', [1]), - **shaped_const_with_data('input_2', [0]), - **shaped_const_with_data('input_3', [1]), - **regular_op_with_shaped_data('concat', [3], {'type': 'Concat'}), - **result(), - } - edges_before = [ - *connect('input_0', '0:concat'), - *connect('input_1', '1:concat'), - *connect('input_2', '2:concat'), - *connect('input_3', '3:concat'), - *connect('concat', 'output'), - ] - edges_after = [ - *connect('input_0', '0:concat'), - *connect('input_1', '1:concat'), - *connect('input_3', '2:concat'), - *connect('concat', 'output'), - ] - - graph = build_graph(nodes, edges_before, nodes_with_edges_only=True) - ConcatOdInputEraserAndPortsReconnect().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, edges_after, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_deletion_2(self): - nodes = { - **shaped_const_with_data('input_0', [5, 0]), - **shaped_const_with_data('input_1', [5, 1]), - **shaped_const_with_data('input_2', [5, 3]), - **shaped_const_with_data('input_3', [5, 5]), - **regular_op_with_shaped_data('concat', [5, 9], {'type': 'Concat', 'axis': 1}), - **result(), - } - edges_before = [ - *connect('input_0', '0:concat'), - *connect('input_1', '1:concat'), - *connect('input_2', '2:concat'), - *connect('input_3', '3:concat'), - *connect('concat', 'output'), - ] - edges_after = [ - *connect('input_1', '0:concat'), - *connect('input_2', '1:concat'), - *connect('input_3', '2:concat'), - 
*connect('concat', 'output'), - ] - - graph = build_graph(nodes, edges_before, nodes_with_edges_only=True) - ConcatOdInputEraserAndPortsReconnect().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, edges_after, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_deletion_3(self): - nodes = { - **shaped_const_with_data('input_0', [5, 3]), - **shaped_const_with_data('input_1', [5, 1]), - **shaped_const_with_data('input_2', [5, 5]), - **shaped_const_with_data('input_3', [5, 0]), - **regular_op_with_shaped_data('concat', [5, 9], {'type': 'Concat', 'axis': 1}), - **result(), - } - edges_before = [ - *connect('input_0', '0:concat'), - *connect('input_1', '1:concat'), - *connect('input_2', '2:concat'), - *connect('input_3', '3:concat'), - *connect('concat', 'output'), - ] - edges_after = [ - *connect('input_0', '0:concat'), - *connect('input_1', '1:concat'), - *connect('input_2', '2:concat'), - *connect('concat', 'output'), - ] - - graph = build_graph(nodes, edges_before, nodes_with_edges_only=True) - ConcatOdInputEraserAndPortsReconnect().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, edges_after, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_deletion_unconnected_port_and_0d(self): - nodes = { - **shaped_const_with_data('input_0', [5, 3]), - **shaped_const_with_data('input_2', [5, 1]), - **shaped_const_with_data('input_3', [5, 0]), - **regular_op_with_shaped_data('concat', [5, 4], {'type': 'Concat', 'axis': 1}), - **result(), - } - edges_before = [ - *connect('input_0', '0:concat'), - *connect('input_2', '2:concat'), - *connect('input_3', '3:concat'), - *connect('concat', 'output'), - ] - edges_after = [ - *connect('input_0', '0:concat'), - *connect('input_2', '1:concat'), - *connect('concat', 'output'), - ] - - graph = build_graph(nodes, edges_before, nodes_with_edges_only=True) - ConcatOdInputEraserAndPortsReconnect().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, edges_after, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_deletion_unconnected_ports(self): - nodes = { - **shaped_const_with_data('input_0', [5, 3]), - **shaped_const_with_data('input_4', [5, 1]), - **shaped_const_with_data('input_7', [5, 2]), - **regular_op_with_shaped_data('concat', [5, 6], {'type': 'Concat', 'axis': 1}), - **result(), - } - edges_before = [ - *connect('input_0', '0:concat'), - *connect('input_4', '4:concat'), - *connect('input_7', '7:concat'), - *connect('concat', 'output'), - ] - edges_after = [ - *connect('input_0', '0:concat'), - *connect('input_4', '1:concat'), - *connect('input_7', '2:concat'), - *connect('concat', 'output'), - ] - - graph = build_graph(nodes, edges_before, nodes_with_edges_only=True) - ConcatOdInputEraserAndPortsReconnect().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, edges_after, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_deletion_trailing_unconnected_ports(self): - nodes = { - **shaped_const_with_data('input_0', [5, 3]), - **regular_op_with_shaped_data('concat', [5, 3], {'type': 'Concat', 'axis': 1}), - **result(), - } - edges_before = [ - *connect('input_0', '0:concat'), - *connect('concat', 
'output'), - ] - edges_after = [ - *connect('input_0', '0:concat'), - *connect('concat', 'output'), - ] - - graph = build_graph(nodes, edges_before, nodes_with_edges_only=True) - Node(graph, 'concat').add_input_port(1) - ConcatOdInputEraserAndPortsReconnect().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, edges_after, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - self.assertTrue(1 not in Node(graph, 'concat').in_ports()) - - def test_negative(self): - nodes = { - **shaped_const_with_data('input_0', [1]), - **shaped_const_with_data('input_1', [1]), - **shaped_const_with_data('input_2', [1]), - **shaped_const_with_data('input_3', [1]), - **regular_op_with_shaped_data('concat', [4], {'type': 'Concat'}), - **result(), - } - edges = [ - *connect('input_0', '0:concat'), - *connect('input_1', '1:concat'), - *connect('input_2', '2:concat'), - *connect('input_3', '3:concat'), - *connect('concat', 'output'), - ] - - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - ConcatOdInputEraserAndPortsReconnect().find_and_replace_pattern(graph) - graph_ref = build_graph(nodes, edges, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_assertion_error(self): - nodes = { - **shaped_const_with_data('input_0', [0]), - **shaped_const_with_data('input_1', [0]), - **shaped_const_with_data('input_2', [0]), - **shaped_const_with_data('input_3', [0]), - **regular_op_with_shaped_data('concat', [0], {'type': 'Concat'}), - **result(), - } - edges = [ - *connect('input_0', '0:concat'), - *connect('input_1', '1:concat'), - *connect('input_2', '2:concat'), - *connect('input_3', '3:concat'), - *connect('concat', 'output'), - ] - - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - self.assertRaises(AssertionError, ConcatOdInputEraserAndPortsReconnect().find_and_replace_pattern, graph) diff --git a/tools/mo/unit_tests/mo/middle/ConvertGroupedStridedSlice_test.py b/tools/mo/unit_tests/mo/middle/ConvertGroupedStridedSlice_test.py deleted file mode 100644 index 2a39ac12d979ac..00000000000000 --- a/tools/mo/unit_tests/mo/middle/ConvertGroupedStridedSlice_test.py +++ /dev/null @@ -1,1255 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import pytest - -from openvino.tools.mo.middle.ConvertGroupedStridedSlice import ConvertGroupedStridedSlice -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_2': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_begin_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_end_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_stride_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - - # StridedSlice layers - 'sslice_1': {'type': None, 
'kind': 'op', 'op': 'StridedSlice', 'slices': None, - 'shrink_axis_mask': np.array([0, 0, 0, 0]), 'begin_mask': int64_array([1, 1, 1, 1]), - 'end_mask': int64_array([1, 1, 1, 1]), 'new_axis_mask': int64_array([0, 0, 0, 0]), - 'ellipsis_mask': int64_array([0])}, - 'sslice_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'sslice_2': {'type': None, 'kind': 'op', 'op': 'StridedSlice', 'slices': None, - 'shrink_axis_mask': np.array([0, 0, 0, 0]), 'begin_mask': int64_array([1, 1, 1, 1]), - 'end_mask': int64_array([1, 1, 1, 1]), 'new_axis_mask': int64_array([0, 0, 0, 0]), - 'ellipsis_mask': int64_array([0])}, - 'sslice_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'sslice_3': {'type': None, 'kind': 'op', 'op': 'StridedSlice', 'slices': None, - 'shrink_axis_mask': np.array([0, 0, 0, 0]), 'begin_mask': int64_array([1, 1, 1, 1]), - 'end_mask': int64_array([1, 1, 1, 1]), 'new_axis_mask': int64_array([0, 0, 0, 0]), - 'ellipsis_mask': int64_array([0])}, - 'sslice_3_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'sslice_4': {'type': None, 'kind': 'op', 'op': 'StridedSlice', 'slices': None, - 'shrink_axis_mask': np.array([0, 0, 0, 0]), 'begin_mask': int64_array([1, 1, 1, 1]), - 'end_mask': int64_array([1, 1, 1, 1]), 'new_axis_mask': int64_array([0, 0, 0, 0]), - 'ellipsis_mask': int64_array([0])}, - 'sslice_4_data': {'value': None, 'shape': None, 'kind': 'data'}, - - # Split layer - 'axis_const': {'kind': 'op'}, - 'axis_const_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'split_dim_const': {'kind': 'op'}, - 'split_dim_const_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'split_1': {'type': 'VariadicSplit', 'kind': 'op', 'op': 'VariadicSplit'}, - 'split_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'split_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'split_3_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'split_4_data': {'value': None, 'shape': None, 'kind': 'data'}, - - # Concat1 operation - 'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'}, - 'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'op_output': {'kind': 'op', 'op': 'Result'}, - 'op_output_1': {'kind': 'op', 'op': 'Result', 'keep_output_port': True}, - 'op_output_2': {'kind': 'op', 'op': 'Result', 'keep_output_port': True}, - - # Squeeze layers - 'sslice_1/Squeeze_shrink': {'type': None, 'value': None, 'kind': 'op', 'op': 'Squeeze'}, - 'sslice_1/Squeeze_shrink_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'sslice_1/squeeze_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': int64_array([2])}, - 'sslice_1/squeeze_const_data': {'kind': 'data', 'value': None, 'shape': None}, - - 'sslice_2/Squeeze_shrink': {'type': None, 'value': None, 'kind': 'op', 'op': 'Squeeze'}, - 'sslice_2/Squeeze_shrink_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'sslice_2/squeeze_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': int64_array([2])}, - 'sslice_2/squeeze_const_data': {'kind': 'data', 'value': None, 'shape': None}, - - # Unsqueeze layer - 'sslice_2/Unsqueeze_new': {'type': None, 'value': None, 'kind': 'op', 'op': 'Unsqueeze'}, - 'sslice_2/Unsqueeze_new_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'sslice_2/unsqueeze_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': int64_array([2])}, - 'sslice_2/unsqueeze_const_data': {'kind': 'data', 'value': None, 'shape': None}, - - # Activations - 'abs': {'type': None, 'value': None, 'kind': 'op', 'op': 'Abs'}, - 'abs_data': 
{'value': None, 'shape': None, 'kind': 'data'}, - 'relu': {'type': None, 'value': None, 'kind': 'op', 'op': 'ReLU'}, - 'relu_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'erf': {'type': None, 'value': None, 'kind': 'op', 'op': 'Erf'}, - 'erf_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'gelu': {'type': None, 'value': None, 'kind': 'op', 'op': 'Gelu'}, - 'gelu_data': {'value': None, 'shape': None, 'kind': 'data'}, -} - -one_strided_slice_case_node_attributes = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'sslice': {'type': None, 'kind': 'op', 'op': 'StridedSlice', 'slices': None, - 'shrink_axis_mask': np.array([0, 0, 0, 0])}, - 'sslice_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'}, -} - -one_strided_slice_case_edges = [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'sslice'), - ('sslice', 'sslice_data'), - ('sslice_data', 'op_output'), -] - - -class TestConvertGroupedStridedSliceTests(): - def test_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('sslice_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 18, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 227, 227, 18])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(18, 36, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 18])}, - - 'sslice_3': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(36, 54, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 227, 227, 18])}, - - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'split_1', {'in': 0}), - ('split_1', 'split_1_data'), - ('split_1', 'split_2_data'), - ('split_1', 'split_3_data'), - ('split_1_data', 'concat_1'), - ('split_2_data', 'concat_1'), - ('split_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output'), - - ('axis_const', 'axis_const_data'), - ('split_dim_const', 'split_dim_const_data'), - ('axis_const_data', 'split_1', {'in': 1}), - ('split_dim_const_data', 'split_1', {'in': 2}), - - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'axis_const': {'value': 3}, - 'split_1_data': {'shape': np.array([1, 227, 227, 18])}, - 'split_2_data': {'shape': np.array([1, 227, 227, 18])}, - 'split_3_data': {'shape': np.array([1, 227, 227, 18])}, - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - - ConvertGroupedStridedSlice().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - assert flag, resp - - def test_2(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - 
('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('sslice_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 37, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 227, 227, 18])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 54, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 17])}, - - 'sslice_3': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 19, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 227, 227, 19])}, - - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'split_1'), - ('split_1', 'split_1_data'), - ('split_1', 'split_2_data'), - ('split_1', 'split_3_data'), - ('split_1_data', 'concat_1'), - ('split_2_data', 'concat_1'), - ('split_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output'), - - ('axis_const', 'axis_const_data'), - ('split_dim_const', 'split_dim_const_data'), - ('axis_const_data', 'split_1', {'in': 1}), - ('split_dim_const_data', 'split_1', {'in': 2}), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'axis_const': {'value': 3}, - 'split_1_data': {'shape': np.array([1, 227, 227, 18])}, - 'split_2_data': {'shape': np.array([1, 227, 227, 17])}, - 'split_3_data': {'shape': np.array([1, 227, 227, 19])}, - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - - pattern = ConvertGroupedStridedSlice() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - assert flag, resp - - # Intersection of split ranges in feature dimension - def test_3_neg(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('sslice_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 39, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 227, 227, 20])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 54, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 17])}, - - 'sslice_3': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 19, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 227, 227, 19])}, - - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - 
('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('sslice_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 39, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 227, 227, 20])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 54, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 17])}, - - 'sslice_3': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 19, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 227, 227, 19])}, - - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - - pattern = ConvertGroupedStridedSlice() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - assert flag, resp - - # Split range overflow in feature dimension - def test_4_neg(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('sslice_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 37, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 227, 227, 18])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 55, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 18])}, - - 'sslice_3': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 19, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 227, 227, 19])}, - - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('sslice_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 37, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 227, 227, 18])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 55, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 18])}, - - 'sslice_3': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 19, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 227, 227, 19])}, - - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - - ConvertGroupedStridedSlice().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', 
check_op_attrs=True) - assert flag, resp - - # Split(1,H,W,54)--->Fake_data (1,H,W,1) - # |`---->Sslice1_out (1,H,W,18) - # |`---->Sslice2_out (1,H,W,18) - # `----->Sslice3_out (1,H,W,17) - def test_5(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('sslice_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output'), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 37, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 227, 227, 18])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 54, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 17])}, - - 'sslice_3': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(1, 19, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 227, 227, 18])}, - - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'split_1'), - ('split_1', 'split_1_data'), - ('split_1', 'split_2_data'), - ('split_1', 'split_3_data'), - ('split_1', 'split_4_data'), - ('split_2_data', 'concat_1'), - ('split_3_data', 'concat_1'), - ('split_4_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output'), - ('split_1_data', 'op_output_1'), - - ('axis_const', 'axis_const_data'), - ('split_dim_const', 'split_dim_const_data'), - ('axis_const_data', 'split_1', {'in': 1}), - ('split_dim_const_data', 'split_1', {'in': 2}), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'axis_const': {'value': 3}, - 'split_1_data': {'shape': np.array([1, 227, 227, 1])}, - 'split_2_data': {'shape': np.array([1, 227, 227, 18])}, - 'split_3_data': {'shape': np.array([1, 227, 227, 17])}, - 'split_4_data': {'shape': np.array([1, 227, 227, 18])}, - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - - ConvertGroupedStridedSlice().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - assert flag, resp - - # Split(1,H,W,54) - # |`---->Sslice1_out (1,H,W,(0,18)) - # |`---->Fake_data (1,H,W,(18,27)) - # |`---->Sslice3_out (1,H,W,(27,45)) - # `----->Fake_data (1,H,W,(45,54)) - def test_6(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 18, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 227, 227, 18])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(27, 45, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 18])}, - - 'concat_1_data': {'shape': 
np.array([1, 227, 227, 54])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'split_1'), - ('split_1', 'split_1_data'), - ('split_1', 'split_2_data'), - ('split_1', 'split_3_data'), - ('split_1', 'split_4_data'), - ('split_1_data', 'concat_1'), - ('split_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output'), - ('split_2_data', 'op_output_1'), - ('split_4_data', 'op_output_2'), - - ('axis_const', 'axis_const_data'), - ('split_dim_const', 'split_dim_const_data'), - ('axis_const_data', 'split_1', {'in': 1}), - ('split_dim_const_data', 'split_1', {'in': 2}), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'axis_const': {'value': 3}, - 'split_1_data': {'shape': np.array([1, 227, 227, 18])}, - 'split_2_data': {'shape': np.array([1, 227, 227, 9])}, - 'split_3_data': {'shape': np.array([1, 227, 227, 18])}, - 'split_4_data': {'shape': np.array([1, 227, 227, 9])}, - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - - ConvertGroupedStridedSlice().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - assert flag, resp - - def test_7_neg(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 10, 1), slice(0, 227, 1), slice(0, 18, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 10, 227, 18])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(10, 227, 1), slice(0, 227, 1), slice(27, 45, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 217, 227, 18])}, - - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 10, 1), slice(0, 227, 1), slice(0, 18, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 10, 227, 18])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(10, 227, 1), slice(0, 227, 1), slice(27, 45, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 217, 227, 18])}, - - 'concat_1_data': {'shape': np.array([1, 227, 227, 54])}, - }) - - pattern = ConvertGroupedStridedSlice() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - assert flag, resp - - # Split(1,54,W,C) - # |`---->Sslice1_out (1,(0,18),W,C) - # |`---->Sslice2_out (1,(18,36),W,C) - # `----->Fake_data (1,(36,54),W,C) - def test_8(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - 
('sslice_2', 'sslice_2_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 54, 54, 3])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 18, 1), slice(0, 54, 1), slice(0, 3, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 18, 54, 3])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(18, 36, 1), slice(0, 54, 1), slice(0, 3, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 18, 54, 3])}, - - 'concat_1_data': {'shape': np.array([1, 54, 54, 3])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'split_1'), - ('split_1', 'split_1_data'), - ('split_1', 'split_2_data'), - ('split_1', 'split_3_data'), - ('split_1_data', 'concat_1'), - ('split_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output'), - ('split_2_data', 'op_output_1'), - - ('axis_const', 'axis_const_data'), - ('split_dim_const', 'split_dim_const_data'), - ('axis_const_data', 'split_1', {'in': 1}), - ('split_dim_const_data', 'split_1', {'in': 2}), - ], - {'placeholder_1_data': {'shape': np.array([1, 54, 54, 3])}, - 'axis_const': {'value': 1}, - 'split_1_data': {'shape': np.array([1, 18, 54, 3])}, - 'split_2_data': {'shape': np.array([1, 18, 54, 3])}, - 'split_3_data': {'shape': np.array([1, 18, 54, 3])}, - 'concat_1_data': {'shape': np.array([1, 54, 54, 3])}, - }) - - pattern = ConvertGroupedStridedSlice() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - assert flag, resp - - # Test for the case when there is only 1 StridedSlice. - @pytest.mark.parametrize("input_shape, slices, output_shape",[(np.array([1, 227, 227, 54]), - np.array([slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 18, 1)]), - np.array([1, 227, 227, 18])), - (np.array([57, 16, 100, 23]), - np.array([slice(3, 16, 1), slice(0, 16, 1), slice(0, 100, 1), slice(0, 23, 1)]), - np.array([13, 16, 100, 23])), - (np.array([16, 800, 1024, 17]), - np.array([slice(0, 16, 1), slice(0, 800, 1), slice(13, 817, 1), slice(0, 17, 1)]), - np.array([16, 800, 804, 17]))]) - def test_9(self, input_shape, slices, output_shape): - graph = build_graph(nodes_attrs=one_strided_slice_case_node_attributes, - edges=one_strided_slice_case_edges, - update_attributes={ - 'placeholder_data': {'shape': input_shape}, - 'sslice': {'slices': slices}, - 'sslice_data': {'shape': output_shape}, - }) - graph.graph['layout'] = 'NHWC' - graph_ref = build_graph(nodes_attrs=one_strided_slice_case_node_attributes, - edges=one_strided_slice_case_edges, - update_attributes={ - 'placeholder_data': {'shape': input_shape}, - 'sslice': {'slices': slices}, - 'sslice_data': {'shape': output_shape}, - }) - pattern = ConvertGroupedStridedSlice() - pattern.find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'op_output', check_op_attrs=True) - assert flag, resp - - # Test for the case when - # 1) There are 4 StridedSlice operations. - # 2) 2 of the StridedSlice operations produce the same data. - # 3) The other 2 StridedSlice operations also produce the same data. - # 4) All StridedSlice outputs are consumed by different operations. 
- def test_10(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('placeholder_1_data', 'sslice_4'), - ('sslice_4', 'sslice_4_data'), - ('sslice_1_data', 'abs'), - ('abs', 'abs_data'), - ('sslice_2_data', 'relu'), - ('relu', 'relu_data'), - ('sslice_3_data', 'erf'), - ('erf', 'erf_data'), - ('sslice_4_data', 'gelu'), - ('gelu', 'gelu_data'), - ('abs_data', 'concat_1'), - ('relu_data', 'concat_1'), - ('erf_data', 'concat_1'), - ('gelu_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 54, 54, 3])}, - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 30, 1), slice(0, 54, 1), slice(0, 3, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 30, 54, 3])}, - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(30, 54, 1), slice(0, 54, 1), slice(0, 3, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 24, 54, 3])}, - 'sslice_3': {'slices': np.array( - [slice(0, 1, 1), slice(0, 30, 1), slice(0, 54, 1), slice(0, 3, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 30, 54, 3])}, - 'sslice_4': {'slices': np.array( - [slice(0, 1, 1), slice(30, 54, 1), slice(0, 54, 1), slice(0, 3, 1)])}, - 'sslice_4_data': {'shape': np.array([1, 24, 54, 3])}, - 'concat_1_data': {'shape': np.array([1, 108, 54, 3])}, - 'abs_data': {'shape': np.array([1, 30, 54, 3])}, - 'relu_data': {'shape': np.array([1, 24, 54, 3])}, - 'erf_data': {'shape': np.array([1, 30, 54, 3])}, - 'gelu_data': {'shape': np.array([1, 24, 54, 3])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'split_1'), - ('split_1', 'split_1_data'), - ('split_1', 'split_2_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('placeholder_1_data', 'sslice_4'), - ('sslice_4', 'sslice_4_data'), - ('split_1_data', 'abs'), - ('abs', 'abs_data'), - ('split_2_data', 'relu'), - ('relu', 'relu_data'), - ('sslice_3_data', 'erf'), - ('erf', 'erf_data'), - ('sslice_4_data', 'gelu'), - ('gelu', 'gelu_data'), - ('abs_data', 'concat_1'), - ('relu_data', 'concat_1'), - ('erf_data', 'concat_1'), - ('gelu_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - - ('axis_const', 'axis_const_data'), - ('split_dim_const', 'split_dim_const_data'), - ('axis_const_data', 'split_1', {'in': 1}), - ('split_dim_const_data', 'split_1', {'in': 2}), - - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 54, 54, 3])}, - 'split_1_data': {'shape': np.array([1, 30, 54, 3])}, - 'split_2_data': {'shape': np.array([1, 24, 54, 3])}, - 'sslice_3': {'slices': np.array( - [slice(0, 1, 1), slice(0, 30, 1), slice(0, 54, 1), slice(0, 3, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 30, 54, 3])}, - 'sslice_4': {'slices': np.array( - [slice(0, 1, 1), slice(30, 54, 1), slice(0, 54, 1), slice(0, 3, 1)])}, - 'sslice_4_data': {'shape': np.array([1, 24, 54, 3])}, - 'abs_data': {'shape': np.array([1, 30, 54, 3])}, - 'relu_data': {'shape': np.array([1, 24, 54, 3])}, - 'erf_data': {'shape': np.array([1, 30, 54, 3])}, - 'gelu_data': {'shape': np.array([1, 24, 54, 3])}, - 'axis_const': {'value': 1}, - 'concat_1_data': {'shape': np.array([1, 108, 54, 3])}, - }) - - pattern = ConvertGroupedStridedSlice() - 
pattern.find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - assert flag, resp - - # dynamic slice - def test_11(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('sslice_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - - 'sslice_1': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(19, 39, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 227, 227, 20])}, - - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(37, 54, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 17])}, - - 'sslice_3': {'slices': [slice(0, 1, 1), slice(0, 227, 1), 12, slice(0, 19, 1)]}, - 'sslice_3_data': {'shape': shape_array([1, 227, dynamic_dimension_value, 19])}, - - 'concat_1_data': {'shape': shape_array([1, 227, dynamic_dimension_value, 54])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = graph.copy() - - pattern = ConvertGroupedStridedSlice() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - assert flag, resp - - # one unique StridedSlice - def test_12(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ], - {'placeholder_1_data': {'shape': np.array([1, 511])}, - - 'sslice_1': {'slices': np.array([slice(0, 1, 1), slice(0, 1, 1)]), - 'begin_mask': np.array([0, 1, 0]), - 'end_mask': np.array([0, 1, 0]), - 'new_axis_mask': np.array([0, 0, 0]), - 'shrink_axis_mask': np.array([0, 0, 0]), - 'ellipsis_mask': np.array([0, 0, 0])}, - 'sslice_1_data': {'shape': np.array([1, 1, 511])}, - - 'sslice_2': {'slices': np.array([slice(0, 1, 1), slice(0, 1, 1)]), - 'begin_mask': np.array([0, 1, 0]), - 'end_mask': np.array([0, 1, 0]), - 'new_axis_mask': np.array([0, 0, 0]), - 'shrink_axis_mask': np.array([0, 0, 0]), - 'ellipsis_mask': np.array([0, 0, 0])}, - 'sslice_2_data': {'shape': np.array([1, 1, 511])}, - }) - graph.graph['layout'] = 'NHWC' - - graph_ref = graph.copy() - - pattern = ConvertGroupedStridedSlice() - pattern.find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_1_data', check_op_attrs=True) - assert flag, resp - (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True) - assert flag, resp - - -class AddReshapeAfterStridedSliceTests(unittest.TestCase): - def test_ss_1_shrink_last(self): - slices = np.array([slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]) - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('placeholder_begin_data', 'sslice_1'), - ('placeholder_end_data', 'sslice_1'), - ('placeholder_stride_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('sslice_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_1': {'slices': slices, - 'shrink_axis_mask': 
[0, 0, 1, 0], - 'new_axis_mask': np.array([0, 0, 0, 0])}, - 'sslice_1_data': {'shape': np.array([1, 227, 54])}, - }, nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('placeholder_begin_data', 'sslice_1'), - ('placeholder_end_data', 'sslice_1'), - ('placeholder_stride_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('sslice_1_data', 'sslice_1/Squeeze_shrink'), - ('sslice_1/squeeze_const', 'sslice_1/squeeze_const_data'), - ('sslice_1/squeeze_const_data', 'sslice_1/Squeeze_shrink'), - ('sslice_1/Squeeze_shrink', 'sslice_1/Squeeze_shrink_data'), - ('sslice_1/Squeeze_shrink_data', 'op_output'), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_1': {'slices': slices, - 'shrink_axis_mask': np.array([0, 0, 0, 0]), - 'new_axis_mask': np.array([0, 0, 0, 0])}, - 'sslice_1_data': {'shape': np.array([1, 227, 1, 54])}, - 'sslice_1/Squeeze_shrink_data': {'shape': np.array([1, 227, 54])} - }, nodes_with_edges_only=True) - - ConvertGroupedStridedSlice().add_squeeze_for_shrink(graph, Node(graph, 'sslice_1')) - - (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_ss_1_shrink(self): - slices = np.array([slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]) - - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_2_data', 'placeholder_2', {'out': 0}), - ('placeholder_2', 'placeholder_2_data'), - ('sslice_2_data', 'op_output', {'out': 0}) - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_2': {'slices': slices, - 'shrink_axis_mask': [0, 0, 1, 0], - 'new_axis_mask': np.array([0, 0, 0, 0])}, - 'sslice_2_data': {'shape': np.array([1, 227, 54])} - }, nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_2_data', 'sslice_2/Squeeze_shrink'), - ('sslice_2/squeeze_const', 'sslice_2/squeeze_const_data'), - ('sslice_2/squeeze_const_data', 'sslice_2/Squeeze_shrink'), - ('sslice_2/Squeeze_shrink', 'sslice_2/Squeeze_shrink_data'), - ('sslice_2/Squeeze_shrink_data', 'placeholder_2', {'out': 0}), - ('placeholder_2', 'placeholder_2_data'), - ('sslice_2/Squeeze_shrink_data', 'op_output', {'out': 0}) - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_2': {'slices': slices, - 'shrink_axis_mask': np.array([0, 0, 0, 0]), - 'new_axis_mask': np.array([0, 0, 0, 0])}, - 'sslice_2_data': {'shape': np.array([1, 227, 1, 54])}, - 'sslice_2/squeeze_const': {'value': np.array([2])}, - 'sslice_2/Squeeze_shrink_data': {'shape': np.array([1, 227, 54])}, - }, nodes_with_edges_only=True) - - ConvertGroupedStridedSlice().add_squeeze_for_shrink(graph, Node(graph, 'sslice_2')) - - (flag, resp) = compare_graphs(graph, graph_ref, 'op_output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_ss_2_shrink(self): - slices = np.array([slice(0, 1, 1), slice(0, 1, 1), slice(0, 227, 1), 
slice(0, 1, 1)]) - - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_2_data', 'placeholder_2', {'out': 0}), - ('placeholder_2', 'placeholder_2_data'), - ('sslice_2_data', 'op_output', {'out': 0}) - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_2': {'slices': slices, - 'shrink_axis_mask': np.array([0, 1, 0, 1]), - 'new_axis_mask': np.array([0, 0, 0, 0])}, - 'sslice_2_data': {'shape': np.array([1, 227])} - }, nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_2_data', 'sslice_2/Squeeze_shrink'), - ('sslice_2/squeeze_const', 'sslice_2/squeeze_const_data'), - ('sslice_2/squeeze_const_data', 'sslice_2/Squeeze_shrink'), - ('sslice_2/Squeeze_shrink', 'sslice_2/Squeeze_shrink_data'), - ('sslice_2/Squeeze_shrink_data', 'placeholder_2', {'out': 0}), - ('placeholder_2', 'placeholder_2_data'), - ('sslice_2/Squeeze_shrink_data', 'op_output', {'out': 0}) - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_2': {'slices': slices, - 'shrink_axis_mask': np.array([0, 0, 0, 0]), - 'new_axis_mask': np.array([0, 0, 0, 0])}, - 'sslice_2_data': {'shape': np.array([1, 1, 227, 1])}, - 'sslice_2/squeeze_const': {'value': np.array([1, 3])}, - 'sslice_2/Squeeze_shrink_data': {'shape': np.array([1, 227])}, - }, nodes_with_edges_only=True) - - ConvertGroupedStridedSlice().add_squeeze_for_shrink(graph, Node(graph, 'sslice_2')) - - (flag, resp) = compare_graphs(graph, graph_ref, 'op_output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_ss_1_new(self): - slices = np.array([slice(0, 1, 1), slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), slice(0, 54, 1)]) - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_2_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_2': {'slices': slices, - 'shrink_axis_mask': np.array([0, 0, 0, 0, 0]), - 'new_axis_mask': np.array([0, 1, 0, 0, 0])}, - 'sslice_2_data': {'shape': np.array([1, 1, 227, 227, 54])} - }, nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_2_data', 'sslice_2/Unsqueeze_new'), - ('sslice_2/unsqueeze_const', 'sslice_2/unsqueeze_const_data'), - ('sslice_2/unsqueeze_const_data', 'sslice_2/Unsqueeze_new'), - ('sslice_2/Unsqueeze_new', 'sslice_2/Unsqueeze_new_data'), - ('sslice_2/Unsqueeze_new_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data')], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 
'sslice_2': {'slices': slices, - 'shrink_axis_mask': np.array([0, 0, 0, 0, 0]), - 'new_axis_mask': np.array([0, 0, 0, 0, 0])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_2/unsqueeze_const': {'value': int64_array([1])}, - 'sslice_2/Unsqueeze_new_data': {'shape': np.array([1, 1, 227, 227, 54])}, - }, nodes_with_edges_only=True) - - pattern = ConvertGroupedStridedSlice() - pattern.add_unsqueeze_for_new(graph, Node(graph, 'sslice_2')) - - (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_ss_shrink_new(self): - slices = np.array([slice(0, 1, 1), slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]) - - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_2_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('sslice_2_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_2': {'slices': slices, - 'shrink_axis_mask': np.array([0, 0, 0, 1, 0]), - 'new_axis_mask': np.array([0, 1, 0, 0, 0])}, - 'sslice_2_data': {'shape': np.array([1, 1, 227, 54])} - }, nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_2_data', 'sslice_2/Unsqueeze_new'), - ('sslice_2/unsqueeze_const', 'sslice_2/unsqueeze_const_data'), - ('sslice_2/unsqueeze_const_data', 'sslice_2/Unsqueeze_new'), - ('sslice_2/Unsqueeze_new', 'sslice_2/Unsqueeze_new_data'), - ('sslice_2/Unsqueeze_new_data', 'sslice_2/Squeeze_shrink'), - ('sslice_2/squeeze_const', 'sslice_2/squeeze_const_data'), - ('sslice_2/squeeze_const_data', 'sslice_2/Squeeze_shrink'), - ('sslice_2/Squeeze_shrink', 'sslice_2/Squeeze_shrink_data'), - ('sslice_2/Squeeze_shrink_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('sslice_2/Squeeze_shrink_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_2': {'slices': slices, - 'shrink_axis_mask': np.array([0, 0, 0, 0, 0]), - 'new_axis_mask': np.array([0, 0, 0, 0, 0])}, - 'sslice_2_data': {'shape': np.array([1, 227, 1, 54])}, - 'sslice_2/unsqueeze_const': {'value': int64_array([1])}, - 'sslice_2/Unsqueeze_new_data': {'shape': np.array([1, 1, 227, 1, 54])}, - 'sslice_2/squeeze_const': {'value': np.array([3])}, - 'sslice_2/Squeeze_shrink_data': {'shape': np.array([1, 1, 227, 54])}, - }, nodes_with_edges_only=True) - - pattern = ConvertGroupedStridedSlice() - pattern.add_squeeze_for_shrink(graph, Node(graph, 'sslice_2')) - pattern.add_unsqueeze_for_new(graph, Node(graph, 'sslice_2')) - - (flag, resp) = compare_graphs(graph, graph_ref, 'op_output', check_op_attrs=True) - self.assertTrue(flag, resp) - - # test case for strided slice that only shrinks dimension - def test_ss_shrink_only(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 
'sslice_2_data'), - ('sslice_2_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 1, 54])}, - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]), - 'shrink_axis_mask': np.array([0, 0, 1, 0])}, - 'sslice_2_data': {'shape': np.array([1, 227, 54])} - }, nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - - graph_ref = graph.copy() - - ConvertGroupedStridedSlice().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_ss_shrink_only_short(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_2_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 1, 54])}, - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]), - 'shrink_axis_mask': np.array([0, 0, 1])}, - 'sslice_2_data': {'shape': np.array([1, 227, 54])} - }, nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - - graph_ref = graph.copy() - - ConvertGroupedStridedSlice().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_ss_shrink_only_long(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('placeholder_begin_data', 'sslice_2'), - ('placeholder_end_data', 'sslice_2'), - ('placeholder_stride_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('sslice_2_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 1, 54])}, - 'sslice_2': {'slices': np.array( - [slice(0, 1, 1), slice(0, 227, 1), slice(0, 1, 1), slice(0, 54, 1)]), - 'shrink_axis_mask': np.array([0, 0, 1, 0, 0])}, - 'sslice_2_data': {'shape': np.array([1, 227, 54])} - }, nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - - graph_ref = graph.copy() - - ConvertGroupedStridedSlice().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'sslice_2_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - # Test case when - # 1) there are 3 StridedSlice operations; - # 2) 2 of the StridedSlice operations have the same attributes; - # 3) the other StridedSlice has different attributes; - # 4) a pair (one StridedSlice from item 2, the StridedSlice from item 3) can be replaced by VariadicSplit. 
- def test_1(self): - graph = build_graph(nodes_attributes, - [ - ('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_1'), - ('sslice_1', 'sslice_1_data'), - ('placeholder_1_data', 'sslice_2'), - ('sslice_2', 'sslice_2_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('sslice_1_data', 'concat_1'), - ('sslice_2_data', 'concat_1'), - ('sslice_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output'), - ], - { - 'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_1': {'slices': np.array([slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), - slice(0, 27, 1)])}, - 'sslice_1_data': {'shape': np.array([1, 227, 227, 27])}, - 'sslice_2': {'slices': np.array([slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), - slice(27, 54, 1)])}, - 'sslice_2_data': {'shape': np.array([1, 227, 227, 27])}, - 'sslice_3': {'slices': np.array([slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), - slice(0, 27, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 227, 227, 27])}, - 'concat_1': {'axis': 3}, - 'concat_1_data': {'shape': np.array([1, 227, 227, 81])}, - }) - graph.graph['layout'] = 'NHWC' - graph_ref = build_graph(nodes_attributes, - [ - ('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'sslice_3'), - ('sslice_3', 'sslice_3_data'), - ('placeholder_1_data', 'split_1'), - ('split_1', 'split_1_data'), - ('split_1', 'split_2_data'), - ('axis_const', 'axis_const_data'), - ('split_dim_const', 'split_dim_const_data'), - ('axis_const_data', 'split_1', {'in': 1}), - ('split_dim_const_data', 'split_1', {'in': 2}), - ('split_1_data', 'concat_1'), - ('split_2_data', 'concat_1'), - ('sslice_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - { - 'placeholder_1_data': {'shape': np.array([1, 227, 227, 54])}, - 'sslice_3': {'slices': np.array([slice(0, 1, 1), slice(0, 227, 1), slice(0, 227, 1), - slice(0, 27, 1)])}, - 'sslice_3_data': {'shape': np.array([1, 227, 227, 27])}, - 'split_1_data': {'shape': np.array([1, 227, 227, 27])}, - 'split_2_data': {'shape': np.array([1, 227, 227, 27])}, - 'axis_const': {'op': 'Const', 'type': 'Const', 'value': 3, 'shape': []}, - 'axis_const_data': {'value': 3, 'shape': []}, - 'split_dim_const': {'op': 'Const', 'type': 'Const', 'value': np.array([27, 27])}, - 'split_dim_const_data': {'value': np.array([27, 27])}, - 'concat_1': {'axis': 3}, - 'concat_1_data': {'shape': np.array([1, 227, 227, 81])} - }) - ConvertGroupedStridedSlice().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - -if __name__ == '__main__': - unittest.main() diff --git a/tools/mo/unit_tests/mo/middle/CutInputHavingZeroDimFromConcat_test.py b/tools/mo/unit_tests/mo/middle/CutInputHavingZeroDimFromConcat_test.py deleted file mode 100644 index e9f3b46c73be2d..00000000000000 --- a/tools/mo/unit_tests/mo/middle/CutInputHavingZeroDimFromConcat_test.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.CutInputHavingZeroDimFromConcat import CutInputHavingZeroDimFromConcat -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -node_attrs_for_the_case_when_there_are_no_zero_shape_constants 
= { - 'const0': { - 'kind': 'op', - 'type': 'Const', - 'op': 'Const', - 'shape': int64_array([1, 2, 5]), - 'value': np.zeros((1, 2, 5)) - }, - 'const0_data': {'kind': 'data', 'shape': int64_array([1, 2, 5]), 'value': None}, - 'const1': { - 'kind': 'op', - 'type': 'Const', - 'op': 'Const', - 'shape': int64_array([1, 2, 7]), - 'value': np.zeros((1, 2, 7)) - }, - 'const1_data': {'kind': 'data', 'shape': int64_array([1, 2, 7]), 'value': None}, - 'placeholder': {'kind': 'op', 'type': 'Parameter', 'op': 'Parameter'}, - 'placeholder_data': { - 'kind': 'data', - 'value': None, - 'shape': int64_array([1, 2, 8]), - 'data_type': None - }, - 'concat': {'kind': 'op', 'type': 'Concat', 'op': 'Concat', 'axis': 2}, - 'concat_data': {'kind': 'data', 'shape': int64_array([1, 2, 20]), 'value': None}, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, -} - - -edges_for_the_case_when_there_are_no_zero_shape_constants = [ - ('const0', 'const0_data'), - ('const1', 'const1_data'), - ('placeholder', 'placeholder_data'), - ('const0_data', 'concat', {'in': 0}), - ('const1_data', 'concat', {'in': 1}), - ('placeholder_data', 'concat', {'in': 2}), - ('concat', 'concat_data'), - ('concat_data', 'output') -] - - -class CutInputHavingZeroDimFromConcatTest(unittest.TestCase): - """ - This class tests deleting of inputs of Concat having zeros in their shapes, if not all inputs have such shapes. - """ - def test_when_need_to_do_nothing(self): - graph = build_graph( - nodes_attrs=node_attrs_for_the_case_when_there_are_no_zero_shape_constants, - edges=edges_for_the_case_when_there_are_no_zero_shape_constants - ) - ref_graph = build_graph( - nodes_attrs=node_attrs_for_the_case_when_there_are_no_zero_shape_constants, - edges=edges_for_the_case_when_there_are_no_zero_shape_constants - ) - CutInputHavingZeroDimFromConcat().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_when_there_are_three_inputs_and_middle_constant_has_zero_in_shape(self): - graph = build_graph( - nodes_attrs={ - 'const0': { - 'kind': 'op', - 'type': 'Const', - 'op': 'Const', - 'shape': int64_array([1, 2, 5]), - 'value': np.zeros((1, 2, 5)) - }, - 'const0_data': {'kind': 'data', 'shape': int64_array([1, 2, 5]), 'value': None}, - 'const1': { - 'kind': 'op', - 'type': 'Const', - 'op': 'Const', - 'shape': int64_array([1, 2, 0]), - 'value': np.zeros((1, 2, 0)) - }, - 'const1_data': {'kind': 'data', 'shape': int64_array([1, 2, 0]), 'value': None}, - 'placeholder': {'kind': 'op', 'type': 'Parameter', 'op': 'Parameter'}, - 'placeholder_data': { - 'kind': 'data', - 'value': None, - 'shape': int64_array([1, 2, 17]), - 'data_type': None - }, - 'concat': {'kind': 'op', 'type': 'Concat', 'op': 'Concat', 'axis': 2}, - 'concat_data': {'kind': 'data', 'shape': int64_array([1, 2, 22]), 'value': None}, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, - }, - edges=[ - ('const0', 'const0_data'), - ('const1', 'const1_data'), - ('placeholder', 'placeholder_data'), - ('const0_data', 'concat', {'in': 0}), - ('const1_data', 'concat', {'in': 1}), - ('placeholder_data', 'concat', {'in': 2}), - ('concat', 'concat_data'), - ('concat_data', 'output') - ] - ) - ref_graph = build_graph( - nodes_attrs={ - 'const0': { - 'kind': 'op', - 'type': 'Const', - 'op': 'Const', - 'shape': int64_array([1, 2, 5]), - 'value': np.zeros((1, 2, 5)) - }, - 'const0_data': {'kind': 'data', 'shape': int64_array([1, 2, 5]), 'value': None}, - 'placeholder': {'kind': 'op', 'type': 'Parameter', 'op': 
'Parameter'}, - 'placeholder_data': { - 'kind': 'data', - 'value': None, - 'shape': int64_array([1, 2, 17]), - 'data_type': None - }, - 'concat': {'kind': 'op', 'type': 'Concat', 'op': 'Concat', 'axis': 2}, - 'concat_data': {'kind': 'data', 'shape': int64_array([1, 2, 22]), 'value': None}, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, - }, - edges=[ - ('const0', 'const0_data'), - ('placeholder', 'placeholder_data'), - ('const0_data', 'concat', {'in': 0}), - ('placeholder_data', 'concat', {'in': 1}), - ('concat', 'concat_data'), - ('concat_data', 'output') - ] - ) - CutInputHavingZeroDimFromConcat().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_there_are_four_inputs_and_first_and_third_input_have_zero_in_their_shapes(self): - graph = build_graph( - nodes_attrs={ - 'const0': { - 'kind': 'op', - 'type': 'Const', - 'op': 'Const', - 'shape': int64_array([5, 0]), - 'value': np.zeros((5, 0)) - }, - 'const0_data': {'kind': 'data', 'shape': int64_array([5, 0]), 'value': None}, - 'placeholder': {'kind': 'op', 'type': 'Parameter', 'op': 'Parameter'}, - 'placeholder_data': { - 'kind': 'data', - 'value': None, - 'shape': int64_array([5, 17]), - 'data_type': None - }, - 'const2': { - 'kind': 'op', - 'type': 'Const', - 'op': 'Const', - 'shape': int64_array([5, 0]), - 'value': np.zeros((5, 0)) - }, - 'const2_data': {'kind': 'data', 'shape': int64_array([5, 0]), 'value': None}, - 'const3': { - 'kind': 'op', - 'type': 'Const', - 'op': 'Const', - 'shape': int64_array([5, 23]), - 'value': np.zeros((5, 23)) - }, - 'const3_data': {'kind': 'data', 'shape': int64_array([5, 23]), 'value': None}, - 'concat': {'kind': 'op', 'type': 'Concat', 'op': 'Concat', 'axis': 1}, - 'concat_data': {'kind': 'data', 'shape': int64_array([5, 40]), 'value': None}, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, - }, - edges=[ - ('const0', 'const0_data'), - ('placeholder', 'placeholder_data'), - ('const2', 'const2_data'), - ('const3', 'const3_data'), - ('const0_data', 'concat', {'in': 0}), - ('placeholder_data', 'concat', {'in': 1}), - ('const2_data', 'concat', {'in': 2}), - ('const3_data', 'concat', {'in': 3}), - ('concat', 'concat_data'), - ('concat_data', 'output') - ] - ) - ref_graph = build_graph( - nodes_attrs={ - 'placeholder': {'kind': 'op', 'type': 'Parameter', 'op': 'Parameter'}, - 'placeholder_data': { - 'kind': 'data', - 'value': None, - 'shape': int64_array([5, 17]), - 'data_type': None - }, - 'const3': { - 'kind': 'op', - 'type': 'Const', - 'op': 'Const', - 'shape': int64_array([5, 23]), - 'value': np.zeros((5, 23)) - }, - 'const3_data': {'kind': 'data', 'shape': int64_array([5, 23]), 'value': None}, - 'concat': {'kind': 'op', 'type': 'Concat', 'op': 'Concat', 'axis': 1}, - 'concat_data': {'kind': 'data', 'shape': int64_array([5, 40]), 'value': None}, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, - }, - edges=[ - ('placeholder', 'placeholder_data'), - ('const3', 'const3_data'), - ('placeholder_data', 'concat', {'in': 0}), - ('const3_data', 'concat', {'in': 1}), - ('concat', 'concat_data'), - ('concat_data', 'output') - ] - ) - CutInputHavingZeroDimFromConcat().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/DilatedConvolution_test.py b/tools/mo/unit_tests/mo/middle/DilatedConvolution_test.py deleted file mode 100644 index e9563ec60302da..00000000000000 --- 
a/tools/mo/unit_tests/mo/middle/DilatedConvolution_test.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.DilatedConvolution import DilatedConvolutionConverter -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, connect, \ - regular_op_with_shaped_data, valued_const_with_data - -shape = int64_array([1, 375, 500, 24]) -nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}), - **valued_const_with_data('stb_bs', int64_array([1, 32, 32, 1])), - **valued_const_with_data('stb_pad_begin', int64_array([0, 32, 32, 0])), - **valued_const_with_data('stb_pad_end', int64_array([0, 41, 44, 0])), - **regular_op_with_shaped_data('space_to_batch', int64_array([1024, 14, 18, 24]), - {'op': 'SpaceToBatch', 'name': 'stb'}), - **regular_op_with_shaped_data('conv', int64_array([1024, 12, 16, 24]), - {'op': 'Conv2D', 'name': 'conv', 'spatial_dims': int64_array([1, 2]), - 'dilation': int64_array([1, 1, 1, 1]), - 'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]])}), - **valued_const_with_data('bts_bs', int64_array([1, 32, 32, 1])), - **valued_const_with_data('bts_crop_begin', int64_array([0, 0, 0, 0])), - **valued_const_with_data('bts_crop_end', int64_array([0, 9, 12, 0])), - **regular_op_with_shaped_data('batch_to_space', shape, {'op': 'BatchToSpace', 'name': 'bts'}), - **result('result') - } - -edges = [*connect('input', '0:space_to_batch'), - *connect('stb_bs', '1:space_to_batch'), - *connect('stb_pad_begin', '2:space_to_batch'), - *connect('stb_pad_end', '3:space_to_batch'), - *connect('space_to_batch', '0:conv'), - *connect('conv', '0:batch_to_space'), - *connect('bts_bs', '1:batch_to_space'), - *connect('bts_crop_begin', '2:batch_to_space'), - *connect('bts_crop_end', '3:batch_to_space'), - *connect('batch_to_space', 'result') - ] - -ref_nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('conv', shape, - {'op': 'Conv2D', 'name': 'conv', 'spatial_dims': int64_array([1, 2]), - 'dilation': int64_array([1, 32, 32, 1]), 'auto_pad': None, - 'pad': int64_array([[0, 0], [32, 32], [32, 32], [0, 0]])}), - **result('result') - } -ref_edges = [*connect('input', '0:conv'), - *connect('conv', 'result') - ] - - -class DilatedConvolutionTest(unittest.TestCase): - def test_dilated_conv_1(self): - graph = build_graph(nodes, edges) - - graph_ref = build_graph(ref_nodes, ref_edges) - - graph.graph['layout'] = 'NHWC' - graph.stage = 'middle' - - DilatedConvolutionConverter().find_and_replace_pattern(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/EltwiseInputReshape_test.py b/tools/mo/unit_tests/mo/middle/EltwiseInputReshape_test.py deleted file mode 100644 index b906e4f0edcac1..00000000000000 --- a/tools/mo/unit_tests/mo/middle/EltwiseInputReshape_test.py +++ /dev/null @@ -1,531 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.EltwiseInputReshape import normalize_eltwise_inputs -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from 
openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the -# dictionary with node attributes. -nodes_attributes = { - # Placeholder layers - 'placeholder_1': {'value': None, 'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2': {'value': None, 'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_3': {'value': None, 'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - - # Reshape layers - 'reshape_1': {'type': 'Unsqueeze', 'value': None, 'kind': 'op', 'op': 'Unsqueeze'}, - 'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'reshape_1_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None}, - 'reshape_1_const_data': {'kind': 'data', 'value': None, 'shape': None}, - - 'reshape_2': {'type': 'Unsqueeze', 'value': None, 'kind': 'op', 'op': 'Unsqueeze'}, - 'reshape_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'reshape_2_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None}, - 'reshape_2_const_data': {'kind': 'data', 'value': None, 'shape': None}, - - # Eltwise consumes layers - 'eltwise_1': {'kind': 'op', 'is_eltwise': True}, - 'eltwise_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'eltwise_2': {'kind': 'op', 'is_eltwise': True}, - 'eltwise_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'eltwise_3': {'kind': 'op', 'is_eltwise': True}, - 'eltwise_3_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'eltwise_4': {'kind': 'op', 'is_eltwise': True}, - 'eltwise_4_data': {'value': None, 'shape': None, 'kind': 'data'}, - - # Concat - 'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'}, -} - - -class EltwiseInputNormalizationTest(unittest.TestCase): - def test1_not_constant(self): - # - # data1(1,3,64,64)----. data(1,3,64,64)-------. - # data2(1,64,1)-------->Eltwise-->data(1,3,64,64) => data(1,64,1)->Reshape->data(1,1,64,1)-->Eltwise->... 
- # data3(64,1)------' data(64,1)->Reshape->data(1,1,64,1)-' - # - graph = build_graph(nodes_attributes, [ - ('placeholder_1', 'placeholder_1_data'), - ('placeholder_1', 'placeholder_2_data'), - ('placeholder_1', 'placeholder_3_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_2_data', 'eltwise_1'), - ('placeholder_3_data', 'eltwise_1'), - ('eltwise_1', 'eltwise_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])}, - 'placeholder_2_data': {'shape': np.array([1, 64, 1])}, - 'placeholder_3_data': {'shape': np.array([64, 1])}, - 'eltwise_1_data': {'shape': np.array([1, 3, 64, 64])} - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [ - ('placeholder_1', 'placeholder_1_data'), - ('placeholder_1', 'placeholder_2_data'), - ('placeholder_1', 'placeholder_3_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_2_data', 'reshape_1'), - ('reshape_1_const', 'reshape_1_const_data'), - ('reshape_1_const_data', 'reshape_1'), - ('placeholder_3_data', 'reshape_2'), - ('reshape_2_const', 'reshape_2_const_data'), - ('reshape_2_const_data', 'reshape_2'), - ('reshape_1', 'reshape_1_data'), - ('reshape_2', 'reshape_2_data'), - ('reshape_1_data', 'eltwise_1'), - ('reshape_2_data', 'eltwise_1'), - ('eltwise_1', 'eltwise_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])}, - 'reshape_1_const': {'value': int64_array([0]), 'shape': int64_array([1])}, - 'reshape_1_const_data': {'value': int64_array([0]), - 'shape': int64_array([1])}, - 'reshape_1_data': {'shape': np.array([1, 1, 64, 1])}, - 'reshape_2_const': {'value': int64_array([0, 1]), 'shape': int64_array([2])}, - 'reshape_2_const_data': {'value': int64_array([0, 1]), - 'shape': int64_array([2])}, - 'reshape_2_data': {'shape': np.array([1, 1, 64, 1])}, - 'eltwise_1_data': {'shape': np.array([1, 3, 64, 64])} - }, nodes_with_edges_only=True) - - normalize_eltwise_inputs(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'eltwise_1', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_mega_hardcore(self): - # ORIGINAL GRAPH - # - # data1(1,3,64,64)---,->Eltwise1->data(1,3,64,64)-----,->Eltwise2->data(1,3,64,64)---,->Eltwise4->data(1,3,64,64) - # /\ /\ /\ - # data2(64,1)-----,-'--------------------------------'------------------------------' - # \/ / - # data3(64,1)----`-->Eltwise3->data(64,1)----------' - # - # REFERENCE GRAPH AFTER TRANSFORMATION - # - # data1(1,3,64,64)---------------------,->Eltwise1->data(1,3,64,64)-----,->Eltwise2->data(1,3,64,64)---,->Eltwise4->data(1,3,64,64) - # /\ /\ /\ - # data2(64,1)-,- Reshape1(1,1,64,64)--'--------------------------------o-------------------------------' - # | | - # | Reshape(1,1,64,1) - # \/ | - # data3(64,1)----------->Eltwise3->data(64,1)--------------------------' - # - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_3', 'placeholder_3_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_2_data', 'eltwise_1'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_1_data', 'eltwise_2'), - ('placeholder_2_data', 'eltwise_3'), - ('placeholder_3_data', 'eltwise_3'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_3_data', 'eltwise_2'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_2_data', 'eltwise_4'), - ('placeholder_2_data', 'eltwise_4'), - ('eltwise_4', 'eltwise_4_data'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])}, - 'placeholder_2_data': {'shape': np.array([64, 1]), 'value': 
np.ones([64, 1])}, - 'placeholder_3_data': {'shape': np.array([64, 1])}, - 'eltwise_1_data': {'shape': np.array([1, 3, 64, 64])}, - 'eltwise_2_data': {'shape': np.array([1, 3, 64, 64])}, - 'eltwise_3_data': {'shape': np.array([64, 1])}, - 'eltwise_4_data': {'shape': np.array([1, 3, 64, 64])} - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_3', 'placeholder_3_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_2_data', 'reshape_1'), - ('reshape_1_const', 'reshape_1_const_data'), - ('reshape_1_const_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'eltwise_1'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_1_data', 'eltwise_2'), - ('placeholder_2_data', 'eltwise_3'), - ('placeholder_3_data', 'eltwise_3'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_3_data', 'reshape_2'), - ('reshape_2_const', 'reshape_2_const_data'), - ('reshape_2_const_data', 'reshape_2'), - ('reshape_2', 'reshape_2_data'), - ('reshape_2_data', 'eltwise_2'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_2_data', 'eltwise_4'), - ('reshape_1_data', 'eltwise_4'), - ('eltwise_4', 'eltwise_4_data'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])}, - 'placeholder_2_data': {'shape': np.array([64, 1]), - 'value': np.ones([64, 1])}, - 'placeholder_3_data': {'shape': np.array([64, 1])}, - 'reshape_1_const': {'value': int64_array([0, 1]), 'shape': int64_array([2])}, - 'reshape_1_const_data': {'value': int64_array([0, 1]), - 'shape': int64_array([2])}, - 'reshape_1_data': {'shape': np.array([1, 1, 64, 1])}, - - 'reshape_2_const': {'value': int64_array([0, 1]), 'shape': int64_array([2])}, - 'reshape_2_const_data': {'value': int64_array([0, 1]), - 'shape': int64_array([2])}, - 'reshape_2_data': {'shape': np.array([1, 1, 64, 1])}, - 'eltwise_1_data': {'shape': np.array([1, 3, 64, 64])}, - 'eltwise_2_data': {'shape': np.array([1, 3, 64, 64])}, - 'eltwise_3_data': {'shape': np.array([64, 1])}, - 'eltwise_4_data': {'shape': np.array([1, 3, 64, 64])} - }, nodes_with_edges_only=True) - - normalize_eltwise_inputs(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'eltwise_4', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test2_not_constant(self): - # ,-------------->consumer3 ,------------>consumer3 - # data---(new_shape1)-->consumer1 => data---->Reshape-->consumer1 - # `-(new_shape2)-->consumer2 `-->Reshape-->consumer2 - # - graph = build_graph(nodes_attributes, [ - ('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_1_data', 'eltwise_2'), - ('placeholder_1_data', 'eltwise_3'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_1_data', 'concat'), - ('eltwise_2_data', 'concat'), - ('eltwise_3_data', 'concat'), - ], - {'placeholder_1_data': {'shape': int64_array([1, 3])}, - 'eltwise_1_data': {'shape': int64_array([1, 1, 1, 3])}, - 'eltwise_2_data': {'shape': int64_array([1, 1, 3])}, - 'eltwise_3_data': {'shape': int64_array([1, 3])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [ - ('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'reshape_1'), - ('reshape_1_const', 'reshape_1_const_data'), - ('reshape_1_const_data', 'reshape_1'), - ('placeholder_1_data', 'reshape_2'), - ('reshape_2_const', 'reshape_2_const_data'), - ('reshape_2_const_data', 'reshape_2'), - 
('placeholder_1_data', 'eltwise_3'), - ('reshape_1', 'reshape_1_data'), - ('reshape_2', 'reshape_2_data'), - ('reshape_1_data', 'eltwise_1'), - ('reshape_2_data', 'eltwise_2'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_1_data', 'concat'), - ('eltwise_2_data', 'concat'), - ('eltwise_3_data', 'concat'), - ], - {'placeholder_1_data': {'shape': int64_array([1, 3])}, - 'reshape_1_const': {'value': int64_array([0, 1]), 'shape': int64_array([2])}, - 'reshape_1_const_data': {'value': int64_array([0, 1]), - 'shape': int64_array([2])}, - 'reshape_1_data': {'shape': int64_array([1, 1, 1, 3])}, - 'reshape_2_const': {'value': int64_array([0]), 'shape': int64_array([1])}, - 'reshape_2_const_data': {'value': int64_array([0]), 'shape': int64_array([1])}, - 'reshape_2_data': {'shape': int64_array([1, 1, 3])}, - }, nodes_with_edges_only=True) - - normalize_eltwise_inputs(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test3_not_constant(self): - # ,--------------->consumer3 ,----------->consumer3 - # data---(new_shape1)-->consumer1 => data-->Reshape-->consumer1 - # `-(new_shape1)-->consumer2 `-->consumer2 - # - graph = build_graph(nodes_attributes, - [ - ('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_1_data', 'eltwise_2'), - ('placeholder_1_data', 'eltwise_3'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_1_data', 'concat'), - ('eltwise_2_data', 'concat'), - ('eltwise_3_data', 'concat'), - ], - {'placeholder_1_data': {'shape': int64_array([1, 3])}, - 'eltwise_1_data': {'shape': int64_array([1, 1, 1, 3])}, - 'eltwise_2_data': {'shape': int64_array([1, 1, 1, 3])}, - 'eltwise_3_data': {'shape': int64_array([1, 3])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [ - ('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'reshape_1'), - ('reshape_1_const', 'reshape_1_const_data'), - ('reshape_1_const_data', 'reshape_1'), - ('placeholder_1_data', 'eltwise_3'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'eltwise_1'), - ('reshape_1_data', 'eltwise_2'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_1_data', 'concat'), - ('eltwise_2_data', 'concat'), - ('eltwise_3_data', 'concat'), - ], - {'placeholder_1_data': {'shape': int64_array([1, 3])}, - 'reshape_1_const': {'value': int64_array([0, 1]), 'shape': int64_array([2])}, - 'reshape_1_const_data': {'value': int64_array([0, 1]), - 'shape': int64_array([2])}, - 'reshape_1_data': {'shape': int64_array([1, 1, 1, 3])}, - }, nodes_with_edges_only=True) - - normalize_eltwise_inputs(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test4_constant(self): - # ,--------------->consumer3 ,------------>consumer3 - # data---(new_shape1)-->consumer1 => data--->reshape1-->consumer1 - # `-(new_shape2)-->consumer2 `->reshape2-->consumer2 - # - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_1_data', 'eltwise_2'), - ('placeholder_1_data', 'eltwise_3'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_1_data', 'concat'), - ('eltwise_2_data', 'concat'), - 
('eltwise_3_data', 'concat'), - ], - {'placeholder_1_data': {'shape': int64_array([1, 3]), 'value': np.ones([1, 3])}, - 'eltwise_1_data': {'shape': int64_array([1, 1, 1, 3])}, - 'eltwise_2_data': {'shape': int64_array([1, 1, 3])}, - 'eltwise_3_data': {'shape': int64_array([1, 3])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'reshape_1'), - ('reshape_1_const', 'reshape_1_const_data'), - ('reshape_1_const_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'eltwise_1'), - ('placeholder_1_data', 'reshape_2'), - ('reshape_2_const', 'reshape_2_const_data'), - ('reshape_2_const_data', 'reshape_2'), - ('reshape_2', 'reshape_2_data'), - ('reshape_2_data', 'eltwise_2'), - ('placeholder_1_data', 'eltwise_3'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_1_data', 'concat'), - ('eltwise_2_data', 'concat'), - ('eltwise_3_data', 'concat'), - ], - {'placeholder_1_data': {'shape': int64_array([1, 3]), 'value': np.ones([1, 3])}, - 'reshape_1_const': {'value': int64_array([0, 1]), 'shape': int64_array([2])}, - 'reshape_1_const_data': {'value': int64_array([0, 1]), - 'shape': int64_array([2])}, - 'reshape_1_data': {'shape': int64_array([1, 1, 1, 3])}, - - 'reshape_2_const': {'value': int64_array([0]), 'shape': int64_array([1])}, - 'reshape_2_const_data': {'value': int64_array([0]), - 'shape': int64_array([1])}, - 'reshape_2_data': {'shape': int64_array([1, 1, 3])}, - }, nodes_with_edges_only=True) - - normalize_eltwise_inputs(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test5_constant(self): - # ,-(new_shape)-->consumer3 ,-->consumer3 - # data---(new_shape)-->consumer1 => data-->reshape---->consumer1 - # `-(new_shape)-->consumer2 `-->consumer2 - # - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_1_data', 'eltwise_2'), - ('placeholder_1_data', 'eltwise_3'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_1_data', 'concat'), - ('eltwise_2_data', 'concat'), - ('eltwise_3_data', 'concat'), - ], - {'placeholder_1_data': {'shape': int64_array([1, 3]), 'value': np.ones([1, 3])}, - 'eltwise_1_data': {'shape': int64_array([1, 1, 3])}, - 'eltwise_2_data': {'shape': int64_array([1, 1, 3])}, - 'eltwise_3_data': {'shape': int64_array([1, 1, 3])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'reshape_1'), - ('reshape_1_const', 'reshape_1_const_data'), - ('reshape_1_const_data', 'reshape_1'), - ('reshape_1', 'reshape_1_data'), - ('reshape_1_data', 'eltwise_1'), - ('reshape_1_data', 'eltwise_2'), - ('reshape_1_data', 'eltwise_3'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_1_data', 'concat'), - ('eltwise_2_data', 'concat'), - ('eltwise_3_data', 'concat'), - ], - {'placeholder_1_data': {'shape': int64_array([1, 3]), 'value': np.ones([1, 3])}, - 'reshape_1_const': {'value': int64_array([0]), 'shape': int64_array([1])}, - 'reshape_1_const_data': {'value': int64_array([0]), - 'shape': int64_array([1])}, - 'reshape_1_data': {'shape': int64_array([1, 1, 3])}, - }, nodes_with_edges_only=True) - - 
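A minimal NumPy sketch of the shape alignment these cases exercise, assuming nothing beyond standard broadcasting; the shapes mirror the test data above (a [64, 1] secondary input against a [1, 3, 64, 64] input), and the variable names are illustrative only:

import numpy as np

full = np.zeros((1, 3, 64, 64))       # full-rank eltwise input (placeholder_1_data)
low = np.ones((64, 1))                # lower-rank input (placeholder_2_data)
aligned = low.reshape((1, 1, 64, 1))  # insert singleton axes 0 and 1, matching the reference
                                      # graph's [0, 1] constant and its [1, 1, 64, 1] output
assert (full + aligned).shape == (1, 3, 64, 64)   # ranks now match, so broadcasting is unambiguous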
normalize_eltwise_inputs(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test6_not_constant(self): - # ,--------------->consumer3 ,->consumer3 - # data---(new_shape1)-->consumer1 => data----->consumer1 - # `-(new_shape1)-->consumer2 `-->consumer2 - # - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_1_data', 'eltwise_2'), - ('placeholder_1_data', 'eltwise_3'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_1_data', 'concat'), - ('eltwise_2_data', 'concat'), - ('eltwise_3_data', 'concat'), - ], - {'placeholder_1_data': {'shape': int64_array([1, 3])}, - 'eltwise_1_data': {'shape': int64_array([1, 3])}, - 'eltwise_2_data': {'shape': int64_array([1, 3])}, - 'eltwise_3_data': {'shape': int64_array([1, 3])}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_1_data', 'eltwise_2'), - ('placeholder_1_data', 'eltwise_3'), - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_2', 'eltwise_2_data'), - ('eltwise_3', 'eltwise_3_data'), - ('eltwise_1_data', 'concat'), - ('eltwise_2_data', 'concat'), - ('eltwise_3_data', 'concat'), - ], - {'placeholder_1_data': {'shape': int64_array([1, 3])}}, nodes_with_edges_only=True) - - normalize_eltwise_inputs(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test7_axis1_not_constant(self): - # - # data1(1,3,64,64)----. data(1,3,64,64)-------. - # data2(3,64,1)-------->Eltwise-->data(1,3,64,64)=> data(3,64,1)->Unsqueeze(0)->data(1,3,64,1)-->Eltwise->... 
- # data3(3,1)------' data(3,1)->Unsqueeze(2, 0)->data(1,3,1,1)-' - # - graph = build_graph(nodes_attributes, [ - ('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_3', 'placeholder_3_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_2_data', 'eltwise_1'), - ('placeholder_3_data', 'eltwise_1'), - ('eltwise_1', 'eltwise_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])}, - 'placeholder_2_data': {'shape': np.array([3, 64, 1])}, - 'placeholder_3_data': {'shape': np.array([3, 1])}, - 'eltwise_1_data': {'shape': np.array([1, 3, 64, 64])}, - 'eltwise_1' : {'axis': 1} - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [ - ('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_3', 'placeholder_3_data'), - ('placeholder_1_data', 'eltwise_1'), - ('placeholder_2_data', 'reshape_1'), - ('reshape_1_const', 'reshape_1_const_data'), - ('reshape_1_const_data', 'reshape_1'), - ('placeholder_3_data', 'reshape_2'), - ('reshape_2_const', 'reshape_2_const_data'), - ('reshape_2_const_data', 'reshape_2'), - ('reshape_1', 'reshape_1_data'), - ('reshape_2', 'reshape_2_data'), - ('reshape_1_data', 'eltwise_1'), - ('reshape_2_data', 'eltwise_1'), - ('eltwise_1', 'eltwise_1_data') - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 64, 64])}, - 'placeholder_2_data': {'shape': np.array([3, 64, 1])}, - 'placeholder_3_data': {'shape': np.array([3, 1])}, - 'reshape_1_const': {'value': int64_array([0]), 'shape': int64_array([1])}, - 'reshape_1_const_data': {'value': int64_array([0]), - 'shape': int64_array([1])}, - 'reshape_1_data': {'shape': np.array([1, 3, 64, 1])}, - 'reshape_2_const': {'value': int64_array([2, 0]), 'shape': int64_array([2])}, - 'reshape_2_const_data': {'value': int64_array([2, 0]), - 'shape': int64_array([2])}, - 'reshape_2_data': {'shape': np.array([1, 3, 1, 1])}, - 'eltwise_1_data': {'shape': np.array([1, 3, 64, 64])} - }, nodes_with_edges_only=True) - - normalize_eltwise_inputs(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'eltwise_1', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/FakeSplitOutputs_test.py b/tools/mo/unit_tests/mo/middle/FakeSplitOutputs_test.py deleted file mode 100644 index cb14ca53234db1..00000000000000 --- a/tools/mo/unit_tests/mo/middle/FakeSplitOutputs_test.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.FakeSplitOutputs import AddFakeOutputsToSplit, AddFakeOutputsToVariadicSplit -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.eliminate import graph_clean_up -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter', 'shape': np.array([1, 227, 227, 3])}, - # VariadicSplit operation - 'variadic_split': {'type': 'VariadicSplit', 'kind': 'op', 'op': 'VariadicSplit'}, - 'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 3, 'axis': 3}, - # Test operation - 'last': {'type': None, 'value': None, 'kind': 'op', 'op': None, 'infer': copy_shape_infer}, - 'res': {'type': 'Result', 'kind': 'op', 'op': 'Result'}, - # Data nodes - 'placeholder_data': {'kind': 'data', 'value': None, 'shape': np.array([1, 
227, 227, 3])}, - 'variadic_split_data_1': {'kind': 'data', 'value': None, 'shape': np.array([1, 2, 227, 3])}, - 'split_data_1': {'kind': 'data', 'value': None, 'shape': np.array([1, 227, 227, 1])}, - 'last_data': {'kind': 'data', 'value': None, 'shape': np.array([1, 227, 227, 3])}, - - 'axis_const': {'kind': 'op', 'op': 'Const'}, - 'axis_const_data': {'value': np.int64(1), 'shape': None, 'kind': 'data'}, - 'split_dim_const': {'kind': 'op', 'op': 'Const'}, - 'split_dim_const_data': {'value': np.array([1, 2, 3]), 'shape': None, 'kind': 'data'}, - -} - - -class SplitSaveEmptyBranchesTest(unittest.TestCase): - def test_variadic_split_non_zero(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_data'), ('placeholder_data', 'variadic_split'), - ('variadic_split', 'variadic_split_data_1'), ('variadic_split_data_1', 'last'), - ('last', 'last_data'), ('last_data', 'res'), - - ('axis_const', 'axis_const_data'), - ('split_dim_const', 'split_dim_const_data'), - ('axis_const_data', 'variadic_split', {'in': 1}), - ('split_dim_const_data', 'variadic_split', {'in': 2}), - ], nodes_with_edges_only=True) - node = Node(graph, 'variadic_split') - - # extractor should do it - node['out_ports_count'] = 3 - for p in range(len(node.out_edges()), node.out_ports_count): - node.add_output_port(p) - - replacer = AddFakeOutputsToVariadicSplit() - replacer.find_and_replace_pattern(graph) - - for n in graph.get_op_nodes(): - n['need_shape_inference'] = False - graph_clean_up(graph) - - self.assertTrue(len(node.out_edges()) == 3) - - def test_split(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_data'), ('placeholder_data', 'split'), - ('split', 'split_data_1'), ('split_data_1', 'last'), - ('last', 'last_data'), ('last_data', 'res'), - ], nodes_with_edges_only=True) - node = Node(graph, 'split') - - # extractor should do it - node['out_ports_count'] = node.num_splits - for p in range(len(node.out_edges()), node.out_ports_count): - node.add_output_port(p) - - replacer = AddFakeOutputsToSplit() - replacer.find_and_replace_pattern(graph) - - for n in graph.get_op_nodes(): - n['need_shape_inference'] = False - graph_clean_up(graph) - - self.assertTrue(len(node.out_edges()) == node.num_splits) diff --git a/tools/mo/unit_tests/mo/middle/FuseReshapeSequenceKaldi_test.py b/tools/mo/unit_tests/mo/middle/FuseReshapeSequenceKaldi_test.py deleted file mode 100644 index 525ecd9185e894..00000000000000 --- a/tools/mo/unit_tests/mo/middle/FuseReshapeSequenceKaldi_test.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.middle.FuseReshapesSequence import FuseReshapesSequenceKaldi -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, valued_const_with_data, connect, regular_op_with_shaped_data - - -class FuseReshapesKaldiTests(unittest.TestCase): - ref_nodes = { - **regular_op_with_shaped_data('conv', [1, 128, 1, 9], {'kind': 'op', 'op': 'Convolution', - 'kernel': [1, 11, 1, 5], 'patch_stride': 5, - 'kernel_spatial': [1, 5]}), - **valued_const_with_data('transpose_out_order', int64_array([0, 2, 3, 1])), - **regular_op_with_shaped_data('transpose_out', [1, 1, 9, 128], {'op': 'Transpose', 'type': 'Transpose'}), - **valued_const_with_data('transpose_in_order', int64_array([0, 3, 1, 2])), - 
**regular_op_with_shaped_data('transpose_in', [1, 128, 1, 9], {'op': 'Transpose', 'type': 'Transpose'}), - **regular_op_with_shaped_data('pool', [1, 128, 1, 3], {'kind': 'op', 'op': 'Pooling', - 'pool_stride': 3, 'pool_step': [1, 1, 1, 1]}), - } - - nodes = { - **regular_op_with_shaped_data('conv', [1, 128, 1, 9], {'kind': 'op', 'op': 'Convolution', - 'kernel': [1, 1, 11, 5]}), - **valued_const_with_data('transpose_out_order', int64_array([0, 2, 3, 1])), - **regular_op_with_shaped_data('transpose_out', [1, 1, 9, 128], {'op': 'Transpose', 'type': 'Transpose'}), - **valued_const_with_data('reshape_out_shape', int64_array([0, -1])), - **regular_op_with_shaped_data('reshape_out', [1, 1152], {'op': 'Reshape', 'type': 'Reshape', - 'special_zero': True}), - - **regular_op_with_shaped_data('shapeof', [4], {'op': 'ShapeOf', 'type': 'ShapeOf'}), - **valued_const_with_data('ind', int64_array([0])), - **valued_const_with_data('axis', int64_array(0)), - **regular_op_with_shaped_data('gather_batch', [], {'op': 'Gather', 'type': 'Gather'}), - **valued_const_with_data('t', int64_array([1])), - **valued_const_with_data('h', int64_array([9])), - **valued_const_with_data('ind_h', int64_array([1])), - **regular_op_with_shaped_data('gather_h', [], {'op': "Gather", 'type': 'Gather'}), - **valued_const_with_data('th', int64_array([9])), - **regular_op_with_shaped_data('div', [], {'op': 'Div', 'type': 'Divide'}), - **regular_op_with_shaped_data('concat', [4], {'op': 'Concat', 'type': 'Concat'}), - - **regular_op_with_shaped_data('reshape_in', [1, 1, 9, 128], {'op': 'Reshape', 'type': 'Reshape'}), - **valued_const_with_data('transpose_in_order', int64_array([0, 3, 1, 2])), - **regular_op_with_shaped_data('transpose_in', [1, 128, 1, 9], {'op': 'Transpose', 'type': 'Transpose'}), - **regular_op_with_shaped_data('pool', [1, 128, 1, 3], {'kind': 'op', 'op': 'Pooling', 'pool_stride': 3, - 'pool_step': [1, 1, 1, 1]}), - } - - def test_conv_reshape_pool(self): - graph = build_graph(self.nodes, [ - *connect('conv', '0:transpose_out'), - *connect('transpose_out_order', '1:transpose_out'), - *connect('transpose_out', '0:reshape_out'), - *connect('reshape_out_shape', '1:reshape_out'), - *connect('reshape_out', 'shapeof'), - - *connect('shapeof', '0:gather_batch'), - *connect('ind', '1:gather_batch'), - *connect('axis', '2:gather_batch'), - *connect('shapeof', '0:gather_h', skip_data=True), - *connect('ind_h', '1:gather_h'), - *connect('axis', '2:gather_h', skip_data=True), - *connect('gather_h', '0:div'), - *connect('th', '1:div'), - *connect('gather_batch', '0:concat'), - *connect('t', '1:concat'), - *connect('h', '2:concat'), - *connect('div', '3:concat'), - *connect('concat', '1:reshape_in'), - - *connect('reshape_out', '0:reshape_in', skip_data=True), - *connect('reshape_in', '0:transpose_in'), - *connect('transpose_in_order', "1:transpose_in"), - *connect('transpose_in', 'pool'), - ], nodes_with_edges_only=True) - - FuseReshapesSequenceKaldi().find_and_replace_pattern(graph) - - ref_graph = build_graph(self.ref_nodes, - [ - *connect('conv', '0:transpose_out'), - *connect('transpose_out_order', '1:transpose_out'), - *connect('transpose_out', '0:transpose_in'), - *connect('transpose_in_order', "1:transpose_in"), - *connect('transpose_in', 'pool'), - ]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'pool') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/FusedBatchNormTraining_test.py b/tools/mo/unit_tests/mo/middle/FusedBatchNormTraining_test.py deleted file mode 100644 index 
5a46980eb69400..00000000000000 --- a/tools/mo/unit_tests/mo/middle/FusedBatchNormTraining_test.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.middle.FusedBatchNormTraining import FusedBatchNormTraining -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.middle.passes.eliminate import shape_inference -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder': {'value': None, 'shape': int64_array([3, 10, 11, 5]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'placeholder_data': {'shape': int64_array([3, 10, 11, 5]), 'value': None, 'kind': 'data'}, - - 'scale': {'value': np.array([2, 3.5, 4.5, 5.1, 2.6], dtype=np.float32), 'shape': int64_array([5]), 'kind': 'op', - 'op': 'Const'}, - 'scale_data': {'value': np.array([2, 3.5, 4.5, 5.1, 2.6], dtype=np.float32), 'shape': int64_array([5]), - 'kind': 'data'}, - - 'offset': {'value': np.array([1, 2.5, 3.5, 4.1, 5.6], dtype=np.float32), 'shape': int64_array([5]), 'kind': 'op', - 'op': 'Const'}, - 'offset_data': {'value': np.array([1, 2.5, 3.5, 4.1, 5.6], dtype=np.float32), 'shape': int64_array([5]), - 'kind': 'data'}, - - 'mean': {'value': None, 'shape': int64_array([]), 'kind': 'op', 'op': 'Const'}, - 'mean_data': {'value': None, 'shape': int64_array([]), 'kind': 'data'}, - - 'variance': {'value': None, 'shape': int64_array([]), 'kind': 'op', 'op': 'Const'}, - 'variance_data': {'value': None, 'shape': int64_array([]), 'kind': 'data'}, - - 'batchnorm': {'value': None, 'shape': int64_array([3, 10, 11, 5]), 'type': None, 'kind': 'op', - 'op': 'FusedBatchNorm', 'is_training': True, 'eps': 1e-3}, - 'batchnorm_data': {'value': None, 'shape': int64_array([3, 10, 11, 5]), 'kind': 'data'}, - - 'result': {'kind': 'op', 'op': 'Result'}, - - # nodes after transformation - 'bn_mean': {'value': np.zeros([5]), 'shape': int64_array([5]), 'kind': 'op', 'op': 'Const'}, - 'bn_mean_data': {'value': np.zeros([5]), 'shape': int64_array([5]), 'kind': 'data'}, - - 'bn_variance': {'value': np.ones([5]), 'shape': int64_array([5]), 'kind': 'op', 'op': 'Const'}, - 'bn_variance_data': {'value': np.ones([5]), 'shape': int64_array([5]), 'kind': 'data'}, - - 'shapeof': {'type': 'ShapeOf', 'value': None, 'kind': 'op', 'op': 'ShapeOf'}, - 'shapeof_data': {'value': int64_array([3, 10, 11, 5]), 'shape': int64_array([4]), 'kind': 'data'}, - - 'reshape_to_orig': {'type': 'Reshape', 'value': None, 'kind': 'op', 'op': 'Reshape'}, - 'reshape_to_orig_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'start': {'kind': 'op', 'op': 'Const', 'value': int64_array(1)}, - 'start_data': {'value': None, 'shape': None, 'kind': 'data', 'value': int64_array(1)}, - 'stop': {'kind': 'op', 'op': 'Const', 'value': int64_array(3)}, - 'stop_data': {'value': None, 'shape': None, 'kind': 'data', 'value': int64_array(3)}, - 'step': {'kind': 'op', 'op': 'Const', 'value': int64_array(1)}, - 'step_data': {'value': None, 'shape': None, 'kind': 'data', 'value': int64_array(1)}, - 'mvn_axes': {'kind': 'op', 'op': 'Range'}, - 'mvn_axes_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'mvn': {'type': 'MVN', 'value': None, 'kind': 'op', 'op': 'MVN', 'eps': 1e-3, 'eps_mode': 'inside_sqrt'}, - 'mvn_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'reshape_1': {'type': 'Reshape', 'value': None, 
'kind': 'op', 'op': 'Reshape'}, - 'reshape_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'reshape_1_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': int64_array([1, -1, 0, 0])}, - 'reshape_1_const_data': {'kind': 'data', 'value': None, 'shape': None}, -} - - -class TestFusedBatchNormTrainingTest(): - @pytest.mark.parametrize("op",[ - 'FusedBatchNorm', 'FusedBatchNormV2', 'FusedBatchNormV3', - ]) - def test_transformation(self, op: str): - graph = build_graph(nodes_attributes, - [('placeholder', 'placeholder_data', {}), - ('scale', 'scale_data'), - ('offset', 'offset_data'), - ('mean', 'mean_data'), - ('variance', 'variance_data'), - ('placeholder_data', 'batchnorm', {'in': 0}), - ('scale_data', 'batchnorm', {'in': 1}), - ('offset_data', 'batchnorm', {'in': 2}), - ('mean_data', 'batchnorm', {'in': 3}), - ('variance_data', 'batchnorm', {'in': 4}), - ('batchnorm', 'batchnorm_data'), - ('batchnorm_data', 'result'), - ], - {}, nodes_with_edges_only=True) - graph.nodes['batchnorm']['op'] = op - graph_ref = build_graph(nodes_attributes, - [('placeholder', 'placeholder_data', {}), - ('scale', 'scale_data'), - ('offset', 'offset_data'), - ('bn_mean', 'bn_mean_data'), - ('bn_variance', 'bn_variance_data'), - ('scale_data', 'batchnorm', {'in': 1}), - ('offset_data', 'batchnorm', {'in': 2}), - ('bn_mean_data', 'batchnorm', {'in': 3}), - ('bn_variance_data', 'batchnorm', {'in': 4}), - - ('placeholder_data', 'reshape_1', {'in': 0}), - ('reshape_1_const', 'reshape_1_const_data'), - ('reshape_1_const_data', 'reshape_1', {'in': 1}), - ('reshape_1', 'reshape_1_data', {}), - ('reshape_1_data', 'mvn', {'in': 0}), - ('mvn', 'mvn_data'), - ('mvn_data', 'reshape_to_orig', {'in': 0}), - ('start', 'start_data'), - ('start_data', 'mvn_axes'), - ('stop', 'stop_data'), - ('stop_data', 'mvn_axes'), - ('step', 'step_data'), - ('step_data', 'mvn_axes'), - ('mvn_axes', 'mvn_axes_data'), - ('mvn_axes_data', 'mvn'), - ('placeholder_data', 'shapeof', {'in': 0}), - ('shapeof', 'shapeof_data'), - ('shapeof_data', 'reshape_to_orig', {'in': 1}), - ('reshape_to_orig', 'reshape_to_orig_data'), - ('reshape_to_orig_data', 'batchnorm', {'in': 0}), - - ('batchnorm', 'batchnorm_data'), - ('batchnorm_data', 'result'), - ], - {'batchnorm': {'is_training': False}, - - }, nodes_with_edges_only=True) - FusedBatchNormTraining().find_and_replace_pattern(graph) - shape_inference(graph) - - graph_ref.nodes['batchnorm']['op'] = op - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - assert flag, resp - - def test_non_training(self): - graph = build_graph(nodes_attributes, - [('placeholder', 'placeholder_data', {}), - ('scale', 'scale_data'), - ('offset', 'offset_data'), - ('mean', 'mean_data'), - ('variance', 'variance_data'), - ('placeholder_data', 'batchnorm', {'in': 0}), - ('scale_data', 'batchnorm', {'in': 1}), - ('offset_data', 'batchnorm', {'in': 2}), - ('mean_data', 'batchnorm', {'in': 3}), - ('variance_data', 'batchnorm', {'in': 4}), - ('batchnorm', 'batchnorm_data'), - ('batchnorm_data', 'result'), - ], - {'batchnorm': {'is_training': False}}, nodes_with_edges_only=True) - graph_ref = graph.copy() - - FusedBatchNormTraining().find_and_replace_pattern(graph) - shape_inference(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/middle/GatherNDDecomposition_test.py b/tools/mo/unit_tests/mo/middle/GatherNDDecomposition_test.py deleted file mode 100644 index bc6c15ec832a67..00000000000000 
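A minimal NumPy sketch of the decomposition the first valid-input case below expects (GatherND with batch_dims 0 rewritten as a Reshape to [-1] followed by an axis-0 Gather); the values mirror that case and the variable names are illustrative only:

import numpy as np

data = np.array([[1], [2]])                # input_data, shape (2, 1)
indices = np.array([[1, 0], [0, 0]])       # indices_input_data, shape (2, 2)
gathernd = np.array([data[tuple(i)] for i in indices])   # GatherND result: [2, 1]
flat = data.reshape(-1)                    # the Reshape to [-1] from the expected graph
gathered = flat[np.array([1, 0])]          # the equivalent Gather along axis 0
assert np.array_equal(gathernd, gathered)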
--- a/tools/mo/unit_tests/mo/middle/GatherNDDecomposition_test.py +++ /dev/null @@ -1,395 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -import numpy as np - -from openvino.tools.mo.middle.GatherNDDecomposition import GatherNDDecomposition -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -nodes = { - 'input': {'kind': 'op', 'op': 'Const'}, - 'input_data': {'kind': 'data'}, - - 'indices_input': {'kind': 'op', 'op': 'Const'}, - 'indices_input_data': {'kind': 'data'}, - - 'gathernd': {'kind': 'op', 'op': 'GatherND'}, - 'gathernd_data': {'kind': 'data'}, - - 'result': {'kind': 'op', 'op': 'Result'}, -} - -edges = [ - ('input', 'input_data'), - ('input_data', 'gathernd', {'in': 0}), - - ('indices_input', 'indices_input_data'), - ('indices_input_data', 'gathernd', {'in': 1}), - - ('gathernd', 'gathernd_data'), - ('gathernd_data', 'result'), -] - -nodes_expected = { - 'input': {'kind': 'op', 'op': 'Const'}, - 'input_data': {'kind': 'data'}, - - 'reshape_shape': {'kind': 'op', 'op': 'Const'}, - 'reshape_shape_data': {'kind': 'data'}, - - 'reshape': {'kind': 'op', 'op': 'Reshape'}, - 'reshape_data': {'kind': 'data'}, - - 'axis': {'kind': 'op', 'op': 'Const'}, - 'axis_data': {'kind': 'data'}, - - 'indices': {'kind': 'op', 'op': 'Const'}, - 'indices_data': {'kind': 'data'}, - - 'gather': {'kind': 'op', 'op': 'Gather'}, - 'gather_data': {'kind': 'data'}, - - 'result': {'kind': 'op', 'op': 'Result'}, -} - -edges_expected = [ - ('input', 'input_data'), - ('input_data', 'reshape', {'in': 0}), - - ('reshape_shape', 'reshape_shape_data'), - ('reshape_shape_data', 'reshape', {'in': 1}), - - ('reshape', 'reshape_data'), - ('reshape_data', 'gather', {'in': 0}), - - ('indices', 'indices_data'), - ('indices_data', 'gather', {'in': 1}), - - ('axis', 'axis_data'), - ('axis_data', 'gather', {'in': 2}), - - ('gather', 'gather_data'), - ('gather_data', 'result'), -] - - -class GatherNDDecompositionTest(unittest.TestCase): - - def test_GatherNDDecomposition_2by2indices_validinputs(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([2, 1]), 'value': np.array([1, 2]).reshape([2, 1])}, - 'indices_input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 0, 0, 0]).reshape([2, 2])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = build_graph(nodes_expected, - edges_expected, - update_attributes={ - 'input_data': {'shape': np.array([2, 1]), 'value': np.array([1, 2]).reshape([2, 1])}, - 'indices': {'shape': np.array([2]), 'value': np.array([1, 0])}, - 'reshape_shape': {'shape': np.array([1]), 'value': np.array([-1])}, - 'axis': {'shape': np.array([]), 'value': 0} - }, - nodes_with_edges_only=True) - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_2by2indices_invalidinputs(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 2, 3, 4]).reshape([2, 2])}, - 'indices_input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 0, 0, 0]).reshape([2, 2])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = graph - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 
'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_2by1indices_validinputs(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 2, 3, 4]).reshape([2, 2])}, - 'indices_input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 0]).reshape([2, 1])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = build_graph(nodes_expected, - edges_expected, - update_attributes={ - 'input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 2, 3, 4]).reshape([2, 2])}, - 'indices': {'shape': np.array([2]), 'value': np.array([1, 0])}, - 'reshape_shape': {'shape': np.array([2]), 'value': np.array([-1, 2])}, - 'axis': {'shape': np.array([]), 'value': 0} - }, - nodes_with_edges_only=True) - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_2by0indices_invalidinputs(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([1, 2]), 'value': np.array([1, 2]).reshape([1, 2])}, - 'indices_input_data': {'shape': np.array([2]), 'value': np.array([1, 0])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = graph - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_2by0indices_validinputs(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([2, 1]), 'value': np.array([1, 2])}, - 'indices_input_data': {'shape': np.array([2]), 'value': np.array([1, 0])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = build_graph(nodes_expected, - edges_expected, - update_attributes={ - 'input_data': {'shape': np.array([2, 1]), 'value': np.array([1, 2])}, - 'indices': {'shape': np.array([]), 'value': np.array([1])}, - 'reshape_shape': {'shape': np.array([1]), 'value': np.array([-1])}, - 'axis': {'shape': np.array([]), 'value': 0} - }, - nodes_with_edges_only=True) - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_1leadingdim(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 2, 3, 4]).reshape([2, 2])}, - 'indices_input_data': {'shape': np.array([2, 1, 1]), 'value': np.array([1, 0]).reshape([2, 1, 1])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = build_graph(nodes_expected, - edges_expected, - update_attributes={ - 'input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 2, 3, 4]).reshape([2, 2])}, - 'indices': {'shape': np.array([2, 1]), 'value': np.array([1, 0]).reshape([2, 1])}, - 'reshape_shape': {'shape': np.array([2]), 'value': np.array([-1, 2])}, - 'axis': {'shape': np.array([]), 'value': 0} - }, - nodes_with_edges_only=True) - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_3leadingdims(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 
'input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 2, 3, 4]).reshape([2, 2])}, - 'indices_input_data': {'shape': np.array([2, 1, 1, 1, 1]), 'value': np.array([1, 0]).reshape([2, 1, 1, 1, 1])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = build_graph(nodes_expected, - edges_expected, - update_attributes={ - 'input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 2, 3, 4]).reshape([2, 2])}, - 'indices': {'shape': np.array([2, 1, 1, 1]), 'value': np.array([1, 0]).reshape([2, 1, 1, 1])}, - 'reshape_shape': {'shape': np.array([2]), 'value': np.array([-1, 2])}, - 'axis': {'shape': np.array([]), 'value': 0} - }, - nodes_with_edges_only=True) - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_nonzerobatchdim(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([2, 2]), 'value': np.array([1, 2, 3, 4]).reshape([2, 2])}, - 'indices_input_data': {'shape': np.array([2, 1]), 'value': np.array([1, 0]).reshape([2, 1])}, - 'gathernd': {'batch_dims': 1} - }, - nodes_with_edges_only=True) - graph_ref = graph - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_complexexample1_nonzerobatchdim(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([2, 3, 4]), 'value': np.array([i for i in range(24)]).reshape([2, 3, 4])}, - 'indices_input_data': {'shape': np.array([2, 1]), 'value': np.array([1, 0]).reshape([2, 1])}, - 'gathernd': {'batch_dims': 1} - }, - nodes_with_edges_only=True) - graph_ref = graph - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_complexexample2(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([2, 3, 4]), 'value': np.array([i for i in range(24)]).reshape([2, 3, 4])}, - 'indices_input_data': {'shape': np.array([2, 1]), 'value': np.array([1, 0]).reshape([2, 1])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = build_graph(nodes_expected, - edges_expected, - update_attributes={ - 'input_data': {'shape': np.array([2, 3, 4]), 'value': np.array([i for i in range(24)]).reshape([2, 3, 4])}, - 'indices': {'shape': np.array([2]), 'value': np.array([1, 0]).reshape([2])}, - 'reshape_shape': {'shape': np.array([3]), 'value': np.array([-1, 3, 4])}, - 'axis': {'shape': np.array([]), 'value': 0} - }, - nodes_with_edges_only=True) - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_complexexample3(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([1, 1, 5]), 'value': np.array([1, 2, 3, 4, 5]).reshape([1, 1, 5])}, - 'indices_input_data': {'shape': np.array([2, 2, 3]), 'value': np.array([0, 0, 3, 0, 0, 1, 0, 0, 4, 0, 0, 2]).reshape([2, 2, 3])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = build_graph(nodes_expected, - edges_expected, - update_attributes={ 
- 'input_data': {'shape': np.array([1, 1, 5]), 'value': np.array([1, 2, 3, 4, 5]).reshape([1, 1, 5])}, - 'indices': {'shape': np.array([2, 2]), 'value': np.array([3, 1, 4, 2]).reshape([2, 2])}, - 'reshape_shape': {'shape': np.array([1]), 'value': np.array([-1])}, - 'axis': {'shape': np.array([]), 'value': 0} - }, - nodes_with_edges_only=True) - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_complexexample4(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([1, 4, 1]), 'value': np.array([1, 2, 3, 4]).reshape([1, 4, 1])}, - 'indices_input_data': {'shape': np.array([2, 2, 2]), 'value': np.array([0, 1, 0, 3, 0, 2, 0, 0]).reshape([2, 2, 2])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = build_graph(nodes_expected, - edges_expected, - update_attributes={ - 'input_data': {'shape': np.array([1, 4, 1]), 'value': np.array([1, 2, 3, 4]).reshape([1, 4, 1])}, - 'indices': {'shape': np.array([2, 2]), 'value': np.array([1, 3, 2, 0]).reshape([2, 2])}, - 'reshape_shape': {'shape': np.array([2]), 'value': np.array([-1, 1])}, - 'axis': {'shape': np.array([]), 'value': 0} - }, - nodes_with_edges_only=True) - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_dynamic_data_shape(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([1, -1, 1]), 'value': np.array([1, 2, 3, 4]).reshape([1, 4, 1])}, - 'indices_input_data': {'shape': np.array([2, 2, 2]), 'value': np.array([0, 1, 0, 3, 0, 2, 0, 0]).reshape([2, 2, 2])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = graph - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_GatherNDDecomposition_dynamic_indices_shape(self): - - graph = build_graph(nodes, - edges, - update_attributes={ - 'input_data': {'shape': np.array([1, 4, 1]), 'value': np.array([1, 2, 3, 4]).reshape([1, 4, 1])}, - 'indices_input_data': {'shape': np.array([2, -1, 2]), 'value': np.array([0, 1, 0, 3, 0, 2, 0, 0]).reshape([2, 2, 2])}, - 'gathernd': {'batch_dims': 0} - }, - nodes_with_edges_only=True) - graph_ref = graph - - GatherNDDecomposition().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs( - graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/GroupNorm_test.py b/tools/mo/unit_tests/mo/middle/GroupNorm_test.py deleted file mode 100644 index 905289e06b61dd..00000000000000 --- a/tools/mo/unit_tests/mo/middle/GroupNorm_test.py +++ /dev/null @@ -1,115 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.GroupNorm import GroupNormToMVN -from openvino.tools.mo.front.common.partial_infer.utils import float_array, int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, connect, \ - regular_op_with_shaped_data, valued_const_with_data - -shape = int64_array([1, 3, 5, 2]) -nodes = {**regular_op_with_shaped_data('input', 
shape, {'type': 'Parameter', 'op': 'Parameter'}), - **valued_const_with_data('gamma', float_array([0.5])), - **valued_const_with_data('beta', float_array([0.5])), - **regular_op_with_shaped_data('group_norm', shape, - {'op': 'GroupNorm', 'name': 'group_norm', 'num_groups': 3, 'eps': 1e-9}), - **result('result') - } - -edges = [*connect('input:0', '0:group_norm'), - *connect('gamma', '1:group_norm'), - *connect('beta', '2:group_norm'), - *connect('group_norm:0', 'result'), - ] - -ref_nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('shape1', int64_array([4]), {'op': 'ShapeOf'}), - **regular_op_with_shaped_data('shape2', int64_array([4]), {'op': 'ShapeOf'}), - **regular_op_with_shaped_data('shape3', int64_array([1]), {'op': 'ShapeOf'}), - **regular_op_with_shaped_data('hcast1', int64_array([4]), {'op': 'Cast'}), - **regular_op_with_shaped_data('cast2', int64_array([2]), {'op': 'Cast'}), - **regular_op_with_shaped_data('cast3', int64_array([4]), {'op': 'Cast'}), - **regular_op_with_shaped_data('gather1', int64_array([2]), {'op': 'Gather'}), - **regular_op_with_shaped_data('gather2', int64_array([1]), {'op': 'Gather'}), - **regular_op_with_shaped_data('gather3', int64_array([1]), {'op': 'Gather'}), - **regular_op_with_shaped_data('mul1', int64_array([1]), {'op': 'Mul'}), - **regular_op_with_shaped_data('mul2', int64_array([1]), {'op': 'Mul'}), - **regular_op_with_shaped_data('mul3', shape, {'op': 'Mul'}), - **regular_op_with_shaped_data('concat', int64_array([4]), {'op': 'Concat'}), - **regular_op_with_shaped_data('reshape1', int64_array([3, 1, 5, 2]), {'op': 'Reshape'}), - **regular_op_with_shaped_data('reshape2', shape, {'op': 'Reshape'}), - **regular_op_with_shaped_data('squeeze', int64_array([]), {'op': 'Squeeze'}), - **regular_op_with_shaped_data('range', int64_array([3]), {'op': 'Range'}), - **regular_op_with_shaped_data('mvn', int64_array([3, 1, 5, 2]), {'op': 'MVN'}), - **regular_op_with_shaped_data('add', shape, {'op': 'Add'}), - **valued_const_with_data('shape/axis1', int64_array(0)), - **valued_const_with_data('shape/ind1', int64_array([2, 3])), - **valued_const_with_data('shape/axis2', int64_array(0)), - **valued_const_with_data('shape/ind2', int64_array([0])), - **valued_const_with_data('shape/axis3', int64_array(0)), - **valued_const_with_data('shape/ind3', int64_array([1])), - **valued_const_with_data('gn/rec', float_array([1./3])), - **valued_const_with_data('group', int64_array([3])), - **valued_const_with_data('squeeze/axis', int64_array([0])), - **valued_const_with_data('range/start', int64_array(1)), - **valued_const_with_data('range/step', int64_array(1)), - **valued_const_with_data('gamma', float_array([[[[0.5]]]])), - **valued_const_with_data('beta', float_array([[[[0.5]]]])), - **result('result') - } -ref_edges = [*connect('input', '0:reshape1'), - *connect('input', 'shape1', skip_data=True), - *connect('shape1:0', '0:gather1'), - *connect('shape1:0', 'hcast1', skip_data=True), - *connect('shape/ind1', '1:gather1'), - *connect('shape/axis1', '2:gather1'), - *connect('gather1', 'cast2'), - *connect('hcast1', '0:gather3'), - *connect('hcast1', '0:gather2', skip_data=True), - *connect('shape/ind2', '1:gather2'), - *connect('shape/axis2', '2:gather2'), - *connect('gather2', '0:mul2'), - *connect('group', '1:mul2'), - *connect('shape/ind3', '1:gather3'), - *connect('shape/axis3', '2:gather3'), - *connect('gather3', '0:mul1'), - *connect('gn/rec', '1:mul1'), - *connect('mul2', '0:concat'), - 
*connect('mul1', '1:concat'), - *connect('cast2', '2:concat'), - *connect('concat', 'cast3'), - *connect('cast3', '1:reshape1'), - *connect('reshape1', 'shape2'), - *connect('shape2', 'shape3'), - *connect('shape3', '0:squeeze'), - *connect('squeeze/axis', '1:squeeze'), - *connect('range/start', '0:range'), - *connect('squeeze', '1:range'), - *connect('range/step', '2:range'), - *connect('reshape1', '0:mvn', skip_data=True), - *connect('range', '1:mvn'), - *connect('mvn', '0:reshape2'), - *connect('shape1:0', '1:reshape2', skip_data=True), - *connect('reshape2', '0:mul3'), - *connect('gamma', '1:mul3'), - *connect('mul3', '0:add'), - *connect('beta', '1:add'), - *connect('add', 'result') - ] - - -class GroupNormToMVNTest(unittest.TestCase): - def test_group_norm_1(self): - graph = build_graph(nodes, edges) - - graph_ref = build_graph(ref_nodes, ref_edges) - - graph.graph['layout'] = 'NCHW' - - GroupNormToMVN().find_and_replace_pattern(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/InsertSelect_test.py b/tools/mo/unit_tests/mo/middle/InsertSelect_test.py deleted file mode 100644 index 31f1ffd5cf167b..00000000000000 --- a/tools/mo/unit_tests/mo/middle/InsertSelect_test.py +++ /dev/null @@ -1,446 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.InsertSelect import AddSelectBeforeMemoryNodePattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class InsertSelectTests(unittest.TestCase): - - # graph have no splices - selects should not be inserted - def test_insert_select_0(self): - graph = build_graph({ - 'input': {'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]}, - 'memory': {'kind': 'op', 'op': 'Assign'}, - }, - [('input', 'placeholder_data_1'), - ('placeholder_data_1', 'memory') - ], - nodes_with_edges_only=True) - ref_graph = graph.copy() - AddSelectBeforeMemoryNodePattern().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, ref_graph, 'memory') - self.assertTrue(flag, resp) - - # graph contains 1 splice with context length 5, should be inserted select with memory as counter with length 5 - def test_insert_select_1(self): - graph = build_graph({ - 'input': {'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 13]}, - 'placeholder_2': {'kind': 'op', 'op': None}, - 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'memory': {'kind': 'op', 'op': 'Assign', 'index': 0}, - }, - [('input', 'placeholder_data_1'), - ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'), - ('splice_data_1', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'), - ('placeholder_data_2', 'memory') - ], - nodes_with_edges_only=True) - AddSelectBeforeMemoryNodePattern().find_and_replace_pattern(graph) - ref_graph = build_graph({ - 'input': {'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 13]}, - 
'placeholder_2': {'kind': 'op', 'op': None}, - - 'second_dim_mem_1': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])}, - 'second_dim_data_mem_1': {'kind': 'data'}, - 'gather_shape_mem_1': {'kind': 'op', 'op': 'Concat'}, - 'gather_shape_data_mem_1': {'kind': 'data'}, - 'fill_value': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data': {'kind': 'data'}, - 'broadcast_mem_1': {'kind': 'op', 'op': 'Broadcast'}, - 'broadcast_data_mem_1': {'kind': 'data'}, - - 'shape': {'kind': 'op', 'op': 'ShapeOf'}, - 'shape_data': {'kind': 'data'}, - 'crop_batch': {'kind': 'op', 'op': 'Crop', 'offset': int64_array([0])}, - 'crop_batch_data': {'kind': 'data'}, - 'crop_batch_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([1])}, - 'crop_batch_dim_data': {'kind': 'data'}, - 'second_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])}, - 'second_dim_data': {'kind': 'data'}, - 'gather_shape': {'kind': 'op', 'op': 'Concat'}, - 'gather_shape_data': {'kind': 'data'}, - 'fill_value_ones': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data_ones': {'kind': 'data'}, - 'broadcast': {'kind': 'op', 'op': 'Broadcast'}, - 'broadcast_data': {'kind': 'data'}, - - 'second_dim_mem_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([26])}, - 'second_dim_data_mem_2': {'kind': 'data'}, - 'gather_shape_mem_2': {'kind': 'op', 'op': 'Concat'}, - 'gather_shape_data_mem_2': {'kind': 'data'}, - 'fill_value_ones_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data_ones_2': {'kind': 'data'}, - 'broadcast_mem_2': {'kind': 'op', 'op': 'Broadcast'}, - 'broadcast_data_mem_2': {'kind': 'data'}, - - 'memory_in': {'kind': 'op', 'op': 'ReadValue', 'shape': int64_array([5])}, - 'memory_in_data': {'kind': 'data'}, - 'memory_out': {'kind': 'op', 'op': 'Assign', 'shape': int64_array([5])}, - 'memory_out_data': {'kind': 'data'}, - 'result': {'kind': 'op', 'op': 'Result'}, - 'crop_in': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 1, 'dim': 4}, - 'crop_in_data': {'kind': 'data'}, - 'crop_out': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 0, 'dim': 1}, - 'crop_out_data': {'kind': 'data'}, - 'equal': {'kind': 'op', 'op': 'Equal'}, - 'equal_data': {'kind': 'data'}, - 'select': {'kind': 'op', 'op': 'Select'}, - 'select_out_data': {'kind': 'data', 'shape': [1, 26]}, - 'const_0': {'kind': 'op', 'op': 'Const'}, - 'const_0_data': {'kind': 'data'}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data'}, - - 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'memory': {'kind': 'op', 'op': 'Assign'}, - }, - [('input', 'placeholder_data_1'), - ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'), - ('splice_data_1', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'), - ('placeholder_data_2', 'select', {'in': 1}), - - ('second_dim_mem_1', 'second_dim_data_mem_1'), - ('second_dim_data_mem_1', 'gather_shape_mem_1', {'in': 1}), - ('crop_batch_data', 'gather_shape_mem_1', {'in': 0}), - ('gather_shape_mem_1', 'gather_shape_data_mem_1'), - ('fill_value', 'fill_value_data'), - ('fill_value_data', 'broadcast_mem_1', {'in': 0}), - ('gather_shape_data_mem_1', 'broadcast_mem_1', {'in': 1}), - ('broadcast_mem_1', 'broadcast_data_mem_1'), - ('broadcast_data_mem_1', 'memory_in'), - - ('memory_in', 'memory_in_data'), ('memory_in_data', 'crop_in'), - ('crop_in', 'crop_in_data'), ('crop_in_data', 'concat', {'in': 0}), - - ('second_dim_mem_2', 'second_dim_data_mem_2'), - ('second_dim_data_mem_2', 
'gather_shape_mem_2', {'in': 1}), - ('crop_batch_data', 'gather_shape_mem_2', {'in': 0}), - ('gather_shape_mem_2', 'gather_shape_data_mem_2'), - ('fill_value_ones_2', 'fill_value_data_ones_2'), - ('fill_value_data_ones_2', 'broadcast_mem_2', {'in': 0}), - ('gather_shape_data_mem_2', 'broadcast_mem_2', {'in': 1}), - ('broadcast_mem_2', 'broadcast_data_mem_2'), - ('broadcast_data_mem_2', 'concat', {'in': 1}), - - ('concat', 'concat_data'), ('concat_data', 'memory_out'), - ('memory_out', 'memory_out_data'), ('memory_out_data', 'result'), - ('concat_data', 'crop_out'), ('crop_out', 'crop_out_data'), - ('crop_out_data', 'equal', {'in': 1}), ('broadcast_data_mem_2', 'equal', {'in': 0}), - ('equal', 'equal_data'), - ('equal_data', 'select', {'in': 0}), - - ('placeholder_data_2', 'shape'), ('shape', 'shape_data'), - ('shape_data', 'crop_batch'), ('crop_batch', 'crop_batch_data'), - ('crop_batch_dim', 'crop_batch_dim_data'), - ('crop_batch_dim_data', 'crop_batch', {'in': 1}), - ('second_dim', 'second_dim_data'), ('second_dim_data', 'gather_shape', {'in': 1}), - ('crop_batch_data', 'gather_shape', {'in': 0}), ('gather_shape', 'gather_shape_data'), - ('fill_value_ones', 'fill_value_data_ones'), - ('fill_value_data_ones', 'broadcast', {'in': 0}), - ('gather_shape_data', 'broadcast', {'in': 1}), ('broadcast', 'broadcast_data'), - ('broadcast_data', 'select', {'in': 2}), - - ('select', 'select_out_data'), - ('select_out_data', 'memory') - ], - nodes_with_edges_only=True - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'memory') - self.assertTrue(flag, resp) - - # graph contains 1 splice with context length 5 on the path to memory and 1 out of path, - # should be inserted select with memory as counter with length 5 - def test_insert_select_2(self): - graph = build_graph({ - 'input': {'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 65]}, - 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': np.array([-1, 0, 1])}, - 'splice_data_2': {'kind': 'data', 'shape': [1, 39]}, - 'placeholder_2': {'kind': 'op', 'op': None}, - 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'memory': {'kind': 'op', 'op': 'Assign'}, - }, - [('input', 'placeholder_data_1'), - ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'), - ('placeholder_data_1', 'splice_2'), ('splice_2', 'splice_data_2'), - ('splice_data_1', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'), - ('placeholder_data_2', 'memory') - ], - nodes_with_edges_only=True) - AddSelectBeforeMemoryNodePattern().find_and_replace_pattern(graph) - ref_graph = build_graph({ - 'input': {'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 65]}, - 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': np.array([-1, 0, 1])}, - 'splice_data_2': {'kind': 'data', 'shape': [1, 39]}, - 'placeholder_2': {'kind': 'op', 'op': None}, - - 'second_dim_mem_1': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])}, - 'second_dim_data_mem_1': {'kind': 'data'}, - 'gather_shape_mem_1': {'kind': 'op', 'op': 'Concat'}, - 'gather_shape_data_mem_1': {'kind': 'data'}, - 'fill_value': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data': {'kind': 'data'}, - 'broadcast_mem_1': {'kind': 
'op', 'op': 'Broadcast'}, - 'broadcast_data_mem_1': {'kind': 'data'}, - - 'shape': {'kind': 'op', 'op': 'ShapeOf'}, - 'shape_data': {'kind': 'data'}, - 'crop_batch': {'kind': 'op', 'op': 'Crop', 'offset': int64_array([0])}, - 'crop_batch_data': {'kind': 'data'}, - 'crop_batch_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([1])}, - 'crop_batch_dim_data': {'kind': 'data'}, - 'second_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])}, - 'second_dim_data': {'kind': 'data'}, - 'gather_shape': {'kind': 'op', 'op': 'Concat'}, - 'gather_shape_data': {'kind': 'data'}, - 'fill_value_ones': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data_ones': {'kind': 'data'}, - 'broadcast': {'kind': 'op', 'op': 'Broadcast'}, - 'broadcast_data': {'kind': 'data'}, - - 'second_dim_mem_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([26])}, - 'second_dim_data_mem_2': {'kind': 'data'}, - 'gather_shape_mem_2': {'kind': 'op', 'op': 'Concat'}, - 'gather_shape_data_mem_2': {'kind': 'data'}, - 'fill_value_ones_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data_ones_2': {'kind': 'data'}, - 'broadcast_mem_2': {'kind': 'op', 'op': 'Broadcast'}, - 'broadcast_data_mem_2': {'kind': 'data'}, - - 'memory_in': {'kind': 'op', 'op': 'ReadValue', 'shape': int64_array([5])}, - 'memory_in_data': {'kind': 'data'}, - 'memory_out': {'kind': 'op', 'op': 'Assign', 'shape': int64_array([5])}, - 'memory_out_data': {'kind': 'data'}, - 'result': {'kind': 'op', 'op': 'Result'}, - 'crop_in': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 1, 'dim': 4}, - 'crop_in_data': {'kind': 'data'}, - 'crop_out': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 0, 'dim': 1}, - 'crop_out_data': {'kind': 'data'}, - 'equal': {'kind': 'op', 'op': 'Equal'}, - 'equal_data': {'kind': 'data'}, - 'select': {'kind': 'op', 'op': 'Select'}, - 'select_out_data': {'kind': 'data', 'shape': [1, 26]}, - 'const_0': {'kind': 'op', 'op': 'Const'}, - 'const_0_data': {'kind': 'data'}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data'}, - - 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'memory': {'kind': 'op', 'op': 'Assign'}, - }, - [('input', 'placeholder_data_1'), - ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'), - ('placeholder_data_1', 'splice_2'), ('splice_2', 'splice_data_2'), - ('splice_data_1', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'), - ('placeholder_data_2', 'select', {'in': 1}), - - ('second_dim_mem_1', 'second_dim_data_mem_1'), - ('second_dim_data_mem_1', 'gather_shape_mem_1', {'in': 1}), - ('crop_batch_data', 'gather_shape_mem_1', {'in': 0}), - ('gather_shape_mem_1', 'gather_shape_data_mem_1'), - ('fill_value', 'fill_value_data'), - ('fill_value_data', 'broadcast_mem_1', {'in': 0}), - ('gather_shape_data_mem_1', 'broadcast_mem_1', {'in': 1}), - ('broadcast_mem_1', 'broadcast_data_mem_1'), - ('broadcast_data_mem_1', 'memory_in'), - - ('memory_in', 'memory_in_data'), ('memory_in_data', 'crop_in'), - ('crop_in', 'crop_in_data'), ('crop_in_data', 'concat', {'in': 0}), - - ('second_dim_mem_2', 'second_dim_data_mem_2'), - ('second_dim_data_mem_2', 'gather_shape_mem_2', {'in': 1}), - ('crop_batch_data', 'gather_shape_mem_2', {'in': 0}), - ('gather_shape_mem_2', 'gather_shape_data_mem_2'), - ('fill_value_ones_2', 'fill_value_data_ones_2'), - ('fill_value_data_ones_2', 'broadcast_mem_2', {'in': 0}), - ('gather_shape_data_mem_2', 'broadcast_mem_2', {'in': 1}), - ('broadcast_mem_2', 'broadcast_data_mem_2'), - 
('broadcast_data_mem_2', 'concat', {'in': 1}), - - ('concat', 'concat_data'), ('concat_data', 'memory_out'), - ('memory_out', 'memory_out_data'), ('memory_out_data', 'result'), - ('concat_data', 'crop_out'), ('crop_out', 'crop_out_data'), - ('crop_out_data', 'equal', {'in': 1}), ('broadcast_data_mem_2', 'equal', {'in': 0}), - ('equal', 'equal_data'), - ('equal_data', 'select', {'in': 0}), - - ('placeholder_data_2', 'shape'), ('shape', 'shape_data'), - ('shape_data', 'crop_batch'), ('crop_batch', 'crop_batch_data'), - ('crop_batch_dim', 'crop_batch_dim_data'), - ('crop_batch_dim_data', 'crop_batch', {'in': 1}), - ('second_dim', 'second_dim_data'), ('second_dim_data', 'gather_shape', {'in': 1}), - ('crop_batch_data', 'gather_shape', {'in': 0}), ('gather_shape', 'gather_shape_data'), - ('fill_value_ones', 'fill_value_data_ones'), - ('fill_value_data_ones', 'broadcast', {'in': 0}), - ('gather_shape_data', 'broadcast', {'in': 1}), ('broadcast', 'broadcast_data'), - ('broadcast_data', 'select', {'in': 2}), - - ('select', 'select_out_data'), - ('select_out_data', 'memory') - ], - nodes_with_edges_only=True - ) - (flag, resp) = compare_graphs(graph, ref_graph, 'memory') - self.assertTrue(flag, resp) - - # graph contains 2 splices with sum context length 8 on the path to memory, - # should be inserted select with memory as counter with length 7 - def test_insert_select_3(self): - graph = build_graph({ - 'input': {'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 65]}, - 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': np.array([-1, 0, 1])}, - 'splice_data_2': {'kind': 'data', 'shape': [1, 39]}, - 'placeholder_2': {'kind': 'op', 'op': None}, - 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'memory': {'kind': 'op', 'op': 'Assign', 'index': 0}, - }, - [('input', 'placeholder_data_1'), - ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'), - ('splice_data_1', 'splice_2'), ('splice_2', 'splice_data_2'), - ('splice_data_2', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'), - ('placeholder_data_2', 'memory') - ], - nodes_with_edges_only=True) - AddSelectBeforeMemoryNodePattern().find_and_replace_pattern(graph) - ref_graph = build_graph({ - 'input': {'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data_1': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': np.array([-2, -1, 0, 1, 2])}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 65]}, - 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': np.array([-1, 0, 1])}, - 'splice_data_2': {'kind': 'data', 'shape': [1, 39]}, - 'placeholder_2': {'kind': 'op', 'op': None}, - - 'second_dim_mem_1': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])}, - 'second_dim_data_mem_1': {'kind': 'data'}, - 'gather_shape_mem_1': {'kind': 'op', 'op': 'Concat'}, - 'gather_shape_data_mem_1': {'kind': 'data'}, - 'fill_value': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data': {'kind': 'data'}, - 'broadcast_mem_1': {'kind': 'op', 'op': 'Broadcast'}, - 'broadcast_data_mem_1': {'kind': 'data'}, - - 'shape': {'kind': 'op', 'op': 'ShapeOf'}, - 'shape_data': {'kind': 'data'}, - 'crop_batch': {'kind': 'op', 'op': 'Crop', 'offset': int64_array([0])}, - 'crop_batch_data': {'kind': 'data'}, - 'crop_batch_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([1])}, - 'crop_batch_dim_data': 
{'kind': 'data'}, - 'second_dim': {'kind': 'op', 'op': 'Const', 'value': int64_array([5])}, - 'second_dim_data': {'kind': 'data'}, - 'gather_shape': {'kind': 'op', 'op': 'Concat'}, - 'gather_shape_data': {'kind': 'data'}, - 'fill_value_ones': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data_ones': {'kind': 'data'}, - 'broadcast': {'kind': 'op', 'op': 'Broadcast'}, - 'broadcast_data': {'kind': 'data'}, - - 'second_dim_mem_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([26])}, - 'second_dim_data_mem_2': {'kind': 'data'}, - 'gather_shape_mem_2': {'kind': 'op', 'op': 'Concat'}, - 'gather_shape_data_mem_2': {'kind': 'data'}, - 'fill_value_ones_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data_ones_2': {'kind': 'data'}, - 'broadcast_mem_2': {'kind': 'op', 'op': 'Broadcast'}, - 'broadcast_data_mem_2': {'kind': 'data'}, - - 'memory_in': {'kind': 'op', 'op': 'ReadValue', 'shape': int64_array([5])}, - 'memory_in_data': {'kind': 'data'}, - 'memory_out': {'kind': 'op', 'op': 'Assign', 'shape': int64_array([5])}, - 'memory_out_data': {'kind': 'data'}, - 'result': {'kind': 'op', 'op': 'Result'}, - 'crop_in': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 1, 'dim': 4}, - 'crop_in_data': {'kind': 'data'}, - 'crop_out': {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 0, 'dim': 1}, - 'crop_out_data': {'kind': 'data'}, - 'equal': {'kind': 'op', 'op': 'Equal'}, - 'equal_data': {'kind': 'data'}, - 'select': {'kind': 'op', 'op': 'Select'}, - 'select_out_data': {'kind': 'data', 'shape': [1, 26]}, - 'const_0': {'kind': 'op', 'op': 'Const'}, - 'const_0_data': {'kind': 'data'}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data'}, - - 'placeholder_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'memory': {'kind': 'op', 'op': 'Assign', 'index': 0}, - }, - [('input', 'placeholder_data_1'), - ('placeholder_data_1', 'splice_1'), ('splice_1', 'splice_data_1'), - ('splice_data_1', 'splice_2'), ('splice_2', 'splice_data_2'), - ('splice_data_2', 'placeholder_2'), ('placeholder_2', 'placeholder_data_2'), - ('placeholder_data_2', 'select', {'in': 1}), - - ('second_dim_mem_1', 'second_dim_data_mem_1'), - ('second_dim_data_mem_1', 'gather_shape_mem_1', {'in': 1}), - ('crop_batch_data', 'gather_shape_mem_1', {'in': 0}), - ('gather_shape_mem_1', 'gather_shape_data_mem_1'), - ('fill_value', 'fill_value_data'), - ('fill_value_data', 'broadcast_mem_1', {'in': 0}), - ('gather_shape_data_mem_1', 'broadcast_mem_1', {'in': 1}), - ('broadcast_mem_1', 'broadcast_data_mem_1'), - ('broadcast_data_mem_1', 'memory_in'), - - ('memory_in', 'memory_in_data'), ('memory_in_data', 'crop_in'), - ('crop_in', 'crop_in_data'), ('crop_in_data', 'concat', {'in': 0}), - - ('second_dim_mem_2', 'second_dim_data_mem_2'), - ('second_dim_data_mem_2', 'gather_shape_mem_2', {'in': 1}), - ('crop_batch_data', 'gather_shape_mem_2', {'in': 0}), - ('gather_shape_mem_2', 'gather_shape_data_mem_2'), - ('fill_value_ones_2', 'fill_value_data_ones_2'), - ('fill_value_data_ones_2', 'broadcast_mem_2', {'in': 0}), - ('gather_shape_data_mem_2', 'broadcast_mem_2', {'in': 1}), - ('broadcast_mem_2', 'broadcast_data_mem_2'), - ('broadcast_data_mem_2', 'concat', {'in': 1}), - - ('concat', 'concat_data'), ('concat_data', 'memory_out'), - ('memory_out', 'memory_out_data'), ('memory_out_data', 'result'), - ('concat_data', 'crop_out'), ('crop_out', 'crop_out_data'), - ('crop_out_data', 'equal', {'in': 1}), ('broadcast_data_mem_2', 'equal', {'in': 0}), - ('equal', 'equal_data'), - 
('equal_data', 'select', {'in': 0}), - - ('placeholder_data_2', 'shape'), ('shape', 'shape_data'), - ('shape_data', 'crop_batch'), ('crop_batch', 'crop_batch_data'), - ('crop_batch_dim', 'crop_batch_dim_data'), - ('crop_batch_dim_data', 'crop_batch', {'in': 1}), - ('second_dim', 'second_dim_data'), ('second_dim_data', 'gather_shape', {'in': 1}), - ('crop_batch_data', 'gather_shape', {'in': 0}), ('gather_shape', 'gather_shape_data'), - ('fill_value_ones', 'fill_value_data_ones'), - ('fill_value_data_ones', 'broadcast', {'in': 0}), - ('gather_shape_data', 'broadcast', {'in': 1}), ('broadcast', 'broadcast_data'), - ('broadcast_data', 'select', {'in': 2}), - - ('select', 'select_out_data'), - ('select_out_data', 'memory') - ], - nodes_with_edges_only=True - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'memory') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/InterpolateSequenceToInterpolate_test.py b/tools/mo/unit_tests/mo/middle/InterpolateSequenceToInterpolate_test.py deleted file mode 100644 index 6eebb89c1ee2bb..00000000000000 --- a/tools/mo/unit_tests/mo/middle/InterpolateSequenceToInterpolate_test.py +++ /dev/null @@ -1,1584 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import unittest - -from openvino.tools.mo.middle.InterpolateSequenceToInterpolate import InterpolateSequenceToInterpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -graph_node_attrs_for_2d_case_1_opset4_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'size_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660]) - }, - 'size_1_data': {'value': int64_array([660]), 'shape': [1], 'kind': 'data'}, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([3.0]) - }, - 'scale_1_data': {'value': np.array([3.0]), 'shape': [1], 'kind': 'data'}, - 'axes_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2]) - }, - 'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'shape_calculation_mode': 'scales', - 'version': 'opset4' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'}, - 'size_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700]) - }, - 'size_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0]) - }, - 'scale_2_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'}, - 'axes_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3]) - }, - 'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'shape_calculation_mode': 'scales', - 'version': 'opset4' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'}, - 'size_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320]) - }, - 'size_3_data': {'value': 
int64_array([1320]), 'shape': [1], 'kind': 'data'}, - 'scale_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0]) - }, - 'scale_3_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'}, - 'axes_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2]) - }, - 'axes_3_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'}, - 'interpolate_3': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'shape_calculation_mode': 'scales', - 'version': 'opset4' - }, - 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_2d_case_1_opset4_case = [ - ('placeholder', 'placeholder_data'), - - ('placeholder_data', 'interpolate_1', {'in': 0}), - ('size_1', 'size_1_data'), - ('scale_1', 'scale_1_data'), - ('axes_1', 'axes_1_data'), - ('size_1_data', 'interpolate_1', {'in': 1}), - ('scale_1_data', 'interpolate_1', {'in': 2}), - ('axes_1_data', 'interpolate_1', {'in': 3}), - ('interpolate_1', 'interpolate_1_data'), - - ('interpolate_1_data', 'interpolate_2', {'in': 0}), - ('size_2', 'size_2_data'), - ('scale_2', 'scale_2_data'), - ('axes_2', 'axes_2_data'), - ('size_2_data', 'interpolate_2', {'in': 1}), - ('scale_2_data', 'interpolate_2', {'in': 2}), - ('axes_2_data', 'interpolate_2', {'in': 3}), - ('interpolate_2', 'interpolate_2_data'), - - ('interpolate_2_data', 'interpolate_3', {'in': 0}), - ('size_3', 'size_3_data'), - ('scale_3', 'scale_3_data'), - ('axes_3', 'axes_3_data'), - ('size_3_data', 'interpolate_3', {'in': 1}), - ('scale_3_data', 'interpolate_3', {'in': 2}), - ('axes_3_data', 'interpolate_3', {'in': 3}), - ('interpolate_3', 'interpolate_3_data'), - - ('interpolate_3_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), -] - - -ref_graph_node_attrs_for_2d_case_1_opset4_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'size_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660, 700]) - }, - 'size_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([3.0, 2.0]) - }, - 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'axes_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3]) - }, - 'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'shape_calculation_mode': 'scales', - 'antialias': 0, - 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), - 'coordinate_transformation_mode': 'half_pixel', - 'nearest_mode': 'round_prefer_floor', - 'cube_coeff': -0.75, - 'version': 'opset4' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'}, - 'size_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320]) - }, - 'size_3_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'scale_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0]) - }, - 'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'axes_3': { - 'kind': 
'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2]) - }, - 'axes_3_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'}, - 'interpolate_3': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'shape_calculation_mode': 'scales', - 'version': 'opset4' - }, - 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -ref_edges_for_2d_case_1_opset4_case = [ - ('placeholder', 'placeholder_data'), - - ('placeholder_data', 'interpolate_1', {'in': 0}), - ('size_1', 'size_1_data'), - ('scale_1', 'scale_1_data'), - ('axes_1', 'axes_1_data'), - ('size_1_data', 'interpolate_1', {'in': 1}), - ('scale_1_data', 'interpolate_1', {'in': 2}), - ('axes_1_data', 'interpolate_1', {'in': 3}), - ('interpolate_1', 'interpolate_1_data'), - - ('interpolate_1_data', 'interpolate_3', {'in': 0}), - ('size_3', 'size_3_data'), - ('scale_3', 'scale_3_data'), - ('axes_3', 'axes_3_data'), - ('size_3_data', 'interpolate_3', {'in': 1}), - ('scale_3_data', 'interpolate_3', {'in': 2}), - ('axes_3_data', 'interpolate_3', {'in': 3}), - ('interpolate_3', 'interpolate_3_data'), - - ('interpolate_3_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), -] - - -graph_node_attrs_for_2d_case_1 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660]) - }, - 'scale_1_data': {'value': int64_array([660]), 'shape': [1], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700]) - }, - 'scale_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([3]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'}, - 'scale_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320]) - }, - 'scale_3_data': {'value': int64_array([1320]), 'shape': [1], 'kind': 'data'}, - 'interpolate_3': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_2d_case_1 = [ - ('placeholder', 'placeholder_data'), - - ('placeholder_data', 'interpolate_1', {'in': 0}), - ('scale_1', 'scale_1_data'), - ('scale_1_data', 'interpolate_1', {'in': 1}), - ('interpolate_1', 'interpolate_1_data'), - - ('interpolate_1_data', 'interpolate_2', {'in': 0}), - ('scale_2', 'scale_2_data'), - ('scale_2_data', 'interpolate_2', {'in': 1}), - ('interpolate_2', 
'interpolate_2_data'), - - ('interpolate_2_data', 'interpolate_3', {'in': 0}), - ('scale_3', 'scale_3_data'), - ('scale_3_data', 'interpolate_3', {'in': 1}), - ('interpolate_3', 'interpolate_3_data'), - - ('interpolate_3_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), -] - - -graph_node_attrs_for_2d_case_2 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660]) - }, - 'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_2d_case_2 = [ - ('placeholder', 'placeholder_data'), - - ('placeholder_data', 'interpolate_1', {'in': 0}), - ('scale_1', 'scale_1_data'), - ('scale_1_data', 'interpolate_1', {'in': 1}), - ('interpolate_1', 'interpolate_1_data'), - - ('interpolate_1_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), -] - - -graph_node_attrs_for_2d_case_3 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660]) - }, - 'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 350]), 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700]) - }, - 'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([3]), - 'mode': 'linear', - 'version': 'opset1' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'}, - 'scale_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320]) - }, - 'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'interpolate_3': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2]), - 'mode': 'cubic', - 'version': 'opset1' - }, - 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_2d_case_3 = edges_for_2d_case_1 - - -new_graph_node_attrs_for_2d_case_4_opset4_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'size_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200]) - }, - 
'size_1_data': {'value': int64_array([2200]), 'shape': [1], 'kind': 'data'}, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([10.0]) - }, - 'scale_1_data': {'value': np.array([10.0]), 'shape': [1], 'kind': 'data'}, - 'axes_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2]) - }, - 'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'linear', - 'coordinate_transformation_mode': 'asymmetric', - 'nearest_mode': 'simple', - 'cube_coeff': -0.4, - 'antialias': 1, - 'shape_calculation_mode': 'scales', - 'version': 'opset4' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'}, - 'size_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700]) - }, - 'size_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([2.0]) - }, - 'scale_2_data': {'value': np.array([2.0]), 'shape': [1], 'kind': 'data'}, - 'axes_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3]) - }, - 'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'linear', - 'coordinate_transformation_mode': 'asymmetric', - 'nearest_mode': 'simple', - 'cube_coeff': -0.4, - 'antialias': 1, - 'shape_calculation_mode': 'scales', - 'version': 'opset4' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -new_edges_for_2d_case_4_opset4_case = [ - ('placeholder', 'placeholder_data'), - - ('placeholder_data', 'interpolate_1', {'in': 0}), - ('size_1', 'size_1_data'), - ('size_1_data', 'interpolate_1', {'in': 1}), - ('scale_1', 'scale_1_data'), - ('scale_1_data', 'interpolate_1', {'in': 2}), - ('axes_1', 'axes_1_data'), - ('axes_1_data', 'interpolate_1', {'in': 3}), - ('interpolate_1', 'interpolate_1_data'), - - ('interpolate_1_data', 'interpolate_2', {'in': 0}), - ('size_2', 'size_2_data'), - ('size_2_data', 'interpolate_2', {'in': 1}), - ('scale_2', 'scale_2_data'), - ('scale_2_data', 'interpolate_2', {'in': 2}), - ('axes_2', 'axes_2_data'), - ('axes_2_data', 'interpolate_2', {'in': 3}), - ('interpolate_2', 'interpolate_2_data'), - - ('interpolate_2_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), -] - - -new_ref_graph_node_attrs_for_2d_case_4_opset4_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'size_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200, 700]) - }, - 'size_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([10.0, 2.0]) - }, - 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'axes_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3]) - }, - 'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 
'mode': 'linear', - 'coordinate_transformation_mode': 'asymmetric', - 'nearest_mode': 'simple', - 'cube_coeff': -0.4, - 'antialias': 1, - 'shape_calculation_mode': 'scales', - 'version': 'opset4' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -new_ref_edges_for_2d_case_4_opset4_case = [ - ('placeholder', 'placeholder_data'), - - ('placeholder_data', 'interpolate_1', {'in': 0}), - ('size_1', 'size_1_data'), - ('size_1_data', 'interpolate_1', {'in': 1}), - ('scale_1', 'scale_1_data'), - ('scale_1_data', 'interpolate_1', {'in': 2}), - ('axes_1', 'axes_1_data'), - ('axes_1_data', 'interpolate_1', {'in': 3}), - ('interpolate_1', 'interpolate_1_data'), - - ('interpolate_1_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), -] - - -graph_node_attrs_for_2d_case_4_opset4_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200]) - }, - 'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'axes_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2]) - }, - 'axes_1_data': {'value': int64_array([2]), 'shape': [1], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'linear', - 'coordinate_transformation_mode': 'asymmetric', - 'nearest_mode': 'simple', - 'cube_coeff': -0.4, - 'antialias': 1, - 'version': 'opset4' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700]) - }, - 'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'axes_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3]) - }, - 'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'linear', - 'coordinate_transformation_mode': 'asymmetric', - 'nearest_mode': 'simple', - 'cube_coeff': -0.4, - 'antialias': 1, - 'version': 'opset4' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_2d_case_4_opset4_case = [ - ('placeholder', 'placeholder_data'), - - ('placeholder_data', 'interpolate_1', {'in': 0}), - ('scale_1', 'scale_1_data'), - ('scale_1_data', 'interpolate_1', {'in': 1}), - ('axes_1', 'axes_1_data'), - ('axes_1_data', 'interpolate_1', {'in': 2}), - ('interpolate_1', 'interpolate_1_data'), - - ('interpolate_1_data', 'interpolate_2', {'in': 0}), - ('scale_2', 'scale_2_data'), - ('scale_2_data', 'interpolate_2', {'in': 1}), - ('axes_2', 'axes_2_data'), - ('axes_2_data', 'interpolate_2', {'in': 2}), - ('interpolate_2', 'interpolate_2_data'), - - ('interpolate_2_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), -] - - -graph_node_attrs_for_2d_case_4 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 
'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200]) - }, - 'scale_1_data': {'value': int64_array([2200]), 'shape': [1], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2]), - 'mode': 'linear', - 'align_corners': 0, - 'antialias': 1, - 'pads_begin': 5, - 'pads_end': 3, - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 2200, 350]), 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([700]) - }, - 'scale_2_data': {'value': int64_array([700]), 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([3]), - 'mode': 'linear', - 'align_corners': 0, - 'antialias': 1, - 'pads_begin': 5, - 'pads_end': 3, - 'version': 'opset1' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_2d_case_4 = [ - ('placeholder', 'placeholder_data'), - - ('placeholder_data', 'interpolate_1', {'in': 0}), - ('scale_1', 'scale_1_data'), - ('scale_1_data', 'interpolate_1', {'in': 1}), - ('interpolate_1', 'interpolate_1_data'), - - ('interpolate_1_data', 'interpolate_2', {'in': 0}), - ('scale_2', 'scale_2_data'), - ('scale_2_data', 'interpolate_2', {'in': 1}), - ('interpolate_2', 'interpolate_2_data'), - - ('interpolate_2_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), -] - - -graph_node_attrs_for_2d_case_6 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([220, 350]) - }, - 'scale_1_data': {'value': None, 'shape': [2], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2, 3]), - 'mode': 'linear', - 'align_corners': 0, - 'antialias': 1, - 'pads_begin': 5, - 'pads_end': 3, - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([220]) - }, - 'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2]), - 'mode': 'linear', - 'align_corners': 0, - 'antialias': 1, - 'pads_begin': 5, - 'pads_end': 3, - 'version': 'opset1' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 220, 350]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_2d_case_6 = edges_for_2d_case_4 - - -new_ref_graph_node_attrs_for_3d_case_1_opset4_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 5, 1024, 256, 800]), - 'kind': 
'data', - 'data_type': None - }, - 'size_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280, 2400]) - }, - 'size_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4.0, 5.0, 3.0]) - }, - 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'axes_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3, 4]) - }, - 'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'shape_calculation_mode': 'sizes', - 'version': 'opset4' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'}, - 'size_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512]) - }, - 'size_3_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'scale_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([512.0 / 2400.0]) - }, - 'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'axes_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4]) - }, - 'axes_3_data': {'value': int64_array([4]), 'shape': [1], 'kind': 'data'}, - 'interpolate_3': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'shape_calculation_mode': 'sizes', - 'version': 'opset4' - }, - 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - - -new_ref_edges_for_3d_case_1_opset4_case = ref_edges_for_2d_case_1_opset4_case - - -new_graph_node_attrs_for_3d_case_1_opset4_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 5, 1024, 256, 800]), - 'kind': 'data', - 'data_type': None - }, - 'size_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 2400]) - }, - 'size_1_data': {'value': int64_array([4096, 2400]), 'shape': [2], 'kind': 'data'}, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4.0, 3.0]) - }, - 'scale_1_data': {'value': np.array([4.0, 3.0]), 'shape': [2], 'kind': 'data'}, - 'axes_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 4]) - }, - 'axes_1_data': {'value': int64_array([2, 4]), 'shape': [2], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'shape_calculation_mode': 'sizes', - 'version': 'opset4' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 256, 2400]), 'kind': 'data'}, - 'size_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1280]) - }, - 'size_2_data': {'value': int64_array([1280]), 'shape': [1], 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([5.0]) - }, - 'scale_2_data': {'value': np.array([5.0]), 'shape': [1], 'kind': 'data'}, - 'axes_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3]) - }, - 'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 
'nearest', - 'shape_calculation_mode': 'sizes', - 'version': 'opset4' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'}, - 'size_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512]) - }, - 'size_3_data': {'value': int64_array([512]), 'shape': [1], 'kind': 'data'}, - 'scale_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([512.0 / 2400.0]) - }, - 'scale_3_data': {'value': np.array([512.0 / 2400.0]), 'shape': [1], 'kind': 'data'}, - 'axes_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4]) - }, - 'axes_3_data': {'value': int64_array([4]), 'shape': [1], 'kind': 'data'}, - 'interpolate_3': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'shape_calculation_mode': 'sizes', - 'version': 'opset4' - }, - 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -new_edges_for_3d_case_1_opset4_case = edges_for_2d_case_1_opset4_case - - -graph_node_attrs_for_3d_case_1 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 5, 1024, 256, 800]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 2400]) - }, - 'scale_1_data': {'value': int64_array([4096, 2400]), 'shape': [2], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2, 4]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 256, 2400]), 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1280]) - }, - 'scale_2_data': {'value': int64_array([1280]), 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([3]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'}, - 'scale_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512]) - }, - 'scale_3_data': {'value': int64_array([512]), 'shape': [1], 'kind': 'data'}, - 'interpolate_3': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([4]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_3_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_3d_case_1 = edges_for_2d_case_1 - - -graph_node_attrs_for_3d_case_2 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 5, 1024, 256, 800]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280]) - }, - 'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 
'op': 'Interpolate', - 'axes': int64_array([2, 3]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 800]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 800]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_3d_case_2 = edges_for_2d_case_2 - - -graph_node_attrs_for_3d_case_3 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([16, 44, 512, 87, 790]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([256]) - }, - 'scale_1_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([16, 44, 256, 87, 790]), 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2370]) - }, - 'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([4]), - 'mode': 'linear', - 'version': 'opset1' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([16, 44, 256, 87, 2370]), 'kind': 'data'}, - 'scale_3': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([435]) - }, - 'scale_3_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'interpolate_3': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([3]), - 'mode': 'cubic', - 'version': 'opset1' - }, - 'interpolate_3_data': {'value': None, 'shape': int64_array([16, 44, 256, 435, 2370]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([16, 44, 256, 435, 2370]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_3d_case_3 = edges_for_2d_case_3 - - -new_ref_graph_node_attrs_for_3d_case_4_opset4_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([10, 64, 511, 416, 10240]), - 'kind': 'data', - 'data_type': None - }, - 'size_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 912, 133120]) - }, - 'size_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', - 'value': np.array([4599.0 / 511.0, 912.0 / 416.0, 133120.0 / 10240.0]) - }, - 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'axes_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 3, 4]) - }, - 'axes_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'linear', - 'antialias': 1, - 'shape_calculation_mode': 'sizes', - 'version': 'opset4' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -new_ref_edges_for_3d_case_4_opset4_case = 
new_ref_edges_for_2d_case_4_opset4_case - - -new_graph_node_attrs_for_3d_case_4_opset4_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([10, 64, 511, 416, 10240]), - 'kind': 'data', - 'data_type': None - }, - 'size_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 133120]) - }, - 'size_1_data': {'value': int64_array([4599, 133120]), 'shape': [2], 'kind': 'data'}, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([4599.0 / 511.0, 133120.0 / 10240.0]) - }, - 'scale_1_data': {'value': np.array([4599.0 / 511.0, 133120.0 / 10240.0]), 'shape': [2], 'kind': 'data'}, - 'axes_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2, 4]) - }, - 'axes_1_data': {'value': int64_array([2, 4]), 'shape': [2], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'linear', - 'antialias': 1, - 'shape_calculation_mode': 'sizes', - 'version': 'opset4' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 416, 133120]), 'kind': 'data'}, - 'size_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([912]) - }, - 'size_2_data': {'value': int64_array([912]), 'shape': [1], 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': np.array([912.0 / 416.0]) - }, - 'scale_2_data': {'value': np.array([912.0 / 416.0]), 'shape': [1], 'kind': 'data'}, - 'axes_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([3]) - }, - 'axes_2_data': {'value': int64_array([3]), 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'linear', - 'antialias': 1, - 'shape_calculation_mode': 'sizes', - 'version': 'opset4' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -new_edges_for_3d_case_4_opset4_case = new_edges_for_2d_case_4_opset4_case - - -graph_node_attrs_for_3d_case_4 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([10, 64, 511, 416, 10240]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 133120]) - }, - 'scale_1_data': {'value': int64_array([4599, 133120]), 'shape': [2], 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2, 4]), - 'mode': 'linear', - 'align_corners': 0, - 'antialias': 1, - 'pads_begin': 5, - 'pads_end': 3, - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([10, 64, 4599, 416, 133120]), 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([912]) - }, - 'scale_2_data': {'value': int64_array([912]), 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([3]), - 'mode': 'linear', - 'align_corners': 0, - 'antialias': 1, - 'pads_begin': 5, - 'pads_end': 3, - 'version': 'opset1' - }, - 'interpolate_2_data': {'value': None, 'shape': 
int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges_for_3d_case_4 = edges_for_2d_case_4 - - -class InterpolateSequenceToInterpolateTest(unittest.TestCase): - def test_2d_interpolate_sequence_1(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_1, - edges=edges_for_2d_case_1 - ) - - ref_graph = build_graph( - nodes_attrs={ - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([660, 700]) - }, - 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2, 3]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 4, 660, 700]), 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1320]) - }, - 'scale_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 1320, 700]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, - }, - edges=[ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'interpolate_1', {'in': 0}), - ('scale_1', 'scale_1_data'), - ('scale_1_data', 'interpolate_1', {'in': 1}), - ('interpolate_1', 'interpolate_1_data'), - ('scale_2', 'scale_2_data'), - ('interpolate_2', 'interpolate_2_data'), - ('interpolate_1_data', 'interpolate_2', {'in': 0}), - ('scale_2_data', 'interpolate_2', {'in': 1}), - ('interpolate_2_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), - ] - ) - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_2d_interpolate_sequence_1_opset4_case(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_1_opset4_case, - edges=edges_for_2d_case_1_opset4_case - ) - - ref_graph = build_graph( - nodes_attrs=ref_graph_node_attrs_for_2d_case_1_opset4_case, - edges=ref_edges_for_2d_case_1_opset4_case - ) - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_2d_interpolate_sequence_2(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_2, - edges=edges_for_2d_case_2 - ) - ref_graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_2, - edges=edges_for_2d_case_2 - ) - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_2d_interpolate_sequence_3(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_3, - edges=edges_for_2d_case_3 - ) - - ref_graph = build_graph( - 
nodes_attrs=graph_node_attrs_for_2d_case_3, - edges=edges_for_2d_case_3 - ) - - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_2d_interpolate_sequence_4(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_4, - edges=edges_for_2d_case_4 - ) - - ref_graph = build_graph( - nodes_attrs={ - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 4, 220, 350]), - 'kind': 'data', - 'data_type': None - }, - 'scale': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2200, 700]) - }, - 'scale_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'interpolate': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2, 3]), - 'mode': 'linear', - 'align_corners': 0, - 'antialias': 1, - 'pads_begin': 5, - 'pads_end': 3, - 'version': 'opset1' - }, - 'interpolate_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 4, 2200, 700]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, - }, - edges=[ - ('placeholder', 'placeholder_data'), - - ('placeholder_data', 'interpolate', {'in': 0}), - ('scale', 'scale_data'), - ('scale_data', 'interpolate', {'in': 1}), - ('interpolate', 'interpolate_data'), - - ('interpolate_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), - ] - ) - - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_2d_interpolate_sequence_4_opset4_case(self): - graph = build_graph( - nodes_attrs=new_graph_node_attrs_for_2d_case_4_opset4_case, - edges=new_edges_for_2d_case_4_opset4_case - ) - - ref_graph = build_graph( - nodes_attrs=new_ref_graph_node_attrs_for_2d_case_4_opset4_case, - edges=new_ref_edges_for_2d_case_4_opset4_case - ) - - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_2d_interpolate_sequence_5(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_4, - edges=edges_for_2d_case_4, - update_attributes={ - 'interpolate_1': { - 'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 0 - } - } - ) - - ref_graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_4, - edges=edges_for_2d_case_4, - update_attributes={ - 'interpolate_1': { - 'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 0 - } - } - ) - - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_2d_interpolate_sequence_5_opset4_case(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_4_opset4_case, - edges=edges_for_2d_case_4_opset4_case, - update_attributes={ - 'interpolate_1': { - 'antialias': 0, 'cube_coeff': -0.1 - } - } - ) - - ref_graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_4_opset4_case, - edges=edges_for_2d_case_4_opset4_case, - update_attributes={ - 'interpolate_1': { - 'antialias': 0, 'cube_coeff': -0.1 - } - } - ) - - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 
'output') - self.assertTrue(flag, resp) - - def test_2d_interpolate_sequence_6(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_6, - edges=edges_for_2d_case_6, - ) - - ref_graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_case_6, - edges=edges_for_2d_case_6 - ) - - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_3d_interpolate_sequence_1(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_3d_case_1, - edges=edges_for_3d_case_1 - ) - - ref_graph = build_graph( - nodes_attrs={ - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 5, 1024, 256, 800]), - 'kind': 'data', - 'data_type': None - }, - 'scale_1': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4096, 1280, 2400]) - }, - 'scale_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'interpolate_1': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2, 3, 4]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_1_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 2400]), 'kind': 'data'}, - 'scale_2': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([512]) - }, - 'scale_2_data': {'value': None, 'shape': [1], 'kind': 'data'}, - 'interpolate_2': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([4]), - 'mode': 'nearest', - 'version': 'opset1' - }, - 'interpolate_2_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 5, 4096, 1280, 512]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, - }, - edges=[ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'interpolate_1', {'in': 0}), - ('scale_1', 'scale_1_data'), - ('scale_1_data', 'interpolate_1', {'in': 1}), - ('interpolate_1', 'interpolate_1_data'), - ('scale_2', 'scale_2_data'), - ('interpolate_2', 'interpolate_2_data'), - ('interpolate_1_data', 'interpolate_2', {'in': 0}), - ('scale_2_data', 'interpolate_2', {'in': 1}), - ('interpolate_2_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), - ] - ) - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_3d_interpolate_sequence_1_opset4_case(self): - graph = build_graph( - nodes_attrs=new_graph_node_attrs_for_3d_case_1_opset4_case, - edges=new_edges_for_3d_case_1_opset4_case - ) - - ref_graph = build_graph( - nodes_attrs=new_ref_graph_node_attrs_for_3d_case_1_opset4_case, - edges=new_ref_edges_for_3d_case_1_opset4_case - ) - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_3d_interpolate_sequence_2(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_3d_case_2, - edges=edges_for_3d_case_2 - ) - ref_graph = build_graph( - nodes_attrs=graph_node_attrs_for_3d_case_2, - edges=edges_for_3d_case_2 - ) - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_3d_interpolate_sequence_3(self): - graph = build_graph( - 
nodes_attrs=graph_node_attrs_for_3d_case_3, - edges=edges_for_3d_case_3 - ) - ref_graph = build_graph( - nodes_attrs=graph_node_attrs_for_3d_case_3, - edges=edges_for_3d_case_3 - ) - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_3d_interpolate_sequence_4(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_3d_case_4, - edges=edges_for_3d_case_4 - ) - - ref_graph = build_graph( - nodes_attrs={ - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([10, 64, 511, 416, 10240]), - 'kind': 'data', - 'data_type': None - }, - 'scale': { - 'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4599, 912, 133120]) - }, - 'scale_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'interpolate': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([2, 3, 4]), - 'mode': 'linear', - 'align_corners': 0, - 'antialias': 1, - 'pads_begin': 5, - 'pads_end': 3, - 'version': 'opset1' - }, - 'interpolate_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([10, 64, 4599, 912, 133120]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, - }, - edges=[ - ('placeholder', 'placeholder_data'), - - ('placeholder_data', 'interpolate', {'in': 0}), - ('scale', 'scale_data'), - ('scale_data', 'interpolate', {'in': 1}), - ('interpolate', 'interpolate_data'), - - ('interpolate_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), - ] - ) - - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_3d_interpolate_sequence_4_opset4_case(self): - graph = build_graph( - nodes_attrs=new_graph_node_attrs_for_3d_case_4_opset4_case, - edges=new_edges_for_3d_case_4_opset4_case - ) - - ref_graph = build_graph( - nodes_attrs=new_ref_graph_node_attrs_for_3d_case_4_opset4_case, - edges=new_ref_edges_for_3d_case_4_opset4_case - ) - - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_3d_interpolate_sequence_5(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_3d_case_4, - edges=edges_for_3d_case_4, - update_attributes={ - 'interpolate_1': { - 'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 7 - } - } - ) - - ref_graph = build_graph( - nodes_attrs=graph_node_attrs_for_3d_case_4, - edges=edges_for_3d_case_4, - update_attributes={ - 'interpolate_1': { - 'align_corners': 1, 'antialias': 1, 'pads_begin': 3, 'pads_end': 7 - } - } - ) - - InterpolateSequenceToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/L2NormFusing_test.py b/tools/mo/unit_tests/mo/middle/L2NormFusing_test.py deleted file mode 100644 index 49aaac6d84e2cf..00000000000000 --- a/tools/mo/unit_tests/mo/middle/L2NormFusing_test.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.middle.L2NormFusing import L2NormToNorm -from 
openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_attrs - -# A list with nodes attributes used to build various graphs. -nodes = [ - ('l2_normalize_mul', dict(kind='op', op='Mul', name='l2_norm_name')), - ('l2_normalize_mul_data', dict(kind='data')), - ('maximum', dict(kind='op', op='Maximum')), - ('maximum_data', dict(kind='data')), - ('maximum_y_const', dict(kind='op', op='Const', value=np.array(12.e-13, dtype=np.float32))), - ('maximum_y_data', dict(kind='data', value=np.array(12.e-13, dtype=np.float32))), - ('rsqrt_pow', dict(kind='data', value=-0.5)), - ('rsqrt', dict(kind='op', op='Pow')), - ('rsqrt_data', dict(kind='data')), - ('square_pow', dict(kind='op', op='Const', value=2.)), - ('square_pow_data', dict(kind='data', value=2.)), - ('square', dict(kind='op', op='Pow')), - ('sum', dict(kind='op', op='ReduceSum')), - ('sum_data', dict(kind='data')), - ('sum_axes', dict(kind='op', op='Const')), - # nodes added after replacement - ('normalize_node', dict(kind='op', op='NormalizeL2')), - ('weights_node', dict(kind='op', op='Const')), - ('result', dict(kind='op', op='Result')) -] - -edges = [ - ('input', 'input_data', {'out': 0}), - ('input_data', 'square', {'in': 0}), - ('square_pow', 'square_pow_data', {'out': 0}), - ('square_pow_data', 'square', {'in': 1}), - ('square', 'square_data'), - ('square_data', 'sum'), - ('sum_axes', 'sum_axes_data'), - ('sum_axes_data', 'sum'), - ('sum', 'sum_data'), - ('maximum_y_const', 'maximum_y_data'), - ('maximum_y_data', 'maximum'), - ('sum_data', 'maximum'), - ('maximum', 'maximum_data'), - ('maximum_data', 'rsqrt', {'in': 0}), - ('rsqrt_pow', 'rsqrt', {'in': 1}), - ('rsqrt', 'rsqrt_data'), - ('rsqrt_data', 'l2_normalize_mul'), - ('input_data', 'l2_normalize_mul'), - ('l2_normalize_mul', 'l2_normalize_mul_data'), - ('l2_normalize_mul_data', 'result'), -] - -edges_after_replacement = [ - ('input', 'input_data', {'out': 0}), - ('input_data', 'normalize_node'), - ('weights_node', 'weights_node_data'), - ('weights_node_data', 'normalize_node'), - ('normalize_node', 'l2_normalize_mul_data'), - ('l2_normalize_mul_data', 'result'), -] - - -class TestL2NormToNormTest(): - @pytest.mark.parametrize("input_shape, axes, layout", - [(int64_array([2, 3]), int64_array([1]), 'NCHW'), # NC layout, normalize C dimension - (int64_array([2, 3]), int64_array([1]), 'NHWC'), # NC layout, normalize C dimension - (int64_array([2, 3, 5]), int64_array([1]), 'NCHW'), # NCH layout, normalize C dimension - (int64_array([2, 3, 5]), int64_array([1]), 'NHWC'), # NCH layout, normalize C dimension - (int64_array([2, 3, 5]), int64_array([-1, -2]), 'NHWC'), # NCH layout, normalize CH dimensions - (int64_array([2, 3, 5]), int64_array([-1, -2]), 'NCHW'), # NCH layout, normalize CH dimensions - (int64_array([2, 3, 5]), int64_array([1, 2]), 'NCHW'), # NCH layout, normalize CH dimensions - (int64_array([2, 3, 5]), int64_array([1, 2]), 'NHWC'), # NCH layout, normalize CH dimensions - (int64_array([2, 3, 5, 7]), int64_array([1]), 'NCHW'), # NCHW layout, normalize C dimension - (int64_array([2, 3, 5, 7]), int64_array([-1]), 'NHWC'), # NHWC layout, normalize C dimension - (int64_array([2, 3, 5, 7]), int64_array([3]), 'NHWC'), # NCHW layout, normalize C dimension - (int64_array([2, 3, 5, 7]), int64_array([-1, 1, 2]), 'NCHW'), # NCHW layout, normalize CHW dimensions - (int64_array([2, 3, 5, 7]), int64_array([-3, -2, -1]), 'NHWC'), # NCHW layout, 
normalize HWC dimensions - ]) - def test_positive(self, input_shape, axes, layout): - graph = build_graph_with_attrs(nodes + [ - ('input', dict(kind='op', shape=input_shape, op='Parameter', data_type=np.float32)), - ('input_data', dict(kind='data', shape=input_shape, data_type=np.float32)), - ('square_data', dict(kind='data', shape=input_shape)), - ('sum_axes_data', dict(kind='data', value=axes, shape=None)), - ], edges, nodes_with_edges_only=True) - graph.stage = 'middle' - graph.graph['layout'] = layout - - L2NormToNorm().find_and_replace_pattern(graph) - - graph_ref = build_graph_with_attrs(nodes + [ - ('input', dict(kind='op', shape=input_shape, op='Parameter', data_type=np.float32)), - ('input_data', dict(kind='data', shape=input_shape, data_type=np.float32)), - ('weights_node_data', dict(kind='data', value=axes.sort())), - ], edges_after_replacement, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - assert (graph.node[graph.get_nodes_with_attributes(type='NormalizeL2')[0]]['name'] == 'l2_norm_name') - assert flag, resp - - @pytest.mark.parametrize("input_shape, axes, layout", - [(int64_array([2]), int64_array([0]), 'NCHW'), - (int64_array([2, 3]), int64_array([0]), 'NCHW'), - (int64_array([2, 3]), int64_array([0]), 'NHWC'), - (int64_array([2, 3]), int64_array([0, 1]), 'NCHW'), - (int64_array([2, 3]), int64_array([0, 1]), 'NHWC'), - (int64_array([2, 3, 5]), int64_array([0]), 'NCHW'), - (int64_array([2, 3, 5]), int64_array([0]), 'NHWC'), - (int64_array([2, 3, 5]), int64_array([-1]), 'NCHW'), - (int64_array([2, 3, 5]), int64_array([-1]), 'NHWC'), - (int64_array([2, 3, 5]), int64_array([0, 1]), 'NCHW'), - (int64_array([2, 3, 5]), int64_array([0, 1]), 'NHWC'), - (int64_array([2, 3, 5]), int64_array([0, 2]), 'NCHW'), - (int64_array([2, 3, 5]), int64_array([0, 2]), 'NHWC'), - (int64_array([2, 3, 5, 7]), int64_array([0]), 'NCHW'), - (int64_array([2, 3, 5, 7]), int64_array([0]), 'NHWC'), - (int64_array([2, 3, 5, 7]), int64_array([2]), 'NCHW'), - (int64_array([2, 3, 5, 7]), int64_array([2]), 'NHWC'), - (int64_array([2, 3, 5, 7]), int64_array([3]), 'NCHW'), - (int64_array([2, 3, 5, 7]), int64_array([1]), 'NHWC'), - (int64_array([2, 3, 5, 7]), int64_array([1, 2]), 'NCHW'), - (int64_array([2, 3, 5, 7]), int64_array([1, -1]), 'NHWC'), - (int64_array([2, 3, 5, 7]), int64_array([1, -1]), 'NCHW'), - (int64_array([2, 3, 5, 7]), int64_array([-2, -1]), 'NHWC'), - (int64_array([2, 3, 5, 7]), int64_array([1, 3]), 'NCHW'), - (int64_array([2, 3, 5, 7]), int64_array([2, 3]), 'NHWC'), - (int64_array([2, 3, 5, 7]), int64_array([0, 1, 2]), 'NCHW'), - (int64_array([2, 3, 5, 7]), int64_array([0, 1, 2]), 'NHWC'), - (int64_array([2, 3, 5, 7]), int64_array([0, 2, 3]), 'NCHW'), - (int64_array([2, 3, 5, 7]), int64_array([0, 2, 3]), 'NHWC'), - (int64_array([2, 3, 5, 7]), int64_array([0, 1, 2, 3]), 'NCHW'), - (int64_array([2, 3, 5, 7]), int64_array([0, 1, 2, 3]), 'NHWC'), - (int64_array([2, 3, 5, 7, 9]), int64_array([1]), 'NCHW'), - (int64_array([2, 3, 5, 7, 9]), int64_array([-1]), 'NHWC'), - (int64_array([2, 3, 5, 7, 9]), int64_array([1, 2, 3, 4]), 'NCHW'), - (int64_array([2, 3, 5, 7, 9]), int64_array([-1, -2, -3, -4]), 'NHWC'), - ]) - def test_negative(self, input_shape, axes, layout): - graph = build_graph_with_attrs(nodes + [ - ('input', dict(kind='op', shape=input_shape, op='Parameter', data_type=np.float32)), - ('input_data', dict(kind='data', shape=input_shape, data_type=np.float32)), - ('square_data', dict(kind='data', shape=input_shape)), - 
('sum_axes_data', dict(kind='data', value=axes, shape=None)), - ], edges, nodes_with_edges_only=True) - graph.stage = 'middle' - graph.graph['layout'] = layout - - L2NormToNorm().find_and_replace_pattern(graph) - - graph_ref = build_graph_with_attrs(nodes + [ - ('input', dict(kind='op', shape=input_shape, op='Parameter', data_type=np.float32)), - ('input_data', dict(kind='data', shape=input_shape, data_type=np.float32)), - ('square_data', dict(kind='data', shape=input_shape)), - ('sum_axes_data', dict(kind='data', value=axes, shape=None)), - ], edges, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/middle/LayoutChangeForEinsum_test.py b/tools/mo/unit_tests/mo/middle/LayoutChangeForEinsum_test.py deleted file mode 100644 index 4106e9f9d35149..00000000000000 --- a/tools/mo/unit_tests/mo/middle/LayoutChangeForEinsum_test.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.LayoutChangeForEinsum import LayoutChangeForEinsum -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, valued_const_with_data, connect - -nodes_attributes = { - # Parameter layers - **regular_op_with_shaped_data('placeholder_1', None, {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('placeholder_2', None, {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('placeholder_3', None, {'type': 'Parameter', 'op': 'Parameter'}), - - # Einsum layer - **regular_op_with_shaped_data('einsum', None, {'type': 'Einsum', 'op': 'Einsum'}), - - # Result layer - **result(), - - # Transpose layers - **regular_op_with_shaped_data('transpose_1', None, - {'type': 'Transpose', 'op': 'Transpose', 'need_shape_inference': True}), - **regular_op_with_shaped_data('transpose_3', None, - {'type': 'Transpose', 'op': 'Transpose', 'need_shape_inference': True}), - - # Const layers - **valued_const_with_data('axis_1_const', int64_array([0, 2, 3, 1])), - **valued_const_with_data('axis_3_const', int64_array([0, 4, 1, 2, 3])), -} - - -class LayoutChangeForEinsumTests(unittest.TestCase): - def test_layout_change_einsum(self): - graph = build_graph(nodes_attributes, - [*connect('placeholder_1', '0:einsum'), - *connect('placeholder_2', '1:einsum'), - *connect('placeholder_3', '2:einsum'), - *connect('einsum', 'output')], - { # this input stays as is since it is of a rank equal to 3 - 'placeholder_1_d': {'shape': np.array([2, 3, 5])}, - # [3, 5, 7, 8] - NHWC, [3, 8, 5, 7] - NCHW - # this input does not require additional transpose - # since the corresponding subscript can be adjusted - 'placeholder_2_d': {'shape': np.array([3, 8, 5, 7])}, - # [3, 8, 10, 12] - NHWC, [3, 12, 8, 10] - NCHW - # the third input must be transposed to NHWC layout - # since ellipsis covers multiple dimensions in the end - # the corresponding subscript is not changed - 'placeholder_3_d': {'shape': np.array([3, 12, 8, 10])}, - # equation is still for NHWC layout - 'einsum': {'equation': "abc,bcde,bc...->ade..."}, - # [2, 7, 8, 10, 12] - NHWC, [2, 12, 7, 8, 10] - NCHW - # the output is in NCHW layout but its shape will be re-inferred since - # the output stays in NHWC layout due to ellipsis in the end - # 
and additional transpose to NCHW will be inserted - 'einsum_d': {'shape': np.array([2, 12, 7, 8, 10])}, - }, nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [*connect('placeholder_3', '0:transpose_1'), - *connect('axis_1_const', '1:transpose_1'), - *connect('placeholder_1', '0:einsum'), - *connect('placeholder_2', '1:einsum'), - *connect('transpose_1', '2:einsum'), - *connect('einsum', '0:transpose_3'), - *connect('axis_3_const', '1:transpose_3'), - *connect('transpose_3', 'output')], - {'placeholder_1_d': {'shape': np.array([2, 3, 5])}, - 'placeholder_2_d': {'shape': np.array([3, 8, 5, 7])}, - 'einsum': {'equation': "abc,becd,bc...->ade..."}, - 'einsum_d': {'shape': np.array([2, 12, 7, 8, 10])} - }) - - LayoutChangeForEinsum().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_no_adjustment_layout_einsum(self): - graph = build_graph(nodes_attributes, - [*connect('placeholder_1', '0:einsum'), - *connect('placeholder_2', '1:einsum'), - *connect('placeholder_3', '2:einsum'), - *connect('einsum', 'output')], - { # this input stays as is since it is of a rank equal to 3 - 'placeholder_1_d': {'shape': np.array([2, 3, 5])}, - # [3, 5, 7, 8] - NHWC - # this input does not require additional transpose - # since the corresponding layout is correct - 'placeholder_2_d': {'shape': np.array([3, 5, 7, 8])}, - # [3, 8, 10, 12] - NHWC - # this input does not require additional transpose - # since the corresponding layout is correct - 'placeholder_3_d': {'shape': np.array([3, 8, 10, 12])}, - # equation is still for NHWC layout - 'einsum': {'equation': "abc,bcde,bc...->ade...", - 'correct_in_data_layout': [0, 1, 2], - 'correct_out_data_layout': [0]}, - # [2, 7, 8, 10, 12] - NHWC - # this output does not require additional transpose - # since the corresponding layout is correct - 'einsum_d': {'shape': np.array([2, 7, 8, 10, 12])}, - }, nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - - graph_ref = build_graph(nodes_attributes, - [*connect('placeholder_1', '0:einsum'), - *connect('placeholder_2', '1:einsum'), - *connect('placeholder_3', '2:einsum'), - *connect('einsum', 'output')], - {'placeholder_1_d': {'shape': np.array([2, 3, 5])}, - 'placeholder_2_d': {'shape': np.array([3, 5, 7, 8])}, - 'placeholder_3_d': {'shape': np.array([3, 8, 10, 12])}, - 'einsum': {'equation': "abc,bcde,bc...->ade..."}, - 'einsum_d': {'shape': np.array([2, 7, 8, 10, 12])} - }) - - LayoutChangeForEinsum().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/LeakyReluPattern_test.py b/tools/mo/unit_tests/mo/middle/LeakyReluPattern_test.py deleted file mode 100644 index 9b29af71f0054a..00000000000000 --- a/tools/mo/unit_tests/mo/middle/LeakyReluPattern_test.py +++ /dev/null @@ -1,95 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.LeakyReluPattern import LeakyReLUFusion -from openvino.tools.mo.front.common.partial_infer.utils import float_array, int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.result import Result -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, build_graph_with_edge_attrs, connect, \ - 
regular_op_with_shaped_data, valued_const_with_data, connect_data - -shape = int64_array([1, 3, 5, 2]) -nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('mul', shape, {'type': 'Multiply', 'name': 'mul'}), - **regular_op_with_shaped_data('max', shape, {'type': 'Maximum', 'name': 'final_max'}), - **valued_const_with_data('const', float_array([0.5])), - **result('result') - } - -edges = [*connect('input:0', '0:mul'), - *connect('const', '1:mul'), - *connect_data('input', '0:max'), - *connect('mul:0', '1:max'), - *connect('max:0', 'result'), - ] - -ref_nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('leaky_relu', shape, {'type': 'LeakyReLU', 'name': 'max_final', - 'negative_slope': None}), - **result('result') - } -ref_edges = [*connect('input:0', 'leaky_relu'), *connect('leaky_relu', 'result')] - - -class LeakyReluFusionTest(unittest.TestCase): - def test_leaky_relu_data_port_0(self): - graph = build_graph_with_edge_attrs(nodes, edges, {}) - graph_ref = build_graph(ref_nodes, ref_edges) - Node(graph_ref, 'leaky_relu')['negative_slope'] = 0.5 - - LeakyReLUFusion().find_and_replace_pattern(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - self.assertTrue(len(graph.get_op_nodes(name='final_max')) == 1 and - graph.get_op_nodes(name='final_max')[0].op == 'LeakyReLU') - - def test_leaky_relu_not_applicable_non_scalar_const(self): - # const value is not a scalar or 1D tensor with 1 element so the transformation is not applicable - graph = build_graph_with_edge_attrs(nodes, edges, {}) - Node(graph, 'const')['value'] = float_array([0.5, 0.7]) - Node(graph, 'const_d')['value'] = float_array([0.5, 0.7]) - graph_ref = graph.copy() - - LeakyReLUFusion().find_and_replace_pattern(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - def test_leaky_relu_mul_multiple_consumers(self): - # multiple consumers of Mul operation - graph = build_graph_with_edge_attrs(nodes, edges, {}) - additional_result = Result(graph, {'name': 'result_2'}).create_node() - Node(graph, 'mul').out_port(0).connect(additional_result.in_port(0)) - - ref_nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('mul', shape, {'type': 'Multiply', 'name': 'mul'}), - **regular_op_with_shaped_data('max', shape, {'type': 'Maximum', 'name': 'final_max'}), - **valued_const_with_data('const', float_array([0.5])), - **regular_op_with_shaped_data('leaky_relu', shape, {'type': 'LeakyReLU', 'name': 'max_final', - 'negative_slope': None}), - **result('result'), - **result('result_2') - } - ref_edges = [*connect('input:0', '0:mul'), - *connect('const', '1:mul'), - *connect('max:0', 'result'), - *connect('mul:0', 'result_2'), - *connect_data('input', 'leaky_relu'), - *connect('leaky_relu', 'result') - ] - graph_ref = build_graph_with_edge_attrs(ref_nodes, ref_edges) - - LeakyReLUFusion().find_and_replace_pattern(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result_2') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/MXTileReplacer_test.py b/tools/mo/unit_tests/mo/middle/MXTileReplacer_test.py deleted file mode 100644 index 
7eebae8395e33a..00000000000000 --- a/tools/mo/unit_tests/mo/middle/MXTileReplacer_test.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.MXTileReplacer import MXTileReplacer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': {'kind': 'data'}, - 'tile': {'kind': 'op', 'op': 'Tile'}, - 'tile_data': {'kind': 'data', 'shape': int64_array([1, 1, 1, 1])}, - 'result': {'kind': 'op', 'op': 'Result'}, - - 'unsqueeze_1': {'kind': 'op', 'op': 'Unsqueeze'}, - 'unsqueeze_1_data': {'kind': 'data'}, - 'unsqueeze_1_const': {'kind': 'op', 'op': 'Const'}, - 'unsqueeze_1_const_data': {'kind': 'data'}, -} - - -class MXTileReplacerTest(unittest.TestCase): - - def test_insert_one_unsqueeze(self): - graph = build_graph( - nodes_attributes, - [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'tile'), - ('tile', 'tile_data'), - ('tile_data', 'result') - ], - { - 'placeholder_data': {'shape': int64_array([1, 1, 1])} - }, - nodes_with_edges_only=True - ) - - ref_graph = build_graph( - nodes_attributes, - [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'unsqueeze_1', {'in': 0}), - ('unsqueeze_1_const', 'unsqueeze_1_const_data'), - ('unsqueeze_1_const_data', 'unsqueeze_1', {'in': 1}), - ('unsqueeze_1', 'unsqueeze_1_data'), - ('unsqueeze_1_data', 'tile'), - ('tile', 'tile_data'), - ('tile_data', 'result') - ], - { - 'placeholder_data': {'shape': int64_array([1, 1, 1])}, - 'unsqueeze_1_const_data': {'value': int64_array([0])} - }, - nodes_with_edges_only=True - ) - - MXTileReplacer().find_and_replace_pattern(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_insert_two_unsqueezes(self): - graph = build_graph( - nodes_attributes, - [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'tile'), - ('tile', 'tile_data'), - ('tile_data', 'result') - ], - { - 'placeholder_data': {'shape': int64_array([1, 1])} - }, - nodes_with_edges_only=True - ) - - ref_graph = build_graph( - nodes_attributes, - [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'unsqueeze_1', {'in': 0}), - ('unsqueeze_1_const', 'unsqueeze_1_const_data'), - ('unsqueeze_1_const_data', 'unsqueeze_1', {'in': 1}), - ('unsqueeze_1', 'unsqueeze_1_data'), - ('unsqueeze_1_data', 'tile'), - ('tile', 'tile_data'), - ('tile_data', 'result') - ], - { - 'placeholder_data': {'shape': int64_array([1, 1])}, - 'unsqueeze_1_const_data': {'value': int64_array([0, 1])} - }, - nodes_with_edges_only=True - ) - - MXTileReplacer().find_and_replace_pattern(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/MakeKaldiConstReshapable_test.py b/tools/mo/unit_tests/mo/middle/MakeKaldiConstReshapable_test.py deleted file mode 100644 index 15afc8ac796015..00000000000000 --- a/tools/mo/unit_tests/mo/middle/MakeKaldiConstReshapable_test.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from 
openvino.tools.mo.middle.MakeKaldiConstReshapable import MakeKaldiConstReshapable -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, regular_op_with_shaped_data, connect - -nodes = { - **regular_op_with_shaped_data('placeholder_1', [1, 13], {'kind': 'op', 'op': 'Parameter', 'shape': [1, 13]}), - **regular_op_with_shaped_data('splice_1', [1, 13], {'kind': 'op', 'op': 'Splice', - 'context': np.array([-2, -1, 0, 1, 2])}), - **regular_op_with_shaped_data('placeholder_2', [1, 26], {'kind': 'op', 'op': None}), - **regular_op_with_shaped_data('memory_in', [1, 5], {'kind': 'op', 'op': 'ReadValue', - 'shape': int64_array([1, 5])}), - **regular_op_with_shaped_data('memory_out', [1, 5], {'kind': 'op', 'op': 'Assign', 'shape': int64_array([1, 5])}), - **result('result'), - **regular_op_with_shaped_data('crop_in', [1, 4], {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 1, 'dim': 4}), - **regular_op_with_shaped_data('crop_out', [1, 1], {'kind': 'op', 'op': 'Crop', 'axis': 1, 'offset': 0, 'dim': 1}), - **regular_op_with_shaped_data('equal', [1, 1], {'kind': 'op', 'op': 'Equal'}), - **regular_op_with_shaped_data('select', [1, 26], {'kind': 'op', 'op': 'Select'}), - **regular_op_with_shaped_data('const_0', [1, 1], {'kind': 'op', 'op': 'Const', 'shape': [1, 1], - 'value': [0], 'data_type': np.float32}), - **regular_op_with_shaped_data('const_1', [1, 1], {'kind': 'op', 'op': 'Const', 'shape': [1, 1], - 'value': [0], 'data_type': np.float32}), - **regular_op_with_shaped_data('concat', [1, 5], {'kind': 'op', 'op': 'Concat'}), - **regular_op_with_shaped_data('memory', [1, 26], {'kind': 'op', 'op': 'Assign'}), - - **regular_op_with_shaped_data('shape', None, {'kind': 'op', 'op': 'ShapeOf'}), - **regular_op_with_shaped_data('crop_batch', None, {'kind': 'op', 'op': 'Crop', 'offset': int64_array([0])}), - **regular_op_with_shaped_data('crop_batch_dim', None, {'kind': 'op', 'op': 'Const', 'shape': [1], - 'value': [1], 'data_type': np.int64}), - **regular_op_with_shaped_data('second_dim', None, {'kind': 'op', 'op': 'Const', 'shape': [1], - 'value': [5], 'data_type': np.int64}), - **regular_op_with_shaped_data('gather_shape', None, {'kind': 'op', 'op': 'Concat'}), - **regular_op_with_shaped_data('fill_value', [1, 5], {'kind': 'op', 'op': 'Const', 'shape': [1, 5], - 'value': np.zeros([1, 5]), 'data_type': np.float32}), - **regular_op_with_shaped_data('fill_value_2', None, {'kind': 'op', 'op': 'Const', 'shape': [1], - 'value': [0], 'data_type': np.float32}), - **regular_op_with_shaped_data('broadcast', [1, 5], {'kind': 'op', 'op': 'Broadcast'}), - - **regular_op_with_shaped_data('fill_value_ones', [1, 26], {'kind': 'op', 'op': 'Const', 'shape': [1, 26], - 'value': np.zeros([1, 26]), 'data_type': np.int64}), - **regular_op_with_shaped_data('fill_value_ones_2', [1, 1], {'kind': 'op', 'op': 'Const', 'shape': [1, 1], - 'value': [1], 'data_type': np.int64}), -} - - -class MakeKaldiConstReshapableTests(unittest.TestCase): - - # graph contains 1 splice with context length 5, should be inserted select with memory as counter with length 5 - def test_reshapable_const(self): - graph = build_graph(nodes, - [*connect('placeholder_1', 'splice_1'), - *connect('splice_1', 'placeholder_2'), - *connect('placeholder_2', '1:select'), - *connect('fill_value', 'memory_in'), - *connect('memory_in', 'crop_in'), - *connect('crop_in', '0:concat'), - *connect('fill_value_ones_2:0', 
'1:concat'), - *connect('concat', 'memory_out'), - *connect('memory_out', 'result'), - *connect('concat', 'crop_out'), - *connect('crop_out', '1:equal'), - *connect('fill_value_ones_2:0', '0:equal'), - *connect('equal', '0:select'), - *connect('fill_value_ones', '2:select'), - *connect('select', 'memory') - ], - nodes_with_edges_only=True) - graph.strict_mode = False - MakeKaldiConstReshapable().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes, - [*connect('placeholder_1:0', 'splice_1'), - *connect('splice_1', 'placeholder_2'), - *connect('placeholder_2', '1:select'), - *connect('placeholder_1:0', 'shape', skip_data=True), - *connect('shape', '0:crop_batch'), - *connect('crop_batch_dim', '1:crop_batch'), - *connect('second_dim', '1:gather_shape'), - *connect('crop_batch', '0:gather_shape'), - *connect('fill_value_2', '0:broadcast'), - *connect('gather_shape', '1:broadcast'), - *connect('broadcast', 'memory_in'), - *connect('memory_in', 'crop_in'), - *connect('crop_in', '0:concat'), - *connect('fill_value_ones_2', '1:concat'), - *connect('concat', 'memory_out'), - *connect('memory_out', 'result'), - *connect('concat', 'crop_out'), - *connect('crop_out', '1:equal'), - *connect('fill_value_ones_2', '0:equal'), - *connect('equal', '0:select'), - *connect('const_0', '2:select'), - *connect('fill_value_ones', '2:select'), - *connect('select', 'memory') - ], nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, ref_graph, 'memory') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/MulQuantizeFuse_test.py b/tools/mo/unit_tests/mo/middle/MulQuantizeFuse_test.py deleted file mode 100644 index 62ca58619bd422..00000000000000 --- a/tools/mo/unit_tests/mo/middle/MulQuantizeFuse_test.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.MulFakeQuantizeFuse import MulFakeQuantizeFuse -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -# The dictionary with nodes attributes used to build various graphs. A key is the name of the node and the value is the -# dictionary with node attributes. 
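# Illustrative aside: a minimal standalone NumPy sketch of the arithmetic identity that a
# Mul -> FakeQuantize fusion relies on. Under the usual FakeQuantize definition, for a positive
# per-tensor constant c, FakeQuantize(x * c, in_lo, in_hi, out_lo, out_hi) gives the same result
# as FakeQuantize(x, in_lo / c, in_hi / c, out_lo, out_hi), which is why the reference graphs in
# the tests below expect the input range [-10, 10] to become [-5, 5] when the multiplier is 2.
# The `fake_quantize` helper here is a simplified reference formula assumed for illustration,
# not the MO or OpenVINO implementation.
import numpy as np

def fake_quantize(x, in_lo, in_hi, out_lo, out_hi, levels=256):
    # Clamp to the input range, snap to `levels` evenly spaced steps, rescale to the output range.
    x = np.clip(x, in_lo, in_hi)
    q = np.round((x - in_lo) / (in_hi - in_lo) * (levels - 1)) / (levels - 1)
    return q * (out_hi - out_lo) + out_lo

x = np.linspace(-8.0, 8.0, 33)
c = 2.0  # positive multiplier, matching the mul_const value used in test_2 below
assert np.allclose(fake_quantize(x * c, -10.0, 10.0, 0.0, 1.0),
                   fake_quantize(x, -10.0 / c, 10.0 / c, 0.0, 1.0))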
-nodes = { - 'x': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'x_data': {'value': None, 'shape': np.array([1, 64, 56, 56]), 'kind': 'data'}, - - 'mul_const': {'op': 'Const', 'type': 'Const', 'kind': 'op', 'value': None, 'shape': None}, - 'mul_const_data': {'value': np.array([]), 'shape': np.array([]), 'kind': 'data'}, - - 'mul': {'op': 'Mul', 'kind': 'op'}, - 'mul_data': {'value': np.array([]), 'shape': np.array([]), 'kind': 'data'}, - - 'mi_i': {'op': 'Const', 'type': 'Const', 'kind': 'op', 'value': None, 'shape': None}, - 'mi_i_data': {'value': np.array([-10]), 'shape': np.array([]), 'kind': 'data'}, - - 'ma_i': {'op': 'Const', 'type': 'Const', 'kind': 'op', 'value': None, 'shape': None}, - 'ma_i_data': {'value': np.array([10]), 'shape': np.array([]), 'kind': 'data'}, - - 'mi_o': {'op': 'Const', 'type': 'Const', 'kind': 'op', 'value': None, 'shape': None}, - 'mi_o_data': {'value': np.array([]), 'shape': np.array([]), 'kind': 'data'}, - - 'ma_o': {'op': 'Const', 'type': 'Const', 'kind': 'op', 'value': None, 'shape': None}, - 'ma_o_data': {'value': np.array([]), 'shape': np.array([]), 'kind': 'data'}, - - 'quantize': {'type': 'FakeQuantize', 'kind': 'op', 'op': 'FakeQuantize', 'levels': 2}, - 'quantize_data': {'value': None, 'shape': np.array([1, 64, 56, 56]), 'kind': 'data'}, - - 'output': {'op': 'Result', 'kind': 'op'}, -} - -edges = [ - ('x', 'x_data'), - ('mul_const', 'mul_const_data'), - ('mul', 'mul_data'), - ('mi_i', 'mi_i_data'), - ('ma_i', 'ma_i_data'), - ('mi_o', 'mi_o_data'), - ('ma_o', 'ma_o_data'), - ('quantize', 'quantize_data'), - ('quantize_data', 'output'), - - ('x_data', 'mul', {'in': 0}), - ('mul_const_data', 'mul', {'in': 1}), - - ('mul_data', 'quantize', {'in': 0}), - ('mi_i_data', 'quantize', {'in': 1}), - ('ma_i_data', 'quantize', {'in': 2}), - ('mi_o_data', 'quantize', {'in': 3}), - ('ma_o_data', 'quantize', {'in': 4}), -] - -edges_ref = [ - ('x', 'x_data'), - ('mul_const', 'mul_const_data'), - ('mul', 'mul_data'), - ('mi_i', 'mi_i_data'), - ('ma_i', 'ma_i_data'), - ('mi_o', 'mi_o_data'), - ('ma_o', 'ma_o_data'), - ('quantize', 'quantize_data'), - ('quantize_data', 'output'), - - ('x_data', 'quantize', {'in': 0}), - ('mi_i_data', 'quantize', {'in': 1}), - ('ma_i_data', 'quantize', {'in': 2}), - ('mi_o_data', 'quantize', {'in': 3}), - ('ma_o_data', 'quantize', {'in': 4}), - - ('x_data', 'mul', {'in': 0}), - ('mul_const_data', 'mul', {'in': 1}), -] - - -class MulQuantizeFuseTest(unittest.TestCase): - def test_1(self): - graph = build_graph(nodes, edges, { - 'mul': {'can_be_fused': True}, - 'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.broadcast_to(np.array([1]), (3, 1, 1))}, - 'quantize_data': {'shape': np.array([2, 3, 4, 4])}, - 'mi_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))}, - 'ma_o_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.broadcast_to(np.array([1]), (1, 3, 1, 1))}, - }, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref = build_graph(nodes, edges_ref, { - 'quantize_data': {'shape': np.array([2, 3, 4, 4])}, - 'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.broadcast_to(np.array([1]), (3, 1, 1))}, - 'mi_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))}, - 'ma_o_data': {'shape': np.array([1, 3, 1, 1]), 'value': np.broadcast_to(np.array([1]), (1, 3, 1, 1))}, - 'mi_i_data': {'shape': np.array([3, 1, 1]), 'value': np.broadcast_to(np.array([-10]), (3, 1, 1))}, - 'ma_i_data': {'shape': np.array([3, 1, 
1]), 'value': np.broadcast_to(np.array([10]), (3, 1, 1))}, - }, nodes_with_edges_only=True) - - MulFakeQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - - self.assertTrue(flag, resp) - - def test_2(self): - graph = build_graph(nodes, edges, { - 'mul': {'can_be_fused': True}, - 'mul_const_data': {'shape': np.array([1]), 'value': np.array([2])}, - 'quantize_data': {'shape': np.array([2, 3, 4, 4])}, - 'mi_o_data': {'shape': np.array([1]), 'value': np.array([0])}, - 'ma_o_data': {'shape': np.array([1]), 'value': np.array([1])}, - }, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref = build_graph(nodes, edges_ref, { - 'quantize_data': {'shape': np.array([2, 3, 4, 4])}, - 'mul_const_data': {'shape': np.array([1]), 'value': np.array([2])}, - 'mi_o_data': {'shape': np.array([1]), 'value': np.array([0])}, - 'ma_o_data': {'shape': np.array([1]), 'value': np.array([1])}, - 'mi_i_data': {'shape': np.array([1]), 'value': np.array([-5])}, - 'ma_i_data': {'shape': np.array([1]), 'value': np.array([5])}, - }, nodes_with_edges_only=True) - - MulFakeQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - - self.assertTrue(flag, resp) - - def test_negative_1(self): - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref = build_graph(nodes, edges, nodes_with_edges_only=True) - - MulFakeQuantizeFuse().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - - self.assertTrue(flag, resp) - - def test_negative_2(self): - graph = build_graph(nodes, edges, {'mul': {'can_be_fused': False}}, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref = build_graph(nodes, edges, {'mul': {'can_be_fused': False}}, nodes_with_edges_only=True) - - MulFakeQuantizeFuse().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - - self.assertTrue(flag, resp) - - def test_negative_3(self): - graph = build_graph(nodes, edges, { - 'mul': {'can_be_fused': True}, - 'mul_const_data': {'shape': np.array([1]), 'value': np.array([-1])}, - 'quantize_data': {'shape': np.array([2, 3, 4, 4])}, - 'mi_o_data': {'shape': np.array([1]), 'value': np.array([0])}, - 'ma_o_data': {'shape': np.array([1]), 'value': np.array([1])}, - }, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref = graph.copy() - - MulFakeQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - - self.assertTrue(flag, resp) - - def test_negative_4(self): - graph = build_graph(nodes, edges, { - 'mul': {'can_be_fused': True}, - 'mul_const_data': {'shape': np.array([3, 1, 1]), 'value': np.array([[[-1]], [[1]], [[-1]]])}, - 'quantize_data': {'shape': np.array([2, 3, 4, 4])}, - 'mi_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))}, - 'ma_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([1]), (1, 1, 1, 1))}, - }, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref = graph.copy() - - MulFakeQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - - self.assertTrue(flag, resp) - - def test_negative_5(self): - graph = build_graph(nodes, edges, { - 'mul': {'can_be_fused': True}, - 'mul_const_data': 
{'shape': np.array([3, 1, 1]), 'value': np.array([[[0]], [[1]], [[2]]])}, - 'quantize_data': {'shape': np.array([2, 3, 4, 4])}, - 'mi_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))}, - 'ma_o_data': {'shape': np.array([1, 1, 1, 1]), 'value': np.broadcast_to(np.array([1]), (1, 1, 1, 1))}, - }, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref = graph.copy() - - MulFakeQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/PoolV2ToAttributedPool_test.py b/tools/mo/unit_tests/mo/middle/PoolV2ToAttributedPool_test.py deleted file mode 100644 index b4d3bde8ab94e0..00000000000000 --- a/tools/mo/unit_tests/mo/middle/PoolV2ToAttributedPool_test.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.PoolV2ToAttributedPool import PoolV2ToAttributedPool -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from openvino.tools.mo.utils.shape import int64_array -from unit_tests.utils.graph import build_graph, valued_const_with_data, regular_op_with_empty_data, \ - connect, shaped_const_with_data, result - - -class TestPoolV2ToAttributedPool(unittest.TestCase): - - def test_pool_v2_to_attributed_pool(self): - nodes = { - **shaped_const_with_data('input', int64_array([200, 200])), - **valued_const_with_data('windows', int64_array([4, 4])), - **valued_const_with_data('strides', int64_array([4, 4])), - - **regular_op_with_empty_data('pool_v2', {'op': 'PoolingV2', - 'pad': [2, 2], - 'spatial_dims': [1, 2], - 'auto_pad': 'same_upper', - 'output_spatial_shape': [2, 3], - 'pad_spatial_shape': [1, 2], - 'pool_method': 'max', - 'permute_attrs': None}), - - **regular_op_with_empty_data('pool_v1', {'type': 'Pooling', - 'pad': [2, 2], - 'spatial_dims': [1, 2], - 'auto_pad': 'same_upper', - 'output_spatial_shape': [2, 3], - 'pad_spatial_shape': [1, 2], - 'pool_method': 'max'}), - - **result('output') - } - - edges = [ - *connect('input', 'pool_v2:0'), - *connect('windows', 'pool_v2:1'), - *connect('strides', 'pool_v2:2'), - *connect('pool_v2', 'output'), - ] - - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - PoolV2ToAttributedPool().find_and_replace_pattern(graph) - - ref_graph = build_graph(nodes, [*connect('input', 'pool_v1'), *connect('pool_v1', 'output')], - nodes_with_edges_only=True) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/PreserveRuntimeInfo_test.py b/tools/mo/unit_tests/mo/middle/PreserveRuntimeInfo_test.py deleted file mode 100644 index 1237d37f6f409e..00000000000000 --- a/tools/mo/unit_tests/mo/middle/PreserveRuntimeInfo_test.py +++ /dev/null @@ -1,244 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.PreserveRuntimeInfo import PreserveRuntimeInfo -from openvino.tools.mo.ops.op import PermuteAttrs -from openvino.tools.mo.ops.transpose import Transpose -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from 
openvino.tools.mo.utils.runtime_info import RTInfo -from unit_tests.utils.graph import build_graph, connect, valued_const_with_data, regular_op_with_empty_data, \ - regular_op_with_shaped_data - -nodes = { - **regular_op_with_empty_data('placeholder2', {'type': 'Parameter'}), - **regular_op_with_empty_data('transpose_parameter', - {'type': 'Transpose', 'op': 'Transpose', 'infer': Transpose.infer}), - **regular_op_with_empty_data('transpose_result', - {'type': 'Transpose', 'op': 'Transpose', 'infer': Transpose.infer}), -} - -edges = [*connect('placeholder1', '0:add'), *connect('placeholder2', '1:add'), *connect('add', 'result')] -edges_with_transpose = [*connect('placeholder1', '0:transpose_parameter'), - *connect('transpose_parameter_order', '1:transpose_parameter'), - *connect('transpose_parameter', '0:add'), - *connect('placeholder2', '1:add'), - *connect('add', '0:transpose_result'), - *connect('transpose_result_order', '1:transpose_result'), - *connect('transpose_result', 'result')] - - -nodes_for_case_with_two_results = { - 'placeholder1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': np.float32}, - 'placeholder2': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': np.float32}, - 'add': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'infer': copy_shape_infer}, - 'add_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': np.float32}, - 'result1': {'kind': 'op', 'op': 'Result'}, - 'result2': {'kind': 'op', 'op': 'Result'}, - 'fft': {'kind': 'op', 'op': 'IDFT', 'type': 'IDFT', 'infer': copy_shape_infer}, - 'fft_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': np.float32}, - 'fft_axes': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': int64_array([1]), 'value': int64_array([-1]) - }, - 'fft_axes_data': {'value': int64_array([-1]), 'shape': int64_array([1]), 'kind': 'data', 'data_type': np.int64}, - 'transpose_parameter_order': { - 'type': 'Const', 'kind': 'op', 'op': 'Const', 'shape': None, 'value': None - }, - 'transpose_parameter_order_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': np.int64}, - 'transpose_parameter': {'type': 'Transpose', 'kind': 'op', 'op': 'Transpose', 'infer': Transpose.infer}, - 'transpose_parameter_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, -} - -edges_for_case_with_two_results = [ - ('transpose_parameter_order', 'transpose_parameter_order_data'), - ('transpose_parameter_order_data', 'transpose_parameter', {'in': 1}), - ('transpose_parameter', 'transpose_parameter_data'), - ('placeholder1', 'placeholder1_data'), - ('placeholder2', 'placeholder2_data'), - ('placeholder1_data', 'add', {'in': 0}), - ('placeholder2_data', 'add', {'in': 1}), - ('add', 'add_data'), - ('add_data', 'result1', {'out': 0, 'in': 0}), - ('add_data', 'fft', {'out': 0, 'in': 0}), - ('fft_axes', 'fft_axes_data'), - ('fft_axes_data', 'fft', {'in': 1}), - ('fft', 'fft_data'), - ('fft_data', 'result2'), -] - -edges_with_transpose_for_case_with_two_results = [ - ('transpose_parameter_order', 'transpose_parameter_order_data'), - ('placeholder1_data', 'transpose_parameter', {'in': 0}), - ('transpose_parameter_order_data', 'transpose_parameter', {'in': 1}), - ('transpose_parameter', 'transpose_parameter_data'), - ('placeholder1', 'placeholder1_data'), - ('placeholder2', 'placeholder2_data'), - ('transpose_parameter_data', 'add', {'in': 
0}), - ('placeholder2_data', 'add', {'in': 1}), - ('add', 'add_data'), - ('add_data', 'result1', {'out': 0, 'in': 0}), - ('add_data', 'fft', {'out': 0, 'in': 0}), - ('fft_axes', 'fft_axes_data'), - ('fft_axes_data', 'fft', {'in': 1}), - ('fft', 'fft_data'), - ('fft_data', 'result2'), -] - - -class TestPreserveRuntimeInfoTest(): - @pytest.mark.parametrize("nhwc_to_nchw_order, nchw_to_nhwc_order, add_permutation_attrs",[ - ([0, 3, 1, 2], [0, 2, 3, 1], True), - ([0, 4, 1, 2, 3], [0, 2, 3, 4, 1], True), - (None, None, False), - ]) - def test_transpose_insert(self, nhwc_to_nchw_order, nchw_to_nhwc_order, add_permutation_attrs): - graph_nodes = { - **valued_const_with_data('transpose_parameter_order', np.array(nhwc_to_nchw_order)), - **valued_const_with_data('transpose_result_order', np.array(nchw_to_nhwc_order)) - } - graph_nodes.update(nodes) - shape_len = len(nhwc_to_nchw_order) if add_permutation_attrs else 3 - shape = np.array(range(shape_len)) - add_shape = shape if nhwc_to_nchw_order is None else shape[nhwc_to_nchw_order] - graph_nodes.update( - { - **regular_op_with_shaped_data('placeholder1', shape, - {'type': 'Parameter', 'rt_info': RTInfo(), 'shape': shape}), - **regular_op_with_shaped_data('result', shape, {'type': 'Result', 'rt_info': RTInfo(), 'shape': shape}), - **regular_op_with_shaped_data('add', add_shape, - {'type': 'Add', 'op': 'Add', 'infer': copy_shape_infer}), - } - ) - - graph = build_graph(graph_nodes, edges) - graph_ref = build_graph(graph_nodes, edges_with_transpose if add_permutation_attrs else edges) - - param_node = Node(graph, 'placeholder1') - result_node = Node(graph, 'result') - - if add_permutation_attrs: - shape_len = len(nhwc_to_nchw_order) - param_node['permute_attrs'] = PermuteAttrs().update_attrs(attrs=[('shape', 'output:0')]) - param_node.out_node(0)['permutation'] = PermuteAttrs().get_nhwc_to_nchw_permutation(shape_len) - result_node.in_node(0)['permutation'] = PermuteAttrs().get_nhwc_to_nchw_permutation(shape_len) - - PreserveRuntimeInfo().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - assert flag, resp - - assert not param_node.has_valid('permute_attrs') - assert not param_node.out_node(0).has_valid('permutation') - - if add_permutation_attrs: - rt_info = param_node.rt_info.info - old_api_map = rt_info[('old_api_map_order', 0)].info - assert np.array_equal(old_api_map['inverse_order'], nchw_to_nhwc_order) - - rt_info = result_node.rt_info.info - old_api_map = rt_info[('old_api_map_order', 0)].info - assert np.array_equal(old_api_map['order'], nhwc_to_nchw_order) - - def test_auto_disable_nhwc_to_nchw(self): - shape_len = 4 - shape = np.array(range(shape_len)) - add_shape = shape - graph_nodes = { - **regular_op_with_shaped_data('placeholder1', shape, - {'type': 'Parameter', 'rt_info': RTInfo(), 'shape': shape}), - **regular_op_with_shaped_data('placeholder2', shape, - {'type': 'Parameter', 'rt_info': RTInfo(), 'shape': shape}), - **regular_op_with_shaped_data('result', shape, {'type': 'Result', 'rt_info': RTInfo(), 'shape': shape}), - **regular_op_with_shaped_data('add', add_shape, - {'type': 'Add', 'op': 'Add', 'infer': copy_shape_infer}), - } - - graph = build_graph(graph_nodes, edges) - graph.graph['cmd_params'].auto_disable_nhwc_to_nchw = True - graph_ref = build_graph(graph_nodes, edges) - - param_node = Node(graph, 'placeholder1') - result_node = Node(graph, 'result') - - PreserveRuntimeInfo().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - assert 
flag, resp - - rt_info = param_node.rt_info.info - old_api_map = rt_info[('old_api_map_order', 0)].info - assert np.array_equal(old_api_map['inverse_order'], [0, 2, 3, 1]) - - rt_info = result_node.rt_info.info - old_api_map = rt_info[('old_api_map_order', 0)].info - assert np.array_equal(old_api_map['order'], [0, 3, 1, 2]) - - @pytest.mark.parametrize("nhwc_to_nchw_order, nchw_to_nhwc_order,add_permutation_attrs, fft_kind", - [([0, 3, 1, 2], [0, 2, 3, 1], True, 'DFT'), - ([0, 3, 1, 2], [0, 2, 3, 1], True, 'IDFT'), - (None, None, False, 'DFT'), - (None, None, False, 'IDFT'), - ([0, 4, 1, 2, 3], [0, 2, 3, 4, 1], True, 'DFT'), - ([0, 4, 1, 2, 3], [0, 2, 3, 4, 1], True, 'IDFT'), - ]) - def test_transpose_insert_with_two_result_nodes(self, nhwc_to_nchw_order, nchw_to_nhwc_order, - add_permutation_attrs, fft_kind): - shape_len = len(nhwc_to_nchw_order) if add_permutation_attrs else 3 - shape = np.array(range(shape_len)) - add_shape = shape if nhwc_to_nchw_order is None else shape[nhwc_to_nchw_order] - graph = build_graph(nodes_attrs=nodes_for_case_with_two_results, - edges=edges_for_case_with_two_results, - update_attributes={ - 'placeholder1_data': {'shape': int64_array(shape)}, - 'placeholder1': {'shape': int64_array(shape), 'rt_info': RTInfo()}, - 'transpose_parameter_order': { - 'value': np.array(nhwc_to_nchw_order), - 'shape': int64_array(np.array(nhwc_to_nchw_order).shape) - }, - 'transpose_parameter_order_data': { - 'value': np.array(nhwc_to_nchw_order), - 'shape': int64_array(np.array(nhwc_to_nchw_order).shape) - }, - 'fft': {'op': fft_kind, 'type': fft_kind}, - 'add_data': {'shape': add_shape}, - 'fft_data': {'shape': add_shape}, - 'result1': {'shape': shape, 'rt_info': RTInfo()}, - 'result2': {'shape': shape, 'rt_info': RTInfo()}, - }) - - if add_permutation_attrs: - graph_ref = build_graph(nodes_for_case_with_two_results, edges_with_transpose_for_case_with_two_results) - else: - graph_ref = build_graph(nodes_for_case_with_two_results, edges_for_case_with_two_results) - - param1_node = Node(graph, 'placeholder1') - result1_node = Node(graph, 'result1') - result2_node = Node(graph, 'result2') - - if add_permutation_attrs: - shape_len = len(nhwc_to_nchw_order) - param1_node['permute_attrs'] = PermuteAttrs().update_attrs(attrs=[('shape', 'output:0')]) - param1_node.out_node(0)['permutation'] = PermuteAttrs().get_nhwc_to_nchw_permutation(shape_len) - result1_node.in_node(0)['permutation'] = PermuteAttrs().get_nhwc_to_nchw_permutation(shape_len) - result2_node.in_node(0)['permutation'] = PermuteAttrs().get_nhwc_to_nchw_permutation(shape_len) - - PreserveRuntimeInfo().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result1') - assert flag, resp - - assert not param1_node.has_valid('permute_attrs') - assert not param1_node.out_node(0).has_valid('permutation') - - if add_permutation_attrs: - rt_info = param1_node.rt_info.info - old_api_map = rt_info[('old_api_map_order', 0)].info - assert np.array_equal(old_api_map['inverse_order'], nchw_to_nhwc_order) diff --git a/tools/mo/unit_tests/mo/middle/ReluQuantizeFuse_test.py b/tools/mo/unit_tests/mo/middle/ReluQuantizeFuse_test.py deleted file mode 100644 index 1ccd9e76ea503b..00000000000000 --- a/tools/mo/unit_tests/mo/middle/ReluQuantizeFuse_test.py +++ /dev/null @@ -1,308 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.ReluQuantizeFuse import ReluQuantizeFuse, ReluFakeQuantizeMark -from 
openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes = { - # input - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_d': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - - # Relu - 'relu': {'kind': 'op', 'op': 'ReLU'}, - 'relu_d': {'value': None, 'shape': None, 'kind': 'data'}, - - # Quantize - 'const_1': {'op': 'Const', 'kind': 'op'}, - 'const_1_d': {'kind': 'data', 'value': None}, - 'const_2': {'op': 'Const', 'kind': 'op'}, - 'const_2_d': {'kind': 'data', 'value': None}, - 'const_3': {'op': 'Const', 'kind': 'op'}, - 'const_3_d': {'kind': 'data', 'value': None}, - 'const_4': {'op': 'Const', 'kind': 'op'}, - 'const_4_d': {'kind': 'data', 'value': None}, - - 'quantize': {'kind': 'op', 'op': 'FakeQuantize'}, - 'quantize_d': {'value': None, 'shape': None, 'kind': 'data'}, - - 'quantize_1': {'kind': 'op', 'op': 'FakeQuantize'}, - 'quantize_1_d': {'value': None, 'shape': None, 'kind': 'data'}, - - # Result - 'output': {'kind': 'op', 'op': 'Result'}, - 'output_1': {'kind': 'op', 'op': 'Result'}, - - # Ops for extra connection expressing - 'extra_op': {'kind': 'op', 'op': 'SomeOp'}, - 'extra_data': {'kind': 'data'}, -} - -i8_edges = [ - # op to data connections - ('placeholder', 'placeholder_d'), - ('relu', 'relu_d', {'out': 0}), - ('const_1', 'const_1_d'), - ('const_2', 'const_2_d'), - ('const_3', 'const_3_d'), - ('const_4', 'const_4_d'), - ('quantize', 'quantize_d'), - - # data to op connections - ('placeholder_d', 'relu'), - ('relu_d', 'quantize', {'in': 0}), - ('const_1_d', 'quantize', {'in': 1}), - ('const_2_d', 'quantize', {'in': 2}), - ('const_3_d', 'quantize', {'in': 3}), - ('const_4_d', 'quantize', {'in': 4}), - ('quantize_d', 'output'), -] - -ref_i8_edges = [ - # op to data connections - ('placeholder', 'placeholder_d'), - ('const_1', 'const_1_d'), - ('const_2', 'const_2_d'), - ('const_3', 'const_3_d'), - ('const_4', 'const_4_d'), - ('quantize', 'quantize_d'), - - # data to op connections - ('placeholder_d', 'quantize', {'in': 0}), - ('const_1_d', 'quantize', {'in': 1}), - ('const_2_d', 'quantize', {'in': 2}), - ('const_3_d', 'quantize', {'in': 3}), - ('const_4_d', 'quantize', {'in': 4}), - ('quantize_d', 'output'), - - ('placeholder_d', 'relu', {'out': 0}), - ('relu', 'relu_d', {'out': 0}), -] - -i1_edges = [ - # op to data connections - ('placeholder', 'placeholder_d'), - ('relu', 'relu_d', {'out': 0}), - ('const_1', 'const_1_d'), - ('const_3', 'const_3_d'), - ('const_4', 'const_4_d'), - ('quantize', 'quantize_d'), - - # data to op connections - ('placeholder_d', 'relu'), - ('relu_d', 'quantize', {'in': 0}), - ('const_1_d', 'quantize', {'in': 1}), - ('const_1_d', 'quantize', {'in': 2}), - ('const_3_d', 'quantize', {'in': 3}), - ('const_4_d', 'quantize', {'in': 4}), - ('quantize_d', 'output'), -] - -ref_i1_edges = [ - # op to data connections - ('placeholder', 'placeholder_d'), - ('const_1', 'const_1_d'), - ('const_3', 'const_3_d'), - ('const_4', 'const_4_d'), - ('quantize', 'quantize_d'), - - # data to op connections - ('placeholder_d', 'quantize', {'in': 0}), - ('const_1_d', 'quantize', {'in': 1}), - ('const_1_d', 'quantize', {'in': 2}), - ('const_3_d', 'quantize', {'in': 3}), - ('const_4_d', 'quantize', {'in': 4}), - ('quantize_d', 'output'), - - ('placeholder_d', 'relu', {'out': 0}), - ('relu', 'relu_d', {'out': 0}), - -] - -relu_extra_output = [ - # op to data connections - ('placeholder', 'placeholder_d'), - ('relu', 'relu_d', {'out': 0}), - - 
('const_1', 'const_1_d'), - ('const_3', 'const_3_d'), - ('const_4', 'const_4_d'), - ('quantize', 'quantize_d'), - - # data to op connections - ('placeholder_d', 'relu'), - ('relu_d', 'quantize', {'in': 0}), - ('const_1_d', 'quantize', {'in': 1}), - ('const_1_d', 'quantize', {'in': 2}), - ('const_3_d', 'quantize', {'in': 3}), - ('const_4_d', 'quantize', {'in': 4}), - ('quantize_d', 'output'), - - # extra output of relu - ('relu_d', 'extra_op'), - ('extra_op', 'extra_data'), - ('extra_data', 'output_1'), -] - -const_extra = [ - # op to data connections - ('placeholder', 'placeholder_d'), - ('relu', 'relu_d', {'out': 0}), - - ('const_1', 'const_1_d'), - ('const_3', 'const_3_d'), - ('const_4', 'const_4_d'), - ('quantize', 'quantize_d'), - ('quantize_1', 'quantize_1_d'), - - # data to op connections - ('placeholder_d', 'relu', {'out': 0}), - ('relu_d', 'quantize', {'in': 0}), - ('relu_d', 'quantize_1', {'in': 0}), - - ('const_1_d', 'quantize', {'in': 1}), - ('const_1_d', 'quantize', {'in': 2}), - ('const_1_d', 'quantize_1', {'in': 1}), - ('const_1_d', 'quantize_1', {'in': 2}), - - ('const_3_d', 'quantize', {'in': 3}), - ('const_3_d', 'quantize_1', {'in': 3}), - ('const_4_d', 'quantize', {'in': 4}), - ('const_4_d', 'quantize_1', {'in': 4}), - ('quantize_d', 'output'), - ('quantize_1_d', 'output_1'), -] - -ref_const_extra = [ - # op to data connections - ('placeholder', 'placeholder_d'), - ('const_1', 'const_1_d'), - ('const_2', 'const_2_d'), - ('const_3', 'const_3_d'), - ('const_4', 'const_4_d'), - ('quantize', 'quantize_d'), - ('quantize_1', 'quantize_1_d'), - - # data to op connections - ('placeholder_d', 'quantize', {'in': 0, 'out': 0}), - ('placeholder_d', 'quantize_1', {'in': 0, 'out': 0}), - - ('const_1_d', 'quantize', {'out': 0, 'in': 1}), - ('const_1_d', 'quantize', {'out': 0, 'in': 2}), - ('const_2_d', 'quantize_1', {'out': 0, 'in': 1}), - ('const_2_d', 'quantize_1', {'out': 0, 'in': 2}), - - ('const_3_d', 'quantize', {'in': 3}), - ('const_3_d', 'quantize_1', {'in': 3}), - ('const_4_d', 'quantize', {'in': 4}), - ('const_4_d', 'quantize_1', {'in': 4}), - ('quantize_d', 'output'), - ('quantize_1_d', 'output_1'), - - ('placeholder_d', 'relu', {'out': 0}), - ('relu', 'relu_d', {'out': 0}), -] - - -class ReluQuantizeFuseTests(unittest.TestCase): - def test_classic_i8_positive_case(self): - graph = build_graph(nodes, i8_edges, - {'const_1_d': {'value': np.zeros([1, 2, 3, 4])}, 'quantize': {'levels': 256}}, - nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - graph.stage = 'middle' - - graph_ref = build_graph(nodes, ref_i8_edges, nodes_with_edges_only=True) - - ReluFakeQuantizeMark().find_and_replace_pattern(graph) - ReluQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_classic_i8_negative_case(self): - graph = build_graph(nodes, i8_edges, - {'const_1_d': {'value': np.full([1, 2, 3, 4], -1)}, 'quantize': {'levels': 256}}, - nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - graph.stage = 'middle' - - graph_ref = build_graph(nodes, i8_edges, nodes_with_edges_only=True) - - ReluFakeQuantizeMark().find_and_replace_pattern(graph) - ReluQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_classic_i1_positive_case(self): - graph = build_graph(nodes, i1_edges, - {'const_1_d': {'value': np.zeros([1, 2, 3, 4], dtype=np.float32)}, - 
'quantize': {'levels': 2}}, - nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - graph.stage = 'middle' - - graph_ref = build_graph(nodes, ref_i1_edges, nodes_with_edges_only=True) - - ReluFakeQuantizeMark().find_and_replace_pattern(graph) - ReluQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_classic_i1_negative_case(self): - graph = build_graph(nodes, i1_edges, - {'const_1_d': {'value': np.full([1, 2, 3, 4], -1, dtype=np.float32)}, - 'quantize': {'levels': 2}}, - nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - graph.stage = 'middle' - - graph_ref = build_graph(nodes, ref_i1_edges, nodes_with_edges_only=True) - - ReluFakeQuantizeMark().find_and_replace_pattern(graph) - ReluQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - np.array_equal(np.full([1, 2, 3, 4], float('-inf'), dtype=np.float32), graph_ref.node['const_1_d']['value']) - - def test_relu_extra_outputs_i1_case(self): - graph = build_graph(nodes, relu_extra_output, - {'const_1_d': {'value': np.full([1, 2, 3, 4], -1, dtype=np.float32)}, - 'quantize': {'levels': 2}}, - nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - graph.stage = 'middle' - - graph_ref = build_graph(nodes, relu_extra_output, nodes_with_edges_only=True) - - ReluFakeQuantizeMark().find_and_replace_pattern(graph) - ReluQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'relu', check_op_attrs=True) - self.assertTrue(flag, resp) - np.array_equal(np.full([1, 2, 3, 4], float('-inf'), dtype=np.float32), graph_ref.node['const_1_d']['value']) - - def test_const_extra_outputs_i1_case(self): - graph = build_graph(nodes, const_extra, - {'const_1_d': {'value': np.full([1, 2, 3, 4], -1, dtype=np.float32)}, - 'quantize': {'levels': 2}, 'quantize_1': {'levels': 2}}, - nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - graph.stage = 'middle' - - graph_ref = build_graph(nodes, ref_const_extra, nodes_with_edges_only=True) - - ReluFakeQuantizeMark().find_and_replace_pattern(graph) - ReluQuantizeFuse().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'relu', check_op_attrs=True) - self.assertTrue(flag, resp) - np.array_equal(np.full([1, 2, 3, 4], float('-inf'), dtype=np.float32), graph_ref.node['const_1_d']['value']) diff --git a/tools/mo/unit_tests/mo/middle/RemoveDuplicationMemory_test.py b/tools/mo/unit_tests/mo/middle/RemoveDuplicationMemory_test.py deleted file mode 100644 index e08aef013ab987..00000000000000 --- a/tools/mo/unit_tests/mo/middle/RemoveDuplicationMemory_test.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.RemoveDuplicationMemory import RemoveMemoryDuplicationPattern, MergeNeighborSplicePattern -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class RemoveMemoryDuplicationPatternTests(unittest.TestCase): - - def test_remove_duplication(self): - graph = build_graph({'input': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': range(-5, 6)}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 143]}, - 'placeholder_1': 
{'kind': 'op', 'op': None}, - 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': range(-1, 2)}, - 'splice_data_2': {'kind': 'data', 'shape': [1, 39]}, - 'placeholder_2': {'kind': 'op', 'op': None}, - }, - [('input', 'in_node'), ('in_node', 'splice_1'), - ('splice_1', 'splice_data_1'), ('splice_data_1', 'placeholder_1'), - ('in_node', 'splice_2'), ('splice_2', 'splice_data_2'), ('splice_data_2', 'placeholder_2'), - ], - nodes_with_edges_only=True) - RemoveMemoryDuplicationPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'input': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': range(-5, 6)}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 143]}, - 'placeholder_1': {'kind': 'op'}, - 'crop_2': {'kind': 'op', 'op': 'Crop', 'offset': 52, 'dim': 39, 'axis': -1}, - 'splice_data_2': {'kind': 'data', 'shape': [1, 39]}, - 'placeholder_2': {'kind': 'op'}, - }, - [ - ('input', 'in_node'), ('in_node', 'splice_1'), - ('splice_1', 'splice_data_1'), ('splice_data_1', 'placeholder_1'), - ('splice_data_1', 'crop_2'), ('crop_2', 'splice_data_2'), - ('splice_data_2', 'placeholder_2'), - ], - nodes_with_edges_only=True - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder_2') - self.assertTrue(flag, resp) - - def test_remove_duplication_with_crops(self): - graph = build_graph({'input': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': range(-5, 6)}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 143]}, - 'crop_1': {'kind': 'op', 'op': 'Crop', 'offset': 13, 'dim': 13, 'axis': -1}, - 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': range(-1, 2)}, - 'splice_data_2': {'kind': 'data', 'shape': [1, 39]}, - 'crop_2': {'kind': 'op', 'op': 'Crop', 'offset': 13, 'dim': 13, 'axis': -1}, - }, - [('input', 'in_node'), ('in_node', 'splice_1'), - ('splice_1', 'splice_data_1'), ('splice_data_1', 'crop_1'), - ('in_node', 'splice_2'), ('splice_2', 'splice_data_2'), ('splice_data_2', 'crop_2'), - ], - nodes_with_edges_only=True) - RemoveMemoryDuplicationPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'input': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': range(-5, 6)}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 143]}, - 'crop_1': {'kind': 'op', 'op': 'Crop', 'offset': 13, 'dim': 13}, - 'crop_2': {'kind': 'op', 'op': 'Crop', 'offset': 65, 'dim': 13, 'axis': -1}, - }, - [ - ('input', 'in_node'), ('in_node', 'splice_1'), - ('splice_1', 'splice_data_1'), - ('splice_data_1', 'crop_1'), ('splice_data_1', 'crop_2'), - ], - nodes_with_edges_only=True - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'crop_2') - self.assertTrue(flag, resp) - - def test_remove_duplication_neibor(self): - graph = build_graph({'input': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': range(-5, 1)}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 78], 'value': None}, - 'placeholder_1': {'kind': 'op', 'op': None}, - 'splice_2': {'kind': 'op', 'op': 'Splice', 'context': range(0, 2)}, - 'splice_data_2': {'kind': 'data', 'shape': [1, 26], 'value': None}, - 'placeholder_2': {'kind': 'op', 'op': None}, - }, - [('input', 'in_node'), ('in_node', 'splice_1'), - ('splice_1', 'splice_data_1'), ('splice_data_1', 'placeholder_1'), - 
('in_node', 'splice_2'), ('splice_2', 'splice_data_2'), ('splice_data_2', 'placeholder_2'), - ], - nodes_with_edges_only=True) - MergeNeighborSplicePattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'input': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'splice_1': {'kind': 'op', 'op': 'Splice', 'context': range(-5, 2)}, - 'splice_data_1': {'kind': 'data', 'shape': [1, 91], 'value': None}, - 'crop_1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 78, 'axis': -1}, - 'crop_1_data': {'kind': 'data', 'shape': [1, 78]}, - 'placeholder_1': {'kind': 'op'}, - 'crop_2': {'kind': 'op', 'op': 'Crop', 'offset': 65, 'dim': 26, 'axis': -1}, - 'splice_data_2': {'kind': 'data', 'shape': [1, 26], 'value': None}, - 'placeholder_2': {'kind': 'op'}, - }, - [ - ('input', 'in_node'), ('in_node', 'splice_1'), - ('splice_1', 'splice_data_1'), ('splice_data_1', 'crop_1'), - ('crop_1', 'crop_1_data'), ('crop_1_data', 'placeholder_1'), - ('splice_data_1', 'crop_2'), ('crop_2', 'splice_data_2'), - ('splice_data_2', 'placeholder_2'), - ], - nodes_with_edges_only=True - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder_2') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/RemoveUselessConcatSplit_test.py b/tools/mo/unit_tests/mo/middle/RemoveUselessConcatSplit_test.py deleted file mode 100644 index d5c8c94687d909..00000000000000 --- a/tools/mo/unit_tests/mo/middle/RemoveUselessConcatSplit_test.py +++ /dev/null @@ -1,240 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.RemoveUselessConcatSplit import RemoveUselessConcatSplitPattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class RemoveUselessConcatSplitTests(unittest.TestCase): - - def test_useless_concat_split(self): - graph = build_graph({'br1': {'kind': 'op', 'op': None}, - 'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'br2': {'kind': 'op', 'op': None}, - 'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'br3': {'kind': 'op', 'op': None}, - 'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])}, - 'split': {'kind': 'op', 'op': 'Split'}, - 'split_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'split_br1': {'kind': 'op', 'op': None}, - 'split_data_2': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'split_br2': {'kind': 'op', 'op': None}, - 'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'split_br3': {'kind': 'op', 'op': None}, - }, - [('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'), - ('br_data_1', 'concat', {'in': 0}), - ('br_data_2', 'concat', {'in': 1}), - ('br_data_3', 'concat', {'in': 2}), - ('concat', 'concat_data'), - ('concat_data', 'split'), - ('split', 'split_data_1', {'out': 0}), - ('split', 'split_data_2', {'out': 1}), - ('split', 'split_data_3', {'out': 2}), - ('split_data_1', 'split_br1'), - ('split_data_2', 'split_br2'), - ('split_data_3', 'split_br3')]) - RemoveUselessConcatSplitPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'br1': {'kind': 'op', 'op': None}, - 'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'br2': {'kind': 'op', 'op': None}, - 'br_data_2': 
{'kind': 'data', 'shape': int64_array([1, 36])}, - 'br3': {'kind': 'op', 'op': None}, - 'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'split_br1': {'kind': 'op', 'op': None}, - 'split_br2': {'kind': 'op', 'op': None}, - 'split_br3': {'kind': 'op', 'op': None}}, - [('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'), - ('br_data_1', 'split_br1'), - ('br_data_2', 'split_br2'), - ('br_data_3', 'split_br3'), - ]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'split_br3') - self.assertTrue(flag, resp) - - def test_usefull_concat_split(self): - graph = build_graph({'br1': {'kind': 'op', 'op': None}, - 'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'br2': {'kind': 'op', 'op': None}, - 'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'br3': {'kind': 'op', 'op': None}, - 'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])}, - 'split': {'kind': 'op', 'op': 'Split'}, - 'split_data_1': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'split_br1': {'kind': 'op', 'op': None}, - 'split_data_2': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'split_br2': {'kind': 'op', 'op': None}, - 'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'split_br3': {'kind': 'op', 'op': None}, - }, - [('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'), - ('br_data_1', 'concat', {'in': 0}), - ('br_data_2', 'concat', {'in': 1}), - ('br_data_3', 'concat', {'in': 2}), - ('concat', 'concat_data'), - ('concat_data', 'split'), - ('split', 'split_data_1', {'out': 0}), - ('split', 'split_data_2', {'out': 1}), - ('split', 'split_data_3', {'out': 2}), - ('split_data_1', 'split_br1'), - ('split_data_2', 'split_br2'), - ('split_data_3', 'split_br3')]) - RemoveUselessConcatSplitPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'br1': {'kind': 'op', 'op': None}, - 'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'br2': {'kind': 'op', 'op': None}, - 'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'br3': {'kind': 'op', 'op': None}, - 'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])}, - 'split': {'kind': 'op', 'op': 'Split'}, - 'split_data_1': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'split_br1': {'kind': 'op', 'op': None}, - 'split_data_2': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'split_br2': {'kind': 'op', 'op': None}, - 'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'split_br3': {'kind': 'op', 'op': None}, - }, - [('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'), - ('br_data_1', 'concat', {'in': 0}), - ('br_data_2', 'concat', {'in': 1}), - ('br_data_3', 'concat', {'in': 2}), - ('concat', 'concat_data'), - ('concat_data', 'split'), - ('split', 'split_data_1', {'out': 0}), - ('split', 'split_data_2', {'out': 1}), - ('split', 'split_data_3', {'out': 2}), - ('split_data_1', 'split_br1'), - ('split_data_2', 'split_br2'), - ('split_data_3', 'split_br3')]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'split_br3') - self.assertTrue(flag, resp) - - def test_useful_concat_2_outputs_split(self): - graph = build_graph({'br1': {'kind': 'op', 'op': None}, - 'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'br2': {'kind': 'op', 'op': None}, - 'br_data_2': {'kind': 'data', 'shape': 
int64_array([1, 36])}, - 'br3': {'kind': 'op', 'op': None}, - 'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])}, - 'placeholder': {'kind': 'op', 'op': None}, - 'split': {'kind': 'op', 'op': 'Split'}, - 'split_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'split_br1': {'kind': 'op', 'op': None}, - 'split_data_2': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'split_br2': {'kind': 'op', 'op': None}, - 'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'split_br3': {'kind': 'op', 'op': None}, - }, - [('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'), - ('br_data_1', 'concat', {'in': 0}), - ('br_data_2', 'concat', {'in': 1}), - ('br_data_3', 'concat', {'in': 2}), - ('concat', 'concat_data'), - ('concat_data', 'split'), - ('concat_data', 'placeholder'), - ('split', 'split_data_1', {'out': 0}), - ('split', 'split_data_2', {'out': 1}), - ('split', 'split_data_3', {'out': 2}), - ('split_data_1', 'split_br1'), - ('split_data_2', 'split_br2'), - ('split_data_3', 'split_br3')]) - RemoveUselessConcatSplitPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'br1': {'kind': 'op', 'op': None}, - 'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'br2': {'kind': 'op', 'op': None}, - 'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'br3': {'kind': 'op', 'op': None}, - 'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])}, - 'placeholder': {'kind': 'op', 'op': None}, - 'split': {'kind': 'op', 'op': 'Split'}, - 'split_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'split_br1': {'kind': 'op', 'op': None}, - 'split_data_2': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'split_br2': {'kind': 'op', 'op': None}, - 'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'split_br3': {'kind': 'op', 'op': None}, - }, - [('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'), - ('br_data_1', 'concat', {'in': 0}), - ('br_data_2', 'concat', {'in': 1}), - ('br_data_3', 'concat', {'in': 2}), - ('concat', 'concat_data'), - ('concat_data', 'split'), - ('concat_data', 'placeholder'), - ('split', 'split_data_1', {'out': 0}), - ('split', 'split_data_2', {'out': 1}), - ('split', 'split_data_3', {'out': 2}), - ('split_data_1', 'split_br1'), - ('split_data_2', 'split_br2'), - ('split_data_3', 'split_br3')]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'split_br3') - self.assertTrue(flag, resp) - - def test_useless_concat_split_2_outputs(self): - graph = build_graph({'br1': {'kind': 'op', 'op': None}, - 'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'br2': {'kind': 'op', 'op': None}, - 'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'br3': {'kind': 'op', 'op': None}, - 'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': int64_array([1, 108])}, - 'split': {'kind': 'op', 'op': 'Split'}, - 'split_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'split_br1': {'kind': 'op', 'op': None}, - 'split_br1_1': {'kind': 'op', 'op': None}, - 'split_data_2': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'split_br2': {'kind': 'op', 'op': None}, - 'split_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'split_br3': 
{'kind': 'op', 'op': None}, - }, - [('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'), - ('br_data_1', 'concat', {'in': 0}), - ('br_data_2', 'concat', {'in': 1}), - ('br_data_3', 'concat', {'in': 2}), - ('concat', 'concat_data'), - ('concat_data', 'split'), - ('split', 'split_data_1', {'out': 0}), - ('split', 'split_data_2', {'out': 1}), - ('split', 'split_data_3', {'out': 2}), - ('split_data_1', 'split_br1'), - ('split_data_1', 'split_br1_1'), - ('split_data_2', 'split_br2'), - ('split_data_3', 'split_br3')]) - RemoveUselessConcatSplitPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'br1': {'kind': 'op', 'op': None}, - 'br_data_1': {'kind': 'data', 'shape': int64_array([1, 26])}, - 'br2': {'kind': 'op', 'op': None}, - 'br_data_2': {'kind': 'data', 'shape': int64_array([1, 36])}, - 'br3': {'kind': 'op', 'op': None}, - 'br_data_3': {'kind': 'data', 'shape': int64_array([1, 46])}, - 'split_br1': {'kind': 'op', 'op': None}, - 'split_br1_1': {'kind': 'op', 'op': None}, - 'split_br2': {'kind': 'op', 'op': None}, - 'split_br3': {'kind': 'op', 'op': None}}, - [('br1', 'br_data_1'), ('br2', 'br_data_2'), ('br3', 'br_data_3'), - ('br_data_1', 'split_br1'), - ('br_data_1', 'split_br1_1'), - ('br_data_2', 'split_br2'), - ('br_data_3', 'split_br3'), - ]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'split_br3') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/RemoveUselessCrops_test.py b/tools/mo/unit_tests/mo/middle/RemoveUselessCrops_test.py deleted file mode 100644 index fc52faf6d9ebf3..00000000000000 --- a/tools/mo/unit_tests/mo/middle/RemoveUselessCrops_test.py +++ /dev/null @@ -1,329 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.RemoveUselessCrops import RemoveUselessCropsPattern -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class RemoveUselessCropsPatternTests(unittest.TestCase): - - def test_useless_crops(self): - graph = build_graph({'placeholder_in': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 130]}, - 'crop1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 26, 'axis': -1}, - 'crop_data_1': {'kind': 'data', 'shape': [1, 26]}, - 'crop2': {'kind': 'op', 'op': 'Crop', 'offset': 26, 'dim': 26, 'axis': -1}, - 'crop_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'crop3': {'kind': 'op', 'op': 'Crop', 'offset': 52, 'dim': 26, 'axis': -1}, - 'crop_data_3': {'kind': 'data', 'shape': [1, 26]}, - 'crop4': {'kind': 'op', 'op': 'Crop', 'offset': 78, 'dim': 26, 'axis': -1}, - 'crop_data_4': {'kind': 'data', 'shape': [1, 26]}, - 'crop5': {'kind': 'op', 'op': 'Crop', 'offset': 104, 'dim': 26, 'axis': -1}, - 'crop_data_5': {'kind': 'data', 'shape': [1, 26]}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 130]}, - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - }, - [('placeholder_in', 'in_node'), - ('in_node', 'crop1'), ('crop1', 'crop_data_1'), - ('in_node', 'crop2'), ('crop2', 'crop_data_2'), - ('in_node', 'crop3'), ('crop3', 'crop_data_3'), - ('in_node', 'crop4'), ('crop4', 'crop_data_4'), - ('in_node', 'crop5'), ('crop5', 'crop_data_5'), - ('crop_data_1', 'concat'), - ('crop_data_2', 'concat'), - ('crop_data_3', 'concat'), - ('crop_data_4', 'concat'), - ('crop_data_5', 'concat'), - ('concat', 'concat_data'), - ('concat_data', 'placeholder')]) - 
RemoveUselessCropsPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'placeholder_in': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 130]}, - 'crop1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 26, 'axis': -1}, - 'crop_data_1': {'kind': 'data', 'shape': [1, 26]}, - 'crop2': {'kind': 'op', 'op': 'Crop', 'offset': 26, 'dim': 26, 'axis': -1}, - 'crop_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'crop3': {'kind': 'op', 'op': 'Crop', 'offset': 52, 'dim': 26, 'axis': -1}, - 'crop_data_3': {'kind': 'data', 'shape': [1, 26]}, - 'crop4': {'kind': 'op', 'op': 'Crop', 'offset': 78, 'dim': 26, 'axis': -1}, - 'crop_data_4': {'kind': 'data', 'shape': [1, 26]}, - 'crop5': {'kind': 'op', 'op': 'Crop', 'offset': 104, 'dim': 26, 'axis': -1}, - 'crop_data_5': {'kind': 'data', 'shape': [1, 26]}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 130]}, - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - }, - [ - ('placeholder_in', 'in_node'), - ('in_node', 'crop1'), ('crop1', 'crop_data_1'), - ('in_node', 'crop2'), ('crop2', 'crop_data_2'), - ('in_node', 'crop3'), ('crop3', 'crop_data_3'), - ('in_node', 'crop4'), ('crop4', 'crop_data_4'), - ('in_node', 'crop5'), ('crop5', 'crop_data_5'), - ('concat', 'concat_data'), - ('in_node', 'placeholder') - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder') - self.assertTrue(flag, resp) - - def test_useless_crops_type2(self): - graph = build_graph({'placeholder_in': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 130]}, - 'crop1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 26, 'axis': -1}, - 'crop_data_1': {'kind': 'data', 'shape': [1, 26]}, - 'const_26': {'kind': 'op', 'op': 'Const', 'value': 26}, - 'const_26_data': {'kind': 'data', 'value': 26}, - 'crop2': {'kind': 'op', 'op': 'Crop', 'offset': 26, 'axis': -1}, - 'crop_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'crop3': {'kind': 'op', 'op': 'Crop', 'offset': 52, 'dim': 26, 'axis': -1}, - 'crop_data_3': {'kind': 'data', 'shape': [1, 26]}, - 'crop4': {'kind': 'op', 'op': 'Crop', 'offset': 78, 'dim': 26, 'axis': -1}, - 'crop_data_4': {'kind': 'data', 'shape': [1, 26]}, - 'crop5': {'kind': 'op', 'op': 'Crop', 'offset': 104, 'dim': 26, 'axis': -1}, - 'crop_data_5': {'kind': 'data', 'shape': [1, 26]}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 130]}, - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - }, - [('placeholder_in', 'in_node'), - ('in_node', 'crop1'), ('crop1', 'crop_data_1'), - ('in_node', 'crop2', {'in': 0}), ('const_26', 'const_26_data'), - ('const_26_data', 'crop2', {'in': 1}), ('crop2', 'crop_data_2'), - ('in_node', 'crop3'), ('crop3', 'crop_data_3'), - ('in_node', 'crop4'), ('crop4', 'crop_data_4'), - ('in_node', 'crop5'), ('crop5', 'crop_data_5'), - ('crop_data_1', 'concat'), - ('crop_data_2', 'concat'), - ('crop_data_3', 'concat'), - ('crop_data_4', 'concat'), - ('crop_data_5', 'concat'), - ('concat', 'concat_data'), - ('concat_data', 'placeholder')]) - RemoveUselessCropsPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'placeholder_in': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 130]}, - 'crop1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 26, 'axis': -1}, - 'crop_data_1': {'kind': 'data', 'shape': [1, 26]}, - 'const_26': {'kind': 'op', 'op': 'Const', 'value': 26}, - 'const_26_data': {'kind': 'data', 'value': 26}, - 'crop2': {'kind': 'op', 
'op': 'Crop', 'offset': 26, 'dim': 26, 'axis': -1}, - 'crop_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'crop3': {'kind': 'op', 'op': 'Crop', 'offset': 52, 'dim': 26, 'axis': -1}, - 'crop_data_3': {'kind': 'data', 'shape': [1, 26]}, - 'crop4': {'kind': 'op', 'op': 'Crop', 'offset': 78, 'dim': 26, 'axis': -1}, - 'crop_data_4': {'kind': 'data', 'shape': [1, 26]}, - 'crop5': {'kind': 'op', 'op': 'Crop', 'offset': 104, 'dim': 26, 'axis': -1}, - 'crop_data_5': {'kind': 'data', 'shape': [1, 26]}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 130]}, - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - }, - [ - ('placeholder_in', 'in_node'), - ('in_node', 'crop1'), ('crop1', 'crop_data_1'), - ('in_node', 'crop2', {'in': 0}), ('const_26', 'const_26_data'), - ('const_26_data', 'crop2', {'in': 1}), ('crop2', 'crop_data_2'), - ('in_node', 'crop3'), ('crop3', 'crop_data_3'), - ('in_node', 'crop4'), ('crop4', 'crop_data_4'), - ('in_node', 'crop5'), ('crop5', 'crop_data_5'), - ('concat', 'concat_data'), - ('in_node', 'placeholder') - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder') - self.assertTrue(flag, resp) - - def test_useless_crops_type3(self): - graph = build_graph({'placeholder_in': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 130]}, - 'crop1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 26, 'axis': -1}, - 'crop_data_1': {'kind': 'data', 'shape': [1, 26]}, - 'crop2': {'kind': 'op', 'op': 'Crop', 'crop_begin': 26, 'crop_end': 52, 'axis': -1}, - 'crop_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'crop3': {'kind': 'op', 'op': 'Crop', 'offset': 52, 'dim': 26, 'axis': -1}, - 'crop_data_3': {'kind': 'data', 'shape': [1, 26]}, - 'crop4': {'kind': 'op', 'op': 'Crop', 'offset': 78, 'dim': 26, 'axis': -1}, - 'crop_data_4': {'kind': 'data', 'shape': [1, 26]}, - 'crop5': {'kind': 'op', 'op': 'Crop', 'offset': 104, 'dim': 26, 'axis': -1}, - 'crop_data_5': {'kind': 'data', 'shape': [1, 26]}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 130]}, - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - }, - [('placeholder_in', 'in_node'), - ('in_node', 'crop1'), ('crop1', 'crop_data_1'), - ('in_node', 'crop2'), ('crop2', 'crop_data_2'), - ('in_node', 'crop3'), ('crop3', 'crop_data_3'), - ('in_node', 'crop4'), ('crop4', 'crop_data_4'), - ('in_node', 'crop5'), ('crop5', 'crop_data_5'), - ('crop_data_1', 'concat'), - ('crop_data_2', 'concat'), - ('crop_data_3', 'concat'), - ('crop_data_4', 'concat'), - ('crop_data_5', 'concat'), - ('concat', 'concat_data'), - ('concat_data', 'placeholder')]) - RemoveUselessCropsPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'placeholder_in': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 130]}, - 'crop1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 26, 'axis': -1}, - 'crop_data_1': {'kind': 'data', 'shape': [1, 26]}, - 'crop2': {'kind': 'op', 'op': 'Crop', 'crop_begin': 26, 'crop_end': 52, 'axis': -1}, - 'crop_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'crop3': {'kind': 'op', 'op': 'Crop', 'offset': 52, 'dim': 26, 'axis': -1}, - 'crop_data_3': {'kind': 'data', 'shape': [1, 26]}, - 'crop4': {'kind': 'op', 'op': 'Crop', 'offset': 78, 'dim': 26, 'axis': -1}, - 'crop_data_4': {'kind': 'data', 'shape': [1, 26]}, - 'crop5': {'kind': 'op', 'op': 'Crop', 'offset': 104, 'dim': 26, 'axis': -1}, - 'crop_data_5': {'kind': 'data', 'shape': [1, 26]}, - 'concat': {'kind': 'op', 'op': 
'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 130]}, - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - }, - [ - ('placeholder_in', 'in_node'), - ('in_node', 'crop1'), ('crop1', 'crop_data_1'), - ('in_node', 'crop2'), ('crop2', 'crop_data_2'), - ('in_node', 'crop3'), ('crop3', 'crop_data_3'), - ('in_node', 'crop4'), ('crop4', 'crop_data_4'), - ('in_node', 'crop5'), ('crop5', 'crop_data_5'), - ('concat', 'concat_data'), - ('in_node', 'placeholder') - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder') - self.assertTrue(flag, resp) - - def test_useful_crops(self): - graph = build_graph({'placeholder_in': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 130]}, - 'crop1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 26, 'axis': -1}, - 'crop_data_1': {'kind': 'data', 'shape': [1, 26]}, - 'crop2': {'kind': 'op', 'op': 'Crop', 'offset': 26, 'dim': 26, 'axis': -1}, - 'crop_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'crop4': {'kind': 'op', 'op': 'Crop', 'offset': 78, 'dim': 26, 'axis': -1}, - 'crop_data_4': {'kind': 'data', 'shape': [1, 26]}, - 'crop5': {'kind': 'op', 'op': 'Crop', 'offset': 104, 'dim': 26, 'axis': -1}, - 'crop_data_5': {'kind': 'data', 'shape': [1, 26]}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 104]}, - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - }, - [('placeholder_in', 'in_node'), - ('in_node', 'crop1'), ('crop1', 'crop_data_1'), - ('in_node', 'crop2'), ('crop2', 'crop_data_2'), - ('in_node', 'crop4'), ('crop4', 'crop_data_4'), - ('in_node', 'crop5'), ('crop5', 'crop_data_5'), - ('crop_data_1', 'concat'), - ('crop_data_2', 'concat'), - ('crop_data_4', 'concat'), - ('crop_data_5', 'concat'), - ('concat', 'concat_data'), - ('concat_data', 'placeholder')]) - - RemoveUselessCropsPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'placeholder_in': {'kind': 'op', 'op': 'Placeholder'}, - 'in_node': {'kind': 'data', 'shape': [1, 130]}, - 'crop1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 26, 'axis': -1}, - 'crop_data_1': {'kind': 'data', 'shape': [1, 26]}, - 'crop2': {'kind': 'op', 'op': 'Crop', 'offset': 26, 'dim': 26, 'axis': -1}, - 'crop_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'crop4': {'kind': 'op', 'op': 'Crop', 'offset': 78, 'dim': 26, 'axis': -1}, - 'crop_data_4': {'kind': 'data', 'shape': [1, 26]}, - 'crop5': {'kind': 'op', 'op': 'Crop', 'offset': 104, 'dim': 26, 'axis': -1}, - 'crop_data_5': {'kind': 'data', 'shape': [1, 26]}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 104]}, - 'placeholder': {'kind': 'op', 'op': 'Placeholder'}, - }, - [('placeholder_in', 'in_node'), - ('in_node', 'crop1'), ('crop1', 'crop_data_1'), - ('in_node', 'crop2'), ('crop2', 'crop_data_2'), - ('in_node', 'crop4'), ('crop4', 'crop_data_4'), - ('in_node', 'crop5'), ('crop5', 'crop_data_5'), - ('crop_data_1', 'concat'), - ('crop_data_2', 'concat'), - ('crop_data_4', 'concat'), - ('crop_data_5', 'concat'), - ('concat', 'concat_data'), - ('concat_data', 'placeholder')] - ) - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder') - self.assertTrue(flag, resp) - - def test_useless_crops_without_concat(self): - graph = build_graph({'placeholder_in': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 130]}, - 'crop1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 26, 'axis': -1}, - 'crop_data_1': {'kind': 'data', 'shape': [1, 26]}, - 'crop2': {'kind': 'op', 'op': 
'Crop', 'offset': 26, 'dim': 26, 'axis': -1}, - 'crop_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'crop3': {'kind': 'op', 'op': 'Crop', 'offset': 52, 'dim': 26, 'axis': -1}, - 'crop_data_3': {'kind': 'data', 'shape': [1, 26]}, - 'crop4': {'kind': 'op', 'op': 'Crop', 'offset': 78, 'dim': 26, 'axis': -1}, - 'crop_data_4': {'kind': 'data', 'shape': [1, 26]}, - 'crop5': {'kind': 'op', 'op': 'Crop', 'offset': 104, 'dim': 26, 'axis': -1}, - 'crop_data_5': {'kind': 'data', 'shape': [1, 26]}, - 'placeholder_concat': {'kind': 'op', 'op': None}, - 'placeholder_concat_data': {'kind': 'data', 'shape': [1, 100]}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 230]}, - 'placeholder': {'kind': 'op', 'op': None}, - }, - [('placeholder_in', 'in_node'), - ('in_node', 'crop1'), ('crop1', 'crop_data_1'), - ('in_node', 'crop2'), ('crop2', 'crop_data_2'), - ('in_node', 'crop3'), ('crop3', 'crop_data_3'), - ('in_node', 'crop4'), ('crop4', 'crop_data_4'), - ('in_node', 'crop5'), ('crop5', 'crop_data_5'), - ('placeholder_concat', 'placeholder_concat_data'), - ('crop_data_1', 'concat', {'in': 0}), - ('crop_data_2', 'concat', {'in': 1}), - ('crop_data_3', 'concat', {'in': 2}), - ('crop_data_4', 'concat', {'in': 3}), - ('crop_data_5', 'concat', {'in': 4}), - ('placeholder_concat_data', 'concat', {'in': 5}), - ('concat', 'concat_data'), - ('concat_data', 'placeholder')]) - RemoveUselessCropsPattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'placeholder_in': {'kind': 'op', 'op': 'Parameter'}, - 'in_node': {'kind': 'data', 'shape': [1, 130]}, - 'crop1': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 26, 'axis': -1}, - 'crop_data_1': {'kind': 'data', 'shape': [1, 26]}, - 'crop2': {'kind': 'op', 'op': 'Crop', 'offset': 26, 'dim': 26, 'axis': -1}, - 'crop_data_2': {'kind': 'data', 'shape': [1, 26]}, - 'crop3': {'kind': 'op', 'op': 'Crop', 'offset': 52, 'dim': 26, 'axis': -1}, - 'crop_data_3': {'kind': 'data', 'shape': [1, 26]}, - 'crop4': {'kind': 'op', 'op': 'Crop', 'offset': 78, 'dim': 26, 'axis': -1}, - 'crop_data_4': {'kind': 'data', 'shape': [1, 26]}, - 'crop5': {'kind': 'op', 'op': 'Crop', 'offset': 104, 'dim': 26, 'axis': -1}, - 'crop_data_5': {'kind': 'data', 'shape': [1, 26]}, - 'placeholder_concat': {'kind': 'op', 'op': None}, - 'placeholder_concat_data': {'kind': 'data', 'shape': [1, 100]}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 230]}, - 'placeholder': {'kind': 'op', 'op': 'Parameter'}, - }, - [ - ('placeholder_in', 'in_node'), - ('in_node', 'crop1'), ('crop1', 'crop_data_1'), - ('in_node', 'crop2'), ('crop2', 'crop_data_2'), - ('in_node', 'crop3'), ('crop3', 'crop_data_3'), - ('in_node', 'crop4'), ('crop4', 'crop_data_4'), - ('in_node', 'crop5'), ('crop5', 'crop_data_5'), - ('placeholder_concat', 'placeholder_concat_data'), - ('in_node', 'concat', {'in': 4}), - ('placeholder_concat_data', 'concat', {'in': 5}), - ('concat', 'concat_data'), - ('concat_data', 'placeholder')]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'placeholder') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/RemoveUselessPad_test.py b/tools/mo/unit_tests/mo/middle/RemoveUselessPad_test.py deleted file mode 100644 index a99a663a5dc7c0..00000000000000 --- a/tools/mo/unit_tests/mo/middle/RemoveUselessPad_test.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from 
openvino.tools.mo.middle.RemoveUselessPad import RemoveUselessPad -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, result, connect, \ - connect_data - - -class RemoveUselessPadTests(unittest.TestCase): - def test_useless_pad_constant_input(self): - nodes = { - **regular_op_with_shaped_data('placeholder', [1, 10, 20, 3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('pad', [1, 10, 20, 3], {'type': 'Pad', 'op': 'Pad'}), - **valued_const_with_data('pads_begin', int64_array([0, 0, 0, 0])), - **valued_const_with_data('pads_end', int64_array([0, 0, 0, 0])), - **valued_const_with_data('fill_value', np.array(1)), - **result('result'), - } - edges = [*connect('placeholder', '0:pad'), - *connect('pads_begin', '1:pad'), - *connect('pads_end', '2:pad'), - *connect('fill_value', '3:pad'), - *connect('pad', 'result'), - ] - graph = build_graph(nodes, edges) - RemoveUselessPad().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes, [*connect('placeholder', 'result')]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) - - def test_not_useless_pad_constant_input(self): - nodes = { - **regular_op_with_shaped_data('placeholder', [1, 10, 20, 3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('pad', [1, 10, 20, 3], {'type': 'Pad', 'op': 'Pad'}), - **valued_const_with_data('pads_begin', int64_array([0, 0, 0, 0])), - **valued_const_with_data('pads_end', int64_array([0, 1, 0, 0])), - **valued_const_with_data('fill_value', np.array(1)), - **result('result'), - } - edges = [*connect('placeholder', '0:pad'), - *connect('pads_begin', '1:pad'), - *connect('pads_end', '2:pad'), - *connect('fill_value', '3:pad'), - *connect('pad', 'result'), - ] - graph = build_graph(nodes, edges) - RemoveUselessPad().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes, edges) - - (flag, resp) = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) - - def test_not_useless_pad_non_constant_input(self): - nodes = { - **regular_op_with_shaped_data('placeholder', [10, 20, 3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('shape_of_1', [3], {'type': 'ShapeOf'}), - **regular_op_with_shaped_data('sub', [3], {'type': 'Subtract', 'op': 'Sub'}), - **valued_const_with_data('desired_output_size', int64_array([10, 20, 3])), - **regular_op_with_shaped_data('pad', [10, 20, 3], {'type': 'Pad', 'op': 'Pad'}), - **valued_const_with_data('fill_value', np.array(1)), - **result('result'), - } - edges = [*connect('placeholder', '0:pad'), - *connect('placeholder', 'shape_of_1'), - *connect('shape_of_1', '0:sub'), - *connect('desired_output_size', '1:sub'), - *connect('sub', '1:pad'), - *connect_data('sub', '2:pad'), - *connect('fill_value', '3:pad'), - *connect('pad', 'result'), - ] - graph = build_graph(nodes, edges) - RemoveUselessPad().find_and_replace_pattern(graph) - ref_graph = build_graph(nodes, edges) - - (flag, resp) = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/ReplaceMemoryOffsetWithSplice_test.py b/tools/mo/unit_tests/mo/middle/ReplaceMemoryOffsetWithSplice_test.py deleted file mode 100644 index e40fc71479a2c3..00000000000000 --- a/tools/mo/unit_tests/mo/middle/ReplaceMemoryOffsetWithSplice_test.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (C) 2018-2024 Intel 
Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.ReplaceMemoryOffsetWithSplice import ReplaceMemoryOffsetNodePattern -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class ReplaceMemoryOffsetNodePatternTests(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.nodes_attributes = { - 'in_placeholder': {'kind': 'op', 'op': 'placeholder'}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'memoryoffset': {'kind': 'op', 'op': 'MemoryOffset', 't': -5, - 'pair_name': 'memoryoffset_2', 'has_default': False}, - 'memoryoffset_data': {'kind': 'data', 'shape': [1, 13]}, - 'memoryoffset_2': {'kind': 'op', 'op': 'MemoryOffset', 't': -5, - 'pair_name': 'memoryoffset', 'has_default': False, - 'in_ports_count': 1}, - 'memoryoffset_2_data': {'kind': 'data', 'shape': [1, 13]}, - 'crop_data': {'kind': 'data', 'shape': [1, 13]}, - 'out_placeholder': {'kind': 'op', 'op': 'placeholder'}, - 'opoutput': {'kind': 'op', 'op': 'OpOutput'}, - } - - def test_memoryoffset_pos(self): - graph = build_graph(self.nodes_attributes, - [('in_placeholder', 'in_node'), - ('in_node', 'memoryoffset'), - ('memoryoffset', 'memoryoffset_data'), - ('memoryoffset_data', 'opoutput'), - ('memoryoffset_2', 'memoryoffset_2_data'), - ('memoryoffset_2_data', 'out_placeholder')]) - memoryoffset_node = Node(graph, 'memoryoffset') - memoryoffset_node['t'] = 5 - ReplaceMemoryOffsetNodePattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'in_placeholder': {'kind': 'op', 'op': 'placeholder'}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'splice': {'kind': 'op', 'op': 'Splice', 'context': range(0, 6)}, - 'splice_data': {'kind': 'data', 'shape': [1, 78]}, - 'crop': {'kind': 'op', 'op': 'Crop', 'offset': 130, 'dim': 13}, - 'crop_data': {'kind': 'data', 'shape': [1, 13]}, - 'out_placeholder': {'kind': 'op', 'op': 'placeholder'}, - }, - [ - ('in_placeholder', 'in_node'), - ('in_node', 'splice'), - ('splice', 'splice_data'), - ('splice_data', 'crop'), - ('crop', 'crop_data'), - ('crop_data', 'out_placeholder') - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'out_placeholder') - self.assertTrue(flag, resp) - - def test_memoryoffset_neg(self): - graph = build_graph(self.nodes_attributes, - [('in_placeholder', 'in_node'), - ('in_node', 'memoryoffset'), - ('memoryoffset', 'memoryoffset_data'), - ('memoryoffset_data', 'opoutput'), - ('memoryoffset_2', 'memoryoffset_2_data'), - ('memoryoffset_2_data', 'out_placeholder')]) - memoryoffset_node = Node(graph, 'memoryoffset') - memoryoffset_node['t'] = -5 - ReplaceMemoryOffsetNodePattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'in_placeholder': {'kind': 'op', 'op': 'placeholder'}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'splice': {'kind': 'op', 'op': 'Splice', 'context': range(-5, 1)}, - 'splice_data': {'kind': 'data', 'shape': [1, 78]}, - 'crop': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 13}, - 'memoryoffset_2_data': {'kind': 'data', 'shape': [1, 13]}, - 'out_placeholder': {'kind': 'op', 'op': 'placeholder'}, - }, - [ - ('in_placeholder', 'in_node'), - ('in_node', 'splice'), - ('splice', 'splice_data'), - ('splice_data', 'crop'), - ('crop', 'memoryoffset_2_data'), - ('memoryoffset_2_data', 'out_placeholder') - ] - ) - (flag, resp) = compare_graphs(graph, ref_graph, 'out_placeholder') - self.assertTrue(flag, resp) - - def 
test_memoryoffset_neg_0(self): - graph = build_graph(self.nodes_attributes, - [('in_placeholder', 'in_node'), - ('in_node', 'memoryoffset'), - ('memoryoffset', 'memoryoffset_data'), - ('memoryoffset_data', 'opoutput'), - ('memoryoffset_2', 'memoryoffset_2_data'), - ('memoryoffset_2_data', 'out_placeholder'), - ('in_node', 'out_placeholder')]) - memoryoffset_node = Node(graph, 'memoryoffset') - memoryoffset_node['t'] = -5 - ReplaceMemoryOffsetNodePattern().find_and_replace_pattern(graph) - ref_graph = build_graph({'in_placeholder': {'kind': 'op', 'op': 'placeholder'}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'splice': {'kind': 'op', 'op': 'Splice', 'context': range(-5, 1)}, - 'splice_data': {'kind': 'data', 'shape': [1, 78]}, - 'crop': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 13}, - 'memoryoffset_2_data': {'kind': 'data', 'shape': [1, 13]}, - 'out_placeholder': {'kind': 'op', 'op': 'placeholder'}, - }, - [ - ('in_placeholder', 'in_node'), - ('in_node', 'splice'), - ('splice', 'splice_data'), - ('splice_data', 'crop'), - ('crop', 'memoryoffset_2_data'), - ('memoryoffset_2_data', 'out_placeholder'), - ('in_node', 'out_placeholder') - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'out_placeholder') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/ReplacePNormNodePattern_test.py b/tools/mo/unit_tests/mo/middle/ReplacePNormNodePattern_test.py deleted file mode 100644 index 5e83fa2e946b55..00000000000000 --- a/tools/mo/unit_tests/mo/middle/ReplacePNormNodePattern_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.ReplacePNorm import ReplacePNormNodePattern -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class ReplacePNormNodePatternTests(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.nodes_attributes = { - 'placeholder': {'kind': 'op', 'op': None}, - 'in_node': {'kind': 'data', 'shape': [1, 3500]}, - 'pnorm': {'kind': 'op', 'op': 'pnorm', 'group': 10, 'p': 2.0}, - 'pnorm_data': {'kind': 'data', 'shape': [1, 350]}, - 'out_placeholder': {'kind': 'op', 'op': 'placeholder'}, - } - - def test_pnorm(self): - graph = build_graph(self.nodes_attributes, - [('placeholder', 'in_node'), - ('in_node', 'pnorm'), - ('pnorm', 'pnorm_data'), - ('pnorm_data', 'out_placeholder')]) - ReplacePNormNodePattern().find_and_replace_pattern(graph) - - ref_graph = build_graph({'in_placeholder': {'kind': 'op', 'op': None}, - 'in_node': {'kind': 'data', 'shape': [1, 3500]}, - 'pow_const': {'kind': 'op', 'value': 2.0}, - 'pow_const_d': {'kind': 'data'}, - 'pow': {'kind': 'op', 'op': 'Pow'}, - 'pow_data': {'kind': 'data'}, - 'reshape': {'kind': 'op', 'op': 'Reshape'}, - 'reshape_data': {'kind': 'data'}, - 'const': {'kind': 'op', 'op': 'Const', 'value': [1, 350, 10]}, - 'const_data': {'kind': 'data'}, - 'reduce': {'kind': 'op', 'op': 'ReduceSum'}, - 'reduce_data': {'kind': 'data'}, - 'const_1': {'kind': 'op', 'op': 'Const', 'value': 2}, - 'const_data_1': {'kind': 'data'}, - - 'invpow_const': {'kind': 'op', 'value': 0.5}, - 'invpow_const_d': {'kind': 'data'}, - 'invpow': {'kind': 'op', 'op': 'Pow'}, - 'invpow_data': {'kind': 'data'}, - 'out_placeholder': {'kind': 'op', 'op': 'placeholder'}, - }, - [ - ('in_placeholder', 'in_node'), - ('in_node', 'pow', {'in': 0}), - ('pow', 'pow_data'), - ('pow_data', 'reshape', {'in': 0}), - ('reshape', 'reshape_data'), - 
('const', 'const_data'), - ('const_data', 'reshape', {'in': 1}), - ('reshape_data', 'reduce', {'in': 0}), - ('const_1', 'const_data_1'), - ('const_data_1', 'reduce', {'in': 1}), - ('reduce', 'reduce_data'), - ('reduce_data', 'invpow', {'in': 0}), - ('invpow', 'invpow_data'), - ('invpow_data', 'out_placeholder'), - - ('pow_const', 'pow_const_d'), - ('invpow_const', 'invpow_const_d'), - ('pow_const_d', 'pow', {'in': 1}), - ('invpow_const_d', 'invpow', {'in': 1}), - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'out_placeholder', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/ReplaceSpliceNodePattern_test.py b/tools/mo/unit_tests/mo/middle/ReplaceSpliceNodePattern_test.py deleted file mode 100644 index e8831b82741862..00000000000000 --- a/tools/mo/unit_tests/mo/middle/ReplaceSpliceNodePattern_test.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.ReplaceSpliceNodePattern import ReplaceSpliceNodePattern -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class ReplaceSpliceNodePatternTests(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.nodes_attributes = { - 'placeholder': {'kind': 'op', 'op': None}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'splice': {'kind': 'op', 'op': 'Splice', 'context': range(-5, 6), 'const_dim': 0}, - 'splice_data': {'kind': 'data', 'shape': [1, 143]}, - 'out_placeholder': {'kind': 'op', 'op': 'placeholder'}, - } - - def test_splice(self): - graph = build_graph(self.nodes_attributes, - [('placeholder', 'in_node'), - ('in_node', 'splice'), - ('splice', 'splice_data'), - ('splice_data', 'out_placeholder')]) - ReplaceSpliceNodePattern().find_and_replace_pattern(graph) - - ref_graph = build_graph({'in_placeholder': {'kind': 'op', 'op': None}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - - 'fill_value': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data': {'kind': 'data'}, - - 'memory_in': {'kind': 'op', 'op': 'ReadValue'}, - 'memory_in_data': {'kind': 'data'}, - 'crop_mem': {'kind': 'op', 'op': 'Crop', 'offset': 13, 'dim': 130}, - 'crop_mem_data': {'kind': 'data'}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data', 'shape': [1, 143]}, - 'memory_out': {'kind': 'op', 'op': 'Assign'}, - 'memory_out_data': {'kind': 'data'}, - 'result': {'kind': 'op', 'op': 'Result'}, - 'out_placeholder': {'kind': 'op', 'op': 'placeholder'}, - }, - [ - ('in_placeholder', 'in_node'), - - ('fill_value', 'fill_value_data'), ('fill_value_data', 'memory_in'), - - ('memory_in', 'memory_in_data'), - ('memory_in_data', 'crop_mem'), - ('crop_mem', 'crop_mem_data'), - ('crop_mem_data', 'concat', {'in': 0}), - ('in_node', 'concat', {'in': 1}), - ('concat', 'concat_data'), - ('concat_data', 'memory_out'), - ('memory_out', 'memory_out_data'), - ('memory_out_data', 'result'), - ('concat_data', 'out_placeholder'), - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'out_placeholder') - self.assertTrue(flag, resp) - - def test_splice_with_constdim(self): - graph = build_graph(self.nodes_attributes, - [('placeholder', 'in_node'), - ('in_node', 'splice'), - ('splice', 'splice_data'), - ('splice_data', 'out_placeholder')]) - Node(graph, 
'splice')['const_dim'] = 10 - Node(graph, 'splice_data')['shape'] = [1, 43] - ReplaceSpliceNodePattern().find_and_replace_pattern(graph) - - ref_graph = build_graph({'in_placeholder': {'kind': 'op', 'op': None}, - 'in_node': {'kind': 'data', 'shape': [1, 13]}, - 'split': {'kind': 'op', 'op': 'Split'}, - 'split_data_0': {'kind': 'data'}, - 'split_data_1': {'kind': 'data'}, - - 'fill_value': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_data': {'kind': 'data'}, - - 'memory_in': {'kind': 'op', 'op': 'ReadValue'}, - 'memory_in_data': {'kind': 'data'}, - 'crop_mem': {'kind': 'op', 'op': 'Crop', 'offset': 3, 'dim': 30}, - 'crop_mem_data': {'kind': 'data'}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data'}, - 'memory_out': {'kind': 'op', 'op': 'Assign'}, - 'memory_out_data': {'kind': 'data'}, - 'result': {'kind': 'op', 'op': 'Result'}, - - - 'fill_value_2': {'kind': 'op', 'op': 'Const', 'value': int64_array([0])}, - 'fill_value_2_data': {'kind': 'data'}, -\ - 'memory_in_constdims': {'kind': 'op', 'op': 'ReadValue'}, - 'memory_in_constdims_data': {'kind': 'data'}, - 'crop_mem_constdims': {'kind': 'op', 'op': 'Crop', 'offset': 10, 'dim': 100}, - 'crop_mem_constdims_data': {'kind': 'data'}, - 'concat_constdims': {'kind': 'op', 'op': 'Concat'}, - 'concat_constdims_data': {'kind': 'data'}, - 'memory_out_constdims': {'kind': 'op', 'op': 'Assign'}, - 'memory_out_constdims_data': {'kind': 'data'}, - 'result_constdims': {'kind': 'op', 'op': 'Result'}, - 'crop_first_constdims': {'kind': 'op', 'op': 'Crop', 'offset': 0, 'dim': 10}, - 'crop_first_constdims_data': {'kind': 'data'}, - 'concat_all': {'kind': 'op', 'op': 'Concat'}, - 'concat_all_data': {'kind': 'data', 'shape': [1, 43]}, - 'out_placeholder': {'kind': 'op', 'op': 'placeholder'}, - - 'axis_const': {'kind': 'op'}, - 'axis_const_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'split_dim_const': {'kind': 'op'}, - 'split_dim_const_data': {'value': None, 'shape': None, 'kind': 'data'}, - - }, - [ - ('in_placeholder', 'in_node'), - ('in_node', 'split', {'in': 0}), - ('split', 'split_data_0', {'out': 0}), - ('split', 'split_data_1', {'out': 1}), - - ('fill_value', 'fill_value_data'), ('fill_value_data', 'memory_in'), - - ('memory_in', 'memory_in_data'), - ('memory_in_data', 'crop_mem'), - ('crop_mem', 'crop_mem_data'), - ('crop_mem_data', 'concat', {'in': 0}), - ('split_data_0', 'concat', {'in': 1}), - ('concat', 'concat_data'), - ('concat_data', 'memory_out'), - ('memory_out', 'memory_out_data'), - ('memory_out_data', 'result'), - - ('fill_value_2', 'fill_value_2_data'), ('fill_value_2_data', 'memory_in_constdims'), - - ('memory_in_constdims', 'memory_in_constdims_data'), - ('memory_in_constdims_data', 'crop_mem_constdims'), - ('crop_mem_constdims', 'crop_mem_constdims_data'), - ('crop_mem_constdims_data', 'concat_constdims', {'in': 0}), - ('split_data_1', 'concat_constdims', {'in': 1}), - ('concat_constdims', 'concat_constdims_data'), - ('concat_constdims_data', 'memory_out_constdims'), - ('memory_out_constdims', 'memory_out_constdims_data'), - ('memory_out_constdims_data', 'result_constdims'), - ('concat_constdims_data', 'crop_first_constdims'), - ('crop_first_constdims', 'crop_first_constdims_data'), - ('crop_first_constdims_data', 'concat_all', {'in': 1}), - ('concat_data', 'concat_all', {'in': 0}), - ('concat_all', 'concat_all_data'), - ('concat_all_data', 'out_placeholder'), - - ('axis_const', 'axis_const_data'), - ('split_dim_const', 'split_dim_const_data'), - ('axis_const_data', 
'split', {'in': 1}), - ('split_dim_const_data', 'split', {'in': 2}), - - ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'out_placeholder') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/ReverseTransposeNormalization_test.py b/tools/mo/unit_tests/mo/middle/ReverseTransposeNormalization_test.py deleted file mode 100644 index ed3272b9587a39..00000000000000 --- a/tools/mo/unit_tests/mo/middle/ReverseTransposeNormalization_test.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.ReverseTransposeNormalization import ReverseTransposeNormalization -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, result, connect - - -class ReverseTransposeNormalizationTests(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.nodes_attributes = { - **regular_op_with_shaped_data('placeholder', [1, 10, 20, 3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('transpose', [3, 20, 10, 1], - {'type': 'Transpose', 'op': 'Transpose', 'reverse_order': True}), - **result('result'), - } - - cls.ref_nodes_attributes = { - **regular_op_with_shaped_data('placeholder', [1, 10, 20, 3], {'type': 'Parameter'}), - **regular_op_with_shaped_data('transpose', [3, 20, 10, 1], - {'type': 'Transpose', 'op': 'Transpose'}), - **valued_const_with_data('transpose_order', np.array([3, 2, 1, 0])), - **result('result'), - } - - def test_splice(self): - graph = build_graph(self.nodes_attributes, - [*connect('placeholder', '0:transpose'), - *connect('transpose', 'result'), ]) - ReverseTransposeNormalization().find_and_replace_pattern(graph) - graph.clean_up() - - ref_graph = build_graph(self.ref_nodes_attributes, - [*connect('placeholder', '0:transpose'), - *connect('transpose_order', '1:transpose'), - *connect('transpose', 'result'), ] - ) - - (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/SSliceComplex_test.py b/tools/mo/unit_tests/mo/middle/SSliceComplex_test.py deleted file mode 100644 index 0b26541ef4b407..00000000000000 --- a/tools/mo/unit_tests/mo/middle/SSliceComplex_test.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.middle.SSliceComplex import SSliceComplex -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, connect, \ - regular_op_with_shaped_data, valued_const_with_data - -graph_node_attrs = { - **regular_op_with_shaped_data('placeholder', int64_array([3, 100, 100, 2]), - {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('strided_slice_real', int64_array([3, 100, 100]), - { - 'type': 'StridedSlice', 'op': 'StridedSlice', 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), 'ellipsis_mask': int64_array([1]), 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0, 1]), - 'slices': np.array([Ellipsis, 0]) - }), - **regular_op_with_shaped_data('strided_slice_imag', int64_array([3, 100, 100]), - { - 'type': 'StridedSlice', 'op': 'StridedSlice', 'begin_mask': int64_array([1]), 
- 'end_mask': int64_array([1]), 'ellipsis_mask': int64_array([1]), 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0, 1]), - 'slices': np.array([Ellipsis, 1]) - }), - **regular_op_with_shaped_data('complex', int64_array([3, 100, 100, 2]), {'op': 'Complex'}), - **valued_const_with_data('real_begin', int64_array([0, 0])), - **valued_const_with_data('imag_begin', int64_array([0, 1])), - **valued_const_with_data('real_end', int64_array([0, 1])), - **valued_const_with_data('imag_end', int64_array([0, 2])), - **valued_const_with_data('real_strides', int64_array([1, 1])), - **valued_const_with_data('imag_strides', int64_array([1, 1])), - **regular_op_with_shaped_data('abs', int64_array([3, 100, 100, 2]), {'type': 'Abs', 'op': 'Abs'}), - **result('output'), -} - -graph_edges = [ - ('placeholder', 'placeholder_d', {'out': 0}), - ('placeholder_d', 'strided_slice_real', {'out': 0, 'in': 0}), - ('placeholder_d', 'strided_slice_imag', {'out': 0, 'in': 0}), - *connect('strided_slice_real:0', '0:complex'), - *connect('strided_slice_imag:0', '1:complex'), - *connect('real_begin:0', '1:strided_slice_real'), - *connect('imag_begin:0', '1:strided_slice_imag'), - *connect('real_end:0', '2:strided_slice_real'), - *connect('imag_end:0', '2:strided_slice_imag'), - *connect('real_strides:0', '3:strided_slice_real'), - *connect('imag_strides:0', '3:strided_slice_imag'), - *connect('complex:0', '0:abs'), - *connect('abs:0', 'output'), -] - - -ref_graph_node_attrs = { - **regular_op_with_shaped_data('placeholder', int64_array([3, 100, 100, 2]), - {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('abs', int64_array([3, 100, 100, 2]), {'type': 'Abs', 'op': 'Abs'}), - **result('output'), -} - -ref_graph_edges = [ - *connect('placeholder:0', '0:abs'), - *connect('abs:0', 'output'), -] - - -non_transformed_graph_node_attrs = { - **regular_op_with_shaped_data('placeholder_0', int64_array([3, 100, 100, 2]), - {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('placeholder_1', int64_array([3, 100, 100, 2]), - {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('strided_slice_real', int64_array([3, 100, 100]), - { - 'type': 'StridedSlice', 'op': 'StridedSlice', 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), 'ellipsis_mask': int64_array([1]), 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0, 1]), - 'slices': np.array([Ellipsis, 0]) - }), - **regular_op_with_shaped_data('strided_slice_imag', int64_array([3, 100, 100]), - { - 'type': 'StridedSlice', 'op': 'StridedSlice', 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), 'ellipsis_mask': int64_array([1]), 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0, 1]), - 'slices': np.array([Ellipsis, 1]) - }), - **regular_op_with_shaped_data('complex', int64_array([3, 100, 100, 2]), {'op': 'Complex'}), - **valued_const_with_data('real_begin', int64_array([0, 0])), - **valued_const_with_data('imag_begin', int64_array([0, 1])), - **valued_const_with_data('real_end', int64_array([0, 1])), - **valued_const_with_data('imag_end', int64_array([0, 2])), - **valued_const_with_data('real_strides', int64_array([1, 1])), - **valued_const_with_data('imag_strides', int64_array([1, 1])), - **regular_op_with_shaped_data('abs', int64_array([3, 100, 100, 2]), {'type': 'Abs', 'op': 'Abs'}), - **result('output'), -} - -non_transformed_graph_edges = [ - *connect('placeholder_0:0', '0:strided_slice_real'), - *connect('placeholder_1:0', 
'0:strided_slice_imag'), - *connect('strided_slice_real:0', '0:complex'), - *connect('strided_slice_imag:0', '1:complex'), - *connect('real_begin:0', '1:strided_slice_real'), - *connect('imag_begin:0', '1:strided_slice_imag'), - *connect('real_end:0', '2:strided_slice_real'), - *connect('imag_end:0', '2:strided_slice_imag'), - *connect('real_strides:0', '3:strided_slice_real'), - *connect('imag_strides:0', '3:strided_slice_imag'), - *connect('complex:0', '0:abs'), - *connect('abs:0', 'output'), -] - - -graph_node_attrs_2 = { - **regular_op_with_shaped_data('placeholder', int64_array([3, 100, 2, 66, 34]), - {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('strided_slice_real', int64_array([3, 100, 66, 34]), - { - 'type': 'StridedSlice', 'op': 'StridedSlice', - 'begin_mask': int64_array([0, 0, 1, 0, 0]), - 'end_mask': int64_array([0, 0, 1, 0, 0]), - 'ellipsis_mask': int64_array([0, 0, 0, 0, 0]), - 'new_axis_mask': int64_array([0, 0, 0, 0, 0]), - 'shrink_axis_mask': int64_array([0, 0, 1, 0, 0]), - 'slices': np.array([slice(None, None, 1), - slice(None, None, 1), - 0, - slice(None, None, 1), - slice(None, None, 1)]) - }), - **regular_op_with_shaped_data('strided_slice_imag', int64_array([3, 100, 66, 34]), - { - 'type': 'StridedSlice', 'op': 'StridedSlice', - 'begin_mask': int64_array([0, 0, 1, 0, 0]), - 'end_mask': int64_array([0, 0, 1, 0, 0]), - 'ellipsis_mask': int64_array([0, 0, 0, 0, 0]), - 'new_axis_mask': int64_array([0, 0, 0, 0, 0]), - 'shrink_axis_mask': int64_array([0, 0, 1, 0, 0]), - 'slices': np.array([slice(None, None, 1), - slice(None, None, 1), - 1, - slice(None, None, 1), - slice(None, None, 1)]) - }), - **regular_op_with_shaped_data('complex', int64_array([3, 100, 66, 34, 2]), {'op': 'Complex'}), - **valued_const_with_data('real_begin', int64_array([0, 0, 0, 0, 0])), - **valued_const_with_data('imag_begin', int64_array([0, 0, 1, 0, 0])), - **valued_const_with_data('real_end', int64_array([0, 0, 1, 0, 0])), - **valued_const_with_data('imag_end', int64_array([0, 0, 2, 0, 0])), - **valued_const_with_data('real_strides', int64_array([1, 1, 1, 1, 1])), - **valued_const_with_data('imag_strides', int64_array([1, 1, 1, 1, 1])), - **regular_op_with_shaped_data('abs', int64_array([3, 100, 66, 34, 2]), {'type': 'Abs', 'op': 'Abs'}), - **result('output'), -} - - -ref_graph_node_attrs_2 = { - **regular_op_with_shaped_data('placeholder', int64_array([3, 100, 2, 66, 34]), - {'type': 'Parameter', 'op': 'Parameter'}), - **valued_const_with_data('perm', int64_array([0, 1, 3, 4, 2])), - **regular_op_with_shaped_data('transpose', int64_array([3, 100, 66, 34, 2]), - {'type': 'Transpose', 'op': 'Transpose'}), - **regular_op_with_shaped_data('abs', int64_array([3, 100, 66, 34, 2]), {'type': 'Abs', 'op': 'Abs'}), - **result('output'), -} - -ref_graph_edges_2 = [ - *connect('placeholder:0', '0:transpose'), - *connect('perm:0', '1:transpose'), - *connect('transpose:0', '0:abs'), - *connect('abs:0', 'output'), -] - - -graph_node_attrs_3 = { - **regular_op_with_shaped_data('placeholder', int64_array([3, 100, 2, 66, 34]), - {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_shaped_data('strided_slice_real', int64_array([3, 100, 66, 34]), - { - 'type': 'StridedSlice', 'op': 'StridedSlice', - 'begin_mask': int64_array([0, 0, 1, 0, 0]), - 'end_mask': int64_array([0, 0, 1, 0, 0]), - 'ellipsis_mask': int64_array([0, 0, 0, 0, 0]), - 'new_axis_mask': int64_array([0, 0, 0, 0, 0]), - 'shrink_axis_mask': int64_array([0, 0, 1, 0, 0]), - 'slices': np.array([slice(None, None, 1), - 
slice(None, None, 1), - 0, - slice(None, None, 1), - slice(None, None, 1)]) - }), - **regular_op_with_shaped_data('strided_slice_imag', int64_array([3, 100, 66, 34]), - { - 'type': 'StridedSlice', 'op': 'StridedSlice', - 'begin_mask': int64_array([0, 0, 1, 0, 0]), - 'end_mask': int64_array([0, 0, 1, 0, 0]), - 'ellipsis_mask': int64_array([0, 0, 0, 0, 0]), - 'new_axis_mask': int64_array([0, 0, 0, 0, 0]), - 'shrink_axis_mask': int64_array([0, 0, 1, 0, 0]), - 'slices': np.array([slice(None, None, 1), - slice(None, None, 1), - 1, - slice(None, None, 1), - slice(None, None, 1)]) - }), - **regular_op_with_shaped_data('complex', int64_array([3, 100, 66, 34, 2]), {'op': 'Complex'}), - **regular_op_with_shaped_data('roll', int64_array([3, 100, 66, 34, 2]), {'type': 'Roll', 'op': 'Roll'}), - **valued_const_with_data('real_begin', int64_array([0, 0, 0, 0, 0])), - **valued_const_with_data('imag_begin', int64_array([0, 0, 1, 0, 0])), - **valued_const_with_data('real_end', int64_array([0, 0, 1, 0, 0])), - **valued_const_with_data('imag_end', int64_array([0, 0, 2, 0, 0])), - **valued_const_with_data('real_strides', int64_array([1, 1, 1, 1, 1])), - **valued_const_with_data('imag_strides', int64_array([1, 1, 1, 1, 1])), - **regular_op_with_shaped_data('abs', int64_array([3, 100, 66, 34, 2]), {'type': 'Abs', 'op': 'Abs'}), - **valued_const_with_data('shift', int64_array([20, 20])), - **valued_const_with_data('axis', int64_array([1, -2, -1])), - **result('output'), -} - -graph_edges_2 = [ - ('placeholder', 'placeholder_d', {'out': 0}), - ('placeholder_d', 'strided_slice_real', {'out': 0, 'in': 0}), - ('placeholder_d', 'strided_slice_imag', {'out': 0, 'in': 0}), - *connect('strided_slice_real:0', '0:complex'), - *connect('strided_slice_imag:0', '1:complex'), - *connect('real_begin:0', '1:strided_slice_real'), - *connect('imag_begin:0', '1:strided_slice_imag'), - *connect('real_end:0', '2:strided_slice_real'), - *connect('imag_end:0', '2:strided_slice_imag'), - *connect('real_strides:0', '3:strided_slice_real'), - *connect('imag_strides:0', '3:strided_slice_imag'), - *connect('complex:0', '0:roll'), - *connect('shift:0', '1:roll'), - *connect('axis:0', '2:roll'), - *connect('roll:0', '0:abs'), - *connect('abs:0', 'output'), -] - -ref_graph_node_attrs_3 = { - **regular_op_with_shaped_data('placeholder', int64_array([3, 100, 2, 66, 34]), - {'type': 'Parameter', 'op': 'Parameter'}), - **valued_const_with_data('perm', int64_array([0, 1, 3, 4, 2])), - **regular_op_with_shaped_data('transpose', int64_array([3, 100, 66, 34, 2]), - {'type': 'Transpose', 'op': 'Transpose'}), - **regular_op_with_shaped_data('roll', int64_array([3, 100, 66, 34, 2]), {'type': 'Roll', 'op': 'Roll'}), - **valued_const_with_data('shift', int64_array([20, 20])), - **valued_const_with_data('axis', int64_array([1, 3, 4])), - **regular_op_with_shaped_data('abs', int64_array([3, 100, 66, 34, 2]), {'type': 'Abs', 'op': 'Abs'}), - **result('output'), -} - -ref_graph_edges_3 = [ - *connect('placeholder:0', '0:transpose'), - *connect('perm:0', '1:transpose'), - *connect('transpose:0', '0:roll'), - *connect('shift:0', '1:roll'), - *connect('axis:0', '2:roll'), - *connect('roll:0', '0:abs'), - *connect('abs:0', 'output'), -] - - -class SSliceComplexMiddleStageTest(unittest.TestCase): - def test_replacement_for_the_last_axis(self): - graph = build_graph(nodes_attrs=graph_node_attrs, edges=graph_edges) - SSliceComplex().find_and_replace_pattern(graph) - graph.clean_up() - ref_graph = build_graph(nodes_attrs=ref_graph_node_attrs, edges=ref_graph_edges) - 
(flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_nonreplacement_for_the_last_axis(self): - graph = build_graph(nodes_attrs=non_transformed_graph_node_attrs, edges=non_transformed_graph_edges) - ref_graph = build_graph(nodes_attrs=non_transformed_graph_node_attrs, edges=non_transformed_graph_edges) - SSliceComplex().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_replacement_for_non_last_axis(self): - graph = build_graph(nodes_attrs=graph_node_attrs_2, edges=graph_edges) - SSliceComplex().find_and_replace_pattern(graph) - graph.clean_up() - ref_graph = build_graph(nodes_attrs=ref_graph_node_attrs_2, edges=ref_graph_edges_2) - (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_replacement_with_update_roll_axes(self): - graph = build_graph(nodes_attrs=graph_node_attrs_3, edges=graph_edges_2) - SSliceComplex().find_and_replace_pattern(graph) - graph.clean_up() - ref_graph = build_graph(nodes_attrs=ref_graph_node_attrs_3, edges=ref_graph_edges_3) - (flag, resp) = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/SharedWeightsDuplication_test.py b/tools/mo/unit_tests/mo/middle/SharedWeightsDuplication_test.py deleted file mode 100644 index dc80ca1b2a73c5..00000000000000 --- a/tools/mo/unit_tests/mo/middle/SharedWeightsDuplication_test.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.SharedWeightsDuplication import SharedWeightsDuplication -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'const': {'shape': None, 'type': 'Const', 'kind': 'op', 'op': 'Const'}, - # Mul and Add operations - 'mul_1': {'type': None, 'kind': 'op', 'op': 'Mul'}, - 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'}, - 'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'mul_3': {'type': None, 'kind': 'op', 'op': 'Mul'}, - 'mul_3_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'mul_3_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Concat1 operation - 'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'}, - 'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'op_output': {'op': 'Result', 'kind': 'op'} -} - - -class DuplicateSharedWeightsTests(unittest.TestCase): - def test_duplicate_shared_weights_1(self): - graph = build_graph(nodes_attributes, - [('const', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_1_w', 'mul_3'), - ('mul_3', 'mul_3_data'), - ('mul_1_data', 'concat_1'), - ('mul_2_data', 'concat_1'), - ('mul_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}}, - nodes_with_edges_only=True - ) - - graph_ref = build_graph(nodes_attributes, - [ - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_2_w', 'mul_2'), - ('mul_2', 
'mul_2_data'), - ('mul_3_w', 'mul_3'), - ('mul_3', 'mul_3_data'), - ('mul_1_data', 'concat_1'), - ('mul_2_data', 'concat_1'), - ('mul_3_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_3_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - }, nodes_with_edges_only=True) - - SharedWeightsDuplication().find_and_replace_pattern(graph) - graph.clean_up() - graph_ref.clean_up() - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/SliceConverter_test.py b/tools/mo/unit_tests/mo/middle/SliceConverter_test.py deleted file mode 100644 index 1c29820e09df9c..00000000000000 --- a/tools/mo/unit_tests/mo/middle/SliceConverter_test.py +++ /dev/null @@ -1,383 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.SliceConverter import ConvertSlice -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, \ - regular_op_with_empty_data, result, connect, connect_data - -nodes_attributes = { - **regular_op_with_shaped_data('input', [2, 3, 300, 300], {'type': 'Parameter', 'op': 'Parameter'}), - **regular_op_with_empty_data('starts', {'op': 'Const', 'type': 'Const'}), - **regular_op_with_empty_data('ends', {'op': 'Const', 'type': 'Const'}), - **regular_op_with_empty_data('axes', {'op': 'Const', 'type': 'Const'}), - **regular_op_with_empty_data('steps', {'op': 'Const', 'type': 'Const'}), - **regular_op_with_empty_data('slice', {'op': 'Slice', 'type': None}), - - **regular_op_with_empty_data('ss_begin_cast', {'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}), - **regular_op_with_empty_data('ss_begin_clamp', {'op': 'Clamp', 'type': None}), - **regular_op_with_empty_data('ss_begin_clamp_min', {'value': np.iinfo(np.int32).min, 'op': 'Const', 'type': 'Const'}), - **regular_op_with_empty_data('ss_begin_clamp_max', {'value': np.iinfo(np.int32).max, 'op': 'Const', 'type': 'Const'}), - **regular_op_with_empty_data('ss_begin_gather_0', {'op': 'Gather', 'type': 'Gather'}), - **valued_const_with_data('ss_begin_gather_0_idx', int64_array([0])), - **regular_op_with_shaped_data('ss_begin_gather_0_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), - **regular_op_with_empty_data('ss_begin_gather_1', {'op': 'Gather', 'type': 'Gather'}), - **valued_const_with_data('ss_begin_gather_1_idx', int64_array([1])), - **regular_op_with_shaped_data('ss_begin_gather_1_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), - **regular_op_with_empty_data('ss_begin_gather_2', {'op': 'Gather', 'type': 'Gather'}), - **valued_const_with_data('ss_begin_gather_2_idx', int64_array([2])), - **regular_op_with_shaped_data('ss_begin_gather_2_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), - **regular_op_with_empty_data('ss_begin_gather_3', {'op': 'Gather', 'type': 'Gather'}), - **valued_const_with_data('ss_begin_gather_3_idx', int64_array([3])), - **regular_op_with_shaped_data('ss_begin_gather_3_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), - **regular_op_with_empty_data('ss_begin_const_0', {'op': 'Const', 'type': 'Const', 
'value': int64_array([0])}), - **regular_op_with_empty_data('ss_begin_const_1', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), - **regular_op_with_empty_data('ss_begin_const_2', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), - **regular_op_with_empty_data('ss_begin_const_3', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), - **regular_op_with_empty_data('ss_begin_concat', {'op': 'Concat', 'type': 'Concat'}), - - **regular_op_with_empty_data('ss_end_cast', {'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}), - **regular_op_with_empty_data('ss_end_clamp', {'op': 'Clamp', 'type': None}), - **regular_op_with_empty_data('ss_end_clamp_min', {'value': np.iinfo(np.int32).min, 'op': 'Const', 'type': 'Const'}), - **regular_op_with_empty_data('ss_end_clamp_max', {'value': np.iinfo(np.int32).max, 'op': 'Const', 'type': 'Const'}), - **regular_op_with_empty_data('ss_end_gather_0', {'op': 'Gather', 'type': 'Gather'}), - **valued_const_with_data('ss_end_gather_0_idx', int64_array([0])), - **regular_op_with_shaped_data('ss_end_gather_0_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), - **regular_op_with_empty_data('ss_end_gather_1', {'op': 'Gather', 'type': 'Gather'}), - **valued_const_with_data('ss_end_gather_1_idx', int64_array([1])), - **regular_op_with_shaped_data('ss_end_gather_1_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), - **regular_op_with_empty_data('ss_end_gather_2', {'op': 'Gather', 'type': 'Gather'}), - **valued_const_with_data('ss_end_gather_2_idx', int64_array([2])), - **regular_op_with_shaped_data('ss_end_gather_2_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), - **regular_op_with_empty_data('ss_end_gather_3', {'op': 'Gather', 'type': 'Gather'}), - **valued_const_with_data('ss_end_gather_3_idx', int64_array([3])), - **regular_op_with_shaped_data('ss_end_gather_3_axis', [], {'op': 'Const', 'type': 'Const', 'value': [0]}), - **regular_op_with_empty_data('ss_end_const_0', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), - **regular_op_with_empty_data('ss_end_const_1', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), - **regular_op_with_empty_data('ss_end_const_2', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), - **regular_op_with_empty_data('ss_end_const_3', {'op': 'Const', 'type': 'Const', 'value': int64_array([0])}), - **regular_op_with_empty_data('ss_end_concat', {'op': 'Concat', 'type': 'Concat'}), - - **regular_op_with_empty_data('ss_strides', {'op': 'Const', 'type': 'Const'}), - **regular_op_with_empty_data('ss', {'op': 'StridedSlice', 'type': 'StridedSlice', - 'new_axis_mask': np.zeros(4, dtype=np.int64), - 'shrink_axis_mask': np.zeros(4, dtype=np.int64), - 'ellipsis_mask': np.zeros(4, dtype=np.int64)}), - **result('result') -} - -pattern_graph = [ - *connect('input:0', '0:slice'), - *connect('starts:0', '1:slice'), - *connect('ends:0', '2:slice'), - *connect('axes:0', '3:slice'), - *connect('steps:0', '4:slice'), - *connect('slice:0', '0:result') -] - -pattern_ref_graph = [ - *connect('input:0', '0:ss'), - *connect('starts:0', '0:ss_begin_clamp'), - *connect('ss_begin_clamp:0', '0:ss_begin_cast'), - *connect('ss_begin_clamp_min:0', '1:ss_begin_clamp'), - *connect('ss_begin_clamp_max:0', '2:ss_begin_clamp'), - *connect('ss_begin_concat:0', '1:ss'), - *connect('ends:0', '0:ss_end_clamp'), - *connect('ss_end_clamp:0', '0:ss_end_cast'), - *connect('ss_end_clamp_min:0', '1:ss_end_clamp'), - *connect('ss_end_clamp_max:0', '2:ss_end_clamp'), - 
*connect('ss_end_concat:0', '2:ss'), - *connect('ss_strides:0', '3:ss'), - *connect('ss:0', '0:result'), - - *connect('ss_begin_gather_0_idx:0', '1:ss_begin_gather_0'), - *connect('ss_begin_gather_0_axis:0', '2:ss_begin_gather_0'), - *connect('ss_begin_gather_1_idx:0', '1:ss_begin_gather_1'), - *connect('ss_begin_gather_1_axis:0', '2:ss_begin_gather_1'), - *connect('ss_begin_gather_2_idx:0', '1:ss_begin_gather_2'), - *connect('ss_begin_gather_2_axis:0', '2:ss_begin_gather_2'), - *connect('ss_begin_gather_3_idx:0', '1:ss_begin_gather_3'), - *connect('ss_begin_gather_3_axis:0', '2:ss_begin_gather_3'), - - *connect('ss_end_gather_0_idx:0', '1:ss_end_gather_0'), - *connect('ss_end_gather_0_axis:0', '2:ss_end_gather_0'), - *connect('ss_end_gather_1_idx:0', '1:ss_end_gather_1'), - *connect('ss_end_gather_1_axis:0', '2:ss_end_gather_1'), - *connect('ss_end_gather_2_idx:0', '1:ss_end_gather_2'), - *connect('ss_end_gather_2_axis:0', '2:ss_end_gather_2'), - *connect('ss_end_gather_3_idx:0', '1:ss_end_gather_3'), - *connect('ss_end_gather_3_axis:0', '2:ss_end_gather_3'), -] - - -class ConvertSliceTests(unittest.TestCase): - - def test_convert_slice_to_strided_slice_one_axis(self): - graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_graph, - update_attributes={ - 'starts': {'value': int64_array([0]), 'shape': [1]}, - 'ends': {'value': int64_array([1]), 'shape': [1]}, - 'axes': {'value': int64_array([0]), 'shape': [1]}, - 'axes_d': {'value': int64_array([0]), 'shape': [1]}, - 'steps': {'value': int64_array([1]), 'shape': [1]}, - 'steps_d': {'value': int64_array([1]), 'shape': [1]} - }, - nodes_with_edges_only=True - ) - - ref_graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_ref_graph + [ - *connect('ss_begin_cast:0', '0:ss_begin_gather_0'), - *connect('ss_begin_gather_0:0', '0:ss_begin_concat'), - *connect('ss_begin_const_1:0', '1:ss_begin_concat'), - *connect('ss_begin_const_2:0', '2:ss_begin_concat'), - *connect('ss_begin_const_3:0', '3:ss_begin_concat'), - - *connect('ss_end_cast:0', '0:ss_end_gather_0'), - *connect('ss_end_gather_0:0', '0:ss_end_concat'), - *connect('ss_end_const_1:0', '1:ss_end_concat'), - *connect('ss_end_const_2:0', '2:ss_end_concat'), - *connect('ss_end_const_3:0', '3:ss_end_concat'), - ], - update_attributes={ - 'starts': {'value': int64_array([0]), 'shape': [1]}, - 'ends': {'value': int64_array([1]), 'shape': [1]}, - 'ss_strides': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, - 'ss': {'begin_mask': int64_array([1, 0, 0, 0]), 'end_mask': int64_array([1, 0, 0, 0])} - } - ) - ConvertSlice().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_convert_slice_to_strided_slice_one_axis_steps_is_2(self): - graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_graph, - update_attributes={ - 'starts': {'value': int64_array([0]), 'shape': [1]}, - 'ends': {'value': int64_array([150]), 'shape': [1]}, - 'axes': {'value': int64_array([2]), 'shape': [1]}, - 'axes_d': {'value': int64_array([2]), 'shape': [1]}, - 'steps': {'value': int64_array([2]), 'shape': [1]}, - 'steps_d': {'value': int64_array([2]), 'shape': [1]} - }, - nodes_with_edges_only=True - ) - - ref_graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_ref_graph + [ - *connect('ss_begin_cast:0', '0:ss_begin_gather_0'), - *connect('ss_begin_gather_0:0', '2:ss_begin_concat'), - *connect('ss_begin_const_0:0', '0:ss_begin_concat'), - 
*connect('ss_begin_const_1:0', '1:ss_begin_concat'), - *connect('ss_begin_const_3:0', '3:ss_begin_concat'), - - *connect('ss_end_cast:0', '0:ss_end_gather_0'), - *connect('ss_end_gather_0:0', '2:ss_end_concat'), - *connect('ss_end_const_0:0', '0:ss_end_concat'), - *connect('ss_end_const_1:0', '1:ss_end_concat'), - *connect('ss_end_const_3:0', '3:ss_end_concat'), - ], - update_attributes={ - 'starts': {'value': int64_array([0]), 'shape': [1]}, - 'ends': {'value': int64_array([150]), 'shape': [1]}, - 'ss_strides': {'value': int64_array([1, 1, 2, 1]), 'shape': [4]}, - 'ss': {'begin_mask': int64_array([0, 0, 1, 0]), 'end_mask': int64_array([0, 0, 1, 0])} - } - ) - ConvertSlice().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_convert_slice_to_strided_slice_two_axes(self): - graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_graph, - update_attributes={ - 'starts': {'value': int64_array([0, 0]), 'shape': [2]}, - 'ends': {'value': int64_array([150, 150]), 'shape': [2]}, - 'axes': {'value': int64_array([2, 3]), 'shape': [2]}, - 'axes_d': {'value': int64_array([2, 3]), 'shape': [2]}, - 'steps': {'value': int64_array([1, 1]), 'shape': [2]}, - 'steps_d': {'value': int64_array([1, 1]), 'shape': [2]} - }, - nodes_with_edges_only=True - ) - - ref_graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_ref_graph + [ - *connect('ss_begin_cast:0', '0:ss_begin_gather_0'), - *connect('ss_begin_gather_0:0', '2:ss_begin_concat'), - *connect_data('ss_begin_cast:0', '0:ss_begin_gather_1'), - *connect('ss_begin_gather_1:0', '3:ss_begin_concat'), - *connect('ss_begin_const_0:0', '0:ss_begin_concat'), - *connect('ss_begin_const_1:0', '1:ss_begin_concat'), - - *connect('ss_end_cast:0', '0:ss_end_gather_0'), - *connect('ss_end_gather_0:0', '2:ss_end_concat'), - *connect_data('ss_end_cast:0', '0:ss_end_gather_1'), - *connect('ss_end_gather_1:0', '3:ss_end_concat'), - *connect('ss_end_const_0:0', '0:ss_end_concat'), - *connect('ss_end_const_1:0', '1:ss_end_concat'), - ], - update_attributes={ - 'starts': {'value': int64_array([0, 0]), 'shape': [2]}, - 'ends': {'value': int64_array([150, 150]), 'shape': [2]}, - 'ss_strides': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, - 'ss': {'begin_mask': int64_array([0, 0, 1, 1]), 'end_mask': int64_array([0, 0, 1, 1])} - } - ) - ConvertSlice().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_convert_slice_to_strided_slice_three_axes(self): - graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_graph, - update_attributes={ - 'starts': {'value': int64_array([0, 0, 0]), 'shape': [3]}, - 'ends': {'value': int64_array([2, 150, 150]), 'shape': [3]}, - 'axes': {'value': int64_array([1, 2, 3]), 'shape': [3]}, - 'axes_d': {'value': int64_array([1, 2, 3]), 'shape': [3]}, - 'steps': {'value': int64_array([1, 1, 1]), 'shape': [3]}, - 'steps_d': {'value': int64_array([1, 1, 1]), 'shape': [3]} - }, - nodes_with_edges_only=True - ) - - ref_graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_ref_graph + [ - *connect('ss_begin_cast:0', '0:ss_begin_gather_0'), - *connect('ss_begin_gather_0:0', '1:ss_begin_concat'), - *connect_data('ss_begin_cast:0', '0:ss_begin_gather_1'), - *connect('ss_begin_gather_1:0', '2:ss_begin_concat'), - *connect_data('ss_begin_cast:0', '0:ss_begin_gather_2'), - *connect('ss_begin_gather_2:0', 
'3:ss_begin_concat'), - *connect('ss_begin_const_0:0', '0:ss_begin_concat'), - - *connect('ss_end_cast:0', '0:ss_end_gather_0'), - *connect('ss_end_gather_0:0', '1:ss_end_concat'), - *connect_data('ss_end_cast:0', '0:ss_end_gather_1'), - *connect('ss_end_gather_1:0', '2:ss_end_concat'), - *connect_data('ss_end_cast:0', '0:ss_end_gather_2'), - *connect('ss_end_gather_2:0', '3:ss_end_concat'), - *connect('ss_end_const_0:0', '0:ss_end_concat'), - ], - update_attributes={ - 'starts': {'value': int64_array([0, 0, 0]), 'shape': [3]}, - 'ends': {'value': int64_array([2, 150, 150]), 'shape': [3]}, - 'ss_strides': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, - 'ss': {'begin_mask': int64_array([0, 1, 1, 1]), 'end_mask': int64_array([0, 1, 1, 1])} - } - ) - ConvertSlice().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_convert_slice_to_strided_slice_not_sorted_axes(self): - graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_graph, - update_attributes={ - 'starts': {'value': int64_array([0, 1, 1, 0]), 'shape': [4]}, - 'ends': {'value': int64_array([1, 150, 150, 2]), 'shape': [4]}, - 'axes': {'value': int64_array([0, 2, 3, 1]), 'shape': [4]}, - 'axes_d': {'value': int64_array([0, 2, 3, 1]), 'shape': [4]}, - 'steps': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, - 'steps_d': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]} - }, - nodes_with_edges_only=True - ) - - ref_graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_ref_graph + [ - *connect('ss_begin_cast:0', '0:ss_begin_gather_0'), - *connect('ss_begin_gather_0:0', '0:ss_begin_concat'), - *connect_data('ss_begin_cast:0', '0:ss_begin_gather_1'), - *connect('ss_begin_gather_1:0', '2:ss_begin_concat'), - *connect_data('ss_begin_cast:0', '0:ss_begin_gather_2'), - *connect('ss_begin_gather_2:0', '3:ss_begin_concat'), - *connect_data('ss_begin_cast:0', '0:ss_begin_gather_3'), - *connect('ss_begin_gather_3:0', '1:ss_begin_concat'), - - *connect('ss_end_cast:0', '0:ss_end_gather_0'), - *connect('ss_end_gather_0:0', '0:ss_end_concat'), - *connect_data('ss_end_cast:0', '0:ss_end_gather_1'), - *connect('ss_end_gather_1:0', '2:ss_end_concat'), - *connect_data('ss_end_cast:0', '0:ss_end_gather_2'), - *connect('ss_end_gather_2:0', '3:ss_end_concat'), - *connect_data('ss_end_cast:0', '0:ss_end_gather_3'), - *connect('ss_end_gather_3:0', '1:ss_end_concat'), - ], - update_attributes={ - 'starts': {'value': int64_array([0, 1, 1, 0]), 'shape': [4]}, - 'ends': {'value': int64_array([1, 150, 150, 2]), 'shape': [4]}, - 'ss_strides': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, - 'ss': {'begin_mask': int64_array([1, 1, 1, 1]), 'end_mask': int64_array([1, 1, 1, 1])} - } - ) - ConvertSlice().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_convert_slice_to_strided_slice_without_axes_and_steps(self): - graph = build_graph( - nodes_attrs=nodes_attributes, - edges=[ - *connect('input:0', '0:slice'), - *connect('starts:0', '1:slice'), - *connect('ends:0', '2:slice'), - *connect('slice:0', '0:result') - ], - update_attributes={ - 'starts': {'value': int64_array([0, 0, 0, 0]), 'shape': [4]}, - 'ends': {'value': int64_array([1, 2, 150, 150]), 'shape': [4]}, - }, - nodes_with_edges_only=True - ) - - ref_graph = build_graph( - nodes_attrs=nodes_attributes, - edges=pattern_ref_graph + [ - *connect('ss_begin_cast:0', 
'0:ss_begin_gather_0'), - *connect('ss_begin_gather_0:0', '0:ss_begin_concat'), - *connect_data('ss_begin_cast:0', '0:ss_begin_gather_1'), - *connect('ss_begin_gather_1:0', '1:ss_begin_concat'), - *connect_data('ss_begin_cast:0', '0:ss_begin_gather_2'), - *connect('ss_begin_gather_2:0', '2:ss_begin_concat'), - *connect_data('ss_begin_cast:0', '0:ss_begin_gather_3'), - *connect('ss_begin_gather_3:0', '3:ss_begin_concat'), - - *connect('ss_end_cast:0', '0:ss_end_gather_0'), - *connect('ss_end_gather_0:0', '0:ss_end_concat'), - *connect_data('ss_end_cast:0', '0:ss_end_gather_1'), - *connect('ss_end_gather_1:0', '1:ss_end_concat'), - *connect_data('ss_end_cast:0', '0:ss_end_gather_2'), - *connect('ss_end_gather_2:0', '2:ss_end_concat'), - *connect_data('ss_end_cast:0', '0:ss_end_gather_3'), - *connect('ss_end_gather_3:0', '3:ss_end_concat'), - ], - update_attributes={ - 'starts': {'value': int64_array([0, 0, 0, 0]), 'shape': [4]}, - 'ends': {'value': int64_array([1, 2, 150, 150]), 'shape': [4]}, - 'ss_strides': {'value': int64_array([1, 1, 1, 1]), 'shape': [4]}, - 'ss': {'begin_mask': int64_array([1, 1, 1, 1]), 'end_mask': int64_array([1, 1, 1, 1])} - } - ) - ConvertSlice().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/SliceLikeToStridedSlice_test.py b/tools/mo/unit_tests/mo/middle/SliceLikeToStridedSlice_test.py deleted file mode 100644 index 739055d0136bab..00000000000000 --- a/tools/mo/unit_tests/mo/middle/SliceLikeToStridedSlice_test.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.SliceLikeToStridedSlice import SliceLikeToStridedSlice -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'input': {'kind': 'op', 'op': 'Const'}, - 'input_data': {'kind': 'data'}, - - 'shape_like_input': {'kind': 'op', 'op': 'Const'}, - 'shape_like_input_data': {'kind': 'data'}, - - 'slice_like': {'kind': 'op', 'op': 'slice_like'}, - 'slice_like_data': {'kind': 'data', 'shape': None, 'value': None}, - - 'result': {'kind': 'op', 'op': 'Result'}, - - 'shape': {'kind': 'op', 'op': 'ShapeOf'}, - 'shape_data': {'kind': 'data'}, - 'rank_1_d': {'kind': 'op', 'op': 'ShapeOf'}, - 'rank_1_d_data': {'kind': 'data'}, - 'rank': {'kind': 'op', 'op': 'Squeeze'}, - 'rank_data': {'kind': 'data'}, - 'rank_const': {'kind': 'op', 'op': 'Const'}, - 'rank_const_data': {'kind': 'data'}, - - 'shape_like': {'kind': 'op', 'op': 'ShapeOf'}, - 'shape_like_data': {'kind': 'data'}, - 'rank_like_1_d': {'kind': 'op', 'op': 'ShapeOf'}, - 'rank_like_1_d_data': {'kind': 'data'}, - 'rank_like': {'kind': 'op', 'op': 'Squeeze'}, - 'rank_like_const': {'kind': 'op', 'op': 'Const'}, - 'rank_like_const_data': {'kind': 'data'}, - - 'begin': {'kind': 'op', 'op': 'Const'}, - 'begin_data': {'kind': 'data'}, - 'ss': {'kind': 'op', 'op': 'StridedSlice'}, - - 'start_idx_like': {'kind': 'op', 'op': 'Const'}, - 'start_idx_like_data': {'kind': 'data'}, - 'end_idx_like': {'kind': 'op', 'op': 'Const'}, - 'end_idx_like_data': {'kind': 'data'}, - 'end_idx_like_const': {'kind': 'op', 'op': 'Const'}, - 'end_idx_like_const_data': {'kind': 'data'}, - 'end_idx_like_add': {'kind': 'op', 'op': 'Add'}, - 'end_idx_like_add_data': {'kind': 
'data'}, - 'delta_like': {'kind': 'op', 'op': 'Const'}, - 'delta_like_data': {'kind': 'data'}, - 'range_like': {'kind': 'op', 'op': 'Range'}, - 'range_like_data': {'kind': 'data'}, - 'gather_like': {'kind': 'op', 'op': 't_gather'}, - 'gather_like_data': {'kind': 'data'}, - 'gather_like_axis': {'kind': 'op', 'op': 'Const'}, - 'gather_like_axis_data': {'kind': 'data'}, - 'concat': {'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'kind': 'data'}, - - 'start_idx': {'kind': 'op', 'op': 'Const'}, - 'start_idx_data': {'kind': 'data'}, - 'start_idx_const': {'kind': 'op', 'op': 'Const'}, - 'start_idx_const_data': {'kind': 'data'}, - 'start_idx_add': {'kind': 'op', 'op': 'Add'}, - 'start_idx_add_data': {'kind': 'data'}, - 'end_idx': {'kind': 'op', 'op': 'Add'}, - 'end_idx_data': {'kind': 'data'}, - 'end_idx_axis': {'kind': 'op', 'op': 'Const'}, - 'end_idx_axis_data': {'kind': 'data'}, - 'end_idx_const': {'kind': 'op', 'op': 'Const'}, - 'end_idx_const_data': {'kind': 'data'}, - 'end_idx_add': {'kind': 'op', 'op': 'Add'}, - 'end_idx_add_data': {'kind': 'data'}, - 'delta': {'kind': 'op', 'op': 'Const'}, - 'delta_data': {'kind': 'data'}, - 'range': {'kind': 'op', 'op': 'Range'}, - 'range_data': {'kind': 'data'}, - 't_gather': {'kind': 'op', 'op': 't_gather'}, - 'gather_data': {'kind': 'data'}, - 'gather_axis': {'kind': 'op', 'op': 'Const'}, - 'gather_axis_data': {'kind': 'data'} - -} - -edges = [ - ('input', 'input_data'), - ('input_data', 'slice_like', {'in': 0}), - ('shape_like_input', 'shape_like_input_data'), - ('shape_like_input_data', 'slice_like', {'in': 1}), - ('slice_like', 'slice_like_data'), - ('slice_like_data', 'result') -] - -same_input_shapes_dims_edges = [ - ('input', 'input_data'), - ('input_data', 'ss', {'in': 0}), - ('ss', 'slice_like_data'), - ('slice_like_data', 'result'), - ('shape_like_input', 'shape_like_input_data'), - ('shape_like_input_data', 'shape_like'), - ('shape_like', 'shape_like_data'), - ('shape_like_data', 'ss', {'in': 2}), - ('begin', 'begin_data'), - ('begin_data', 'ss', {'in': 1}) -] - -shape_like_sub_graph_edges = [ - ('input', 'input_data'), - ('input_data', 'ss', {'in': 0}), - ('ss', 'slice_like_data'), - ('slice_like_data', 'result'), - ('begin', 'begin_data'), - ('begin_data', 'ss', {'in': 1}), - ('shape_like_input', 'shape_like_input_data'), - ('shape_like_input_data', 'shape_like'), - ('shape_like', 'shape_like_data'), - ('shape_like_data', 'rank_like_1_d'), - ('rank_like_1_d', 'rank_like_1_d_data'), - ('rank_like_1_d_data', 'rank_like', {'in': 0}), - ('rank_like_const', 'rank_like_const_data'), - ('rank_like_const_data', 'rank_like', {'in': 1}), - ('end_idx_like', 'end_idx_like_data'), - ('end_idx_like_const', 'end_idx_like_const_data'), - ('end_idx_like_data', 'end_idx_like_add', {'in': 0}), - ('end_idx_like_const_data', 'end_idx_like_add', {'in': 1}), - ('end_idx_like_add', 'end_idx_like_add_data'), - ('end_idx_like_add_data', 'range_like', {'in': 1}), - ('start_idx_like', 'start_idx_like_data'), - ('start_idx_like_data', 'range_like', {'in': 0}), - ('delta_like', 'delta_like_data'), - ('delta_like_data', 'range_like', {'in': 2}), - ('range_like', 'range_like_data'), - ('range_like_data', 'gather_like', {'in': 1}), - ('shape_like_data', 'gather_like', {'in': 0}), - ('gather_like_axis', 'gather_like_axis_data'), - ('gather_like_axis_data', 'gather_like', {'in': 2}), - ('gather_like', 'gather_like_data') -] - -last_axis_index = shape_like_sub_graph_edges + [('gather_like_data', 'ss', {'in': 2})] - -input_sub_graph_edges = [ - ('input_data', 'shape'), - 
('shape', 'shape_data'), - ('shape_data', 'rank_1_d'), - ('rank_1_d', 'rank_1_d_data'), - ('rank_1_d_data', 'rank', {'in': 0}), - ('rank_const', 'rank_const_data'), - ('rank_const_data', 'rank', {'in': 1}), - ('rank', 'rank_data'), - ('rank_data', 'end_idx', {'in': 0}), - ('end_idx_axis', 'end_idx_axis_data'), - ('end_idx_axis_data', 'end_idx', {'in': 1}), - ('end_idx', 'end_idx_data'), - ('end_idx_data', 'end_idx_add', {'in': 0}), - ('end_idx_const', 'end_idx_const_data'), - ('end_idx_const_data', 'end_idx_add', {'in': 1}), - ('start_idx', 'start_idx_data'), - ('start_idx_data', 'start_idx_add', {'in': 0}), - ('start_idx_const', 'start_idx_const_data'), - ('start_idx_const_data', 'start_idx_add', {'in': 1}), - ('end_idx_add', 'end_idx_add_data'), - ('start_idx_add', 'start_idx_add_data'), - ('delta', 'delta_data'), - ('start_idx_add_data', 'range', {'in': 0}), - ('end_idx_add_data', 'range', {'in': 1}), - ('delta_data', 'range', {'in': 2}), - ('range', 'range_data'), - ('range_data', 't_gather', {'in': 1}), - ('shape_data', 't_gather', {'in': 0}), - ('gather_axis', 'gather_axis_data'), - ('gather_axis_data', 't_gather', {'in': 2}), - ('t_gather', 'gather_data'), - ('gather_data', 'concat', {'in': 1}), - ('concat', 'concat_data'), - ('concat_data', 'ss', {'in': 2}), - ('gather_like_data', 'concat', {'in': 0}) -] - -input_part_shape_edges = shape_like_sub_graph_edges + input_sub_graph_edges - - -class SliceLikeToStridedSliceTest(unittest.TestCase): - - def test_1(self): - graph = build_graph( - nodes_attributes, - edges, - update_attributes={ - 'input_data': {'shape': int64_array([1, 224, 224, 3])}, - 'shape_like_input_data': {'shape': int64_array([2, 2, 2, 2])}, - 'slice_like': {'axes': int64_array([2, 3])} - }, - nodes_with_edges_only=True - ) - SliceLikeToStridedSlice().find_and_replace_pattern(graph) - ref_graph = build_graph( - nodes_attributes, - same_input_shapes_dims_edges, - nodes_with_edges_only=True - ) - - flag, resp = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) - - def test_2(self): - graph = build_graph( - nodes_attributes, - edges, - update_attributes={ - 'input_data': {'shape': int64_array([1, 224, 224, 3])}, - 'shape_like_input_data': {'shape': int64_array([2, 2, 2, 2, 2])}, - 'slice_like': {'axes': int64_array([2, 3])} - }, - nodes_with_edges_only=True - ) - SliceLikeToStridedSlice().find_and_replace_pattern(graph) - ref_graph = build_graph( - nodes_attributes, - last_axis_index, - nodes_with_edges_only=True - ) - - flag, resp = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) - - def test_3(self): - graph = build_graph( - nodes_attributes, - edges, - update_attributes={ - 'input_data': {'shape': int64_array([1, 224, 224, 3])}, - 'shape_like_input_data': {'shape': int64_array([2, 2, 2, 2, 2])}, - 'slice_like': {'axes': int64_array([1, 2])} - }, - nodes_with_edges_only=True - ) - SliceLikeToStridedSlice().find_and_replace_pattern(graph) - ref_graph = build_graph( - nodes_attributes, - input_part_shape_edges, - nodes_with_edges_only=True - ) - flag, resp = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/SplitConcatPairToInterpolate_test.py b/tools/mo/unit_tests/mo/middle/SplitConcatPairToInterpolate_test.py deleted file mode 100644 index a7834fa67a09e4..00000000000000 --- a/tools/mo/unit_tests/mo/middle/SplitConcatPairToInterpolate_test.py +++ /dev/null @@ -1,677 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - 
-import unittest - -import numpy as np - -from openvino.tools.mo.middle.SplitConcatPairToInterpolate import SplitConcatPairToInterpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -graph_node_attrs_for_2d_spatial_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 100, 120, 150]), - 'kind': 'data', - 'data_type': None - }, - 'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 3}, - 'split_axis_const': { - 'kind': 'op', - 'value': np.array(3, dtype=np.int64), - 'op': 'Const', - 'type': 'Const' - }, - 'split_axis_const_data': { - 'value': np.array(3, dtype=np.int64), - 'shape': np.array(3, dtype=np.int64).shape, - 'kind': 'data' - }, - 'concat': {'type': 'Concat', 'kind': 'op', 'axis': 3}, - 'split_data_0': {'value': None, 'shape': int64_array([1, 100, 120, 50]), 'kind': 'data'}, - 'split_data_1': {'value': None, 'shape': int64_array([1, 100, 120, 50]), 'kind': 'data'}, - 'split_data_2': {'value': None, 'shape': int64_array([1, 100, 120, 50]), 'kind': 'data'}, - 'concat_data': {'value': None, 'shape': int64_array([1, 100, 120, 300]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 100, 120, 300]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - - -graph_node_attrs_for_3d_spatial_case = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 3, 100, 120, 150]), - 'kind': 'data', - 'data_type': None - }, - 'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 3}, - 'split_axis_const': { - 'kind': 'op', - 'value': np.array(4, dtype=np.int64), - 'op': 'Const', - 'type': 'Const' - }, - 'split_axis_const_data': { - 'value': np.array(4, dtype=np.int64), - 'shape': np.array(4, dtype=np.int64).shape, - 'kind': 'data' - }, - 'concat': {'type': 'Concat', 'kind': 'op', 'axis': 4}, - 'split_data_0': {'value': None, 'shape': int64_array([1, 3, 100, 120, 50]), 'kind': 'data'}, - 'split_data_1': {'value': None, 'shape': int64_array([1, 3, 100, 120, 50]), 'kind': 'data'}, - 'split_data_2': {'value': None, 'shape': int64_array([1, 3, 100, 120, 50]), 'kind': 'data'}, - 'concat_data': {'value': None, 'shape': int64_array([1, 3, 100, 120, 300]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 3, 100, 120, 300]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, - } - - -graph_edges = [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'split', {'in': 0}), - ('split_axis_const', 'split_axis_const_data'), - ('split_axis_const_data', 'split', {'in': 1}), - ('split', 'split_data_0', {'out': 0}), - ('split', 'split_data_1', {'out': 1}), - ('split', 'split_data_2', {'out': 2}), - ('split_data_0', 'concat', {'in': 0}), - ('split_data_0', 'concat', {'in': 1}), - ('split_data_1', 'concat', {'in': 2}), - ('split_data_1', 'concat', {'in': 3}), - ('split_data_2', 'concat', {'in': 4}), - ('split_data_2', 'concat', {'in': 5}), - ('concat', 'concat_data'), - ('concat_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output') -] - - -ref_graph_edges_opset4 = [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'interpolate', {'in': 0}), - 
('placeholder_data', 'shape'), - ('shape', 'shape_data'), - ('shape_data', 'sslice', {'in': 0}), - ('slice_begin', 'slice_begin_data'), - ('slice_begin_data', 'sslice', {'in': 1}), - ('slice_end', 'slice_end_data'), - ('slice_end_data', 'sslice', {'in': 2}), - ('sslice', 'sslice_data'), - ('sslice_data', 'cast_shape_to_float'), - ('cast_shape_to_float', 'cast_shape_to_float_data'), - ('scales', 'scales_data'), - ('axes', 'axes_data'), - ('cast_shape_to_float_data', 'mul', {'in': 0}), - ('scales_data', 'mul', {'in': 1, 'out': 0}), - ('mul', 'mul_data'), - ('mul_data', 'floor'), - ('floor', 'floor_data'), - ('floor_data', 'cast_mul_to_float'), - ('cast_mul_to_float', 'cast_mul_to_float_data'), - ('cast_mul_to_float_data', 'interpolate', {'in': 1}), - ('scales_data', 'interpolate', {'in': 2, 'out': 0}), - ('axes_data', 'interpolate', {'in': 3}), - ('interpolate', 'interpolate_data'), - ('interpolate_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), - ] - -ref_graph_node_attrs_for_2d_spatial_case_1_opset4 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 100, 120, 150]), - 'kind': 'data', - 'data_type': None - }, - 'interpolate': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'antialias': 0, - 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), - 'coordinate_transformation_mode': 'half_pixel', - 'nearest_mode': 'round_prefer_floor', - 'cube_coeff': -0.75, - 'version': 'opset4', - 'shape_calculation_mode': 'scales' - }, - 'shape': {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}, - 'shape_data': {'kind': 'data', 'shape': None, 'value': None}, - 'slice_begin': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([3]), - 'shape': int64_array([1]) - }, - 'slice_begin_data': {'kind': 'data', 'shape': int64_array([1]), 'value': int64_array([3])}, - 'slice_end': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'value': int64_array([4]), 'shape': int64_array([1])}, - 'slice_end_data': {'kind': 'data', 'value': int64_array([4]), 'shape': int64_array([1])}, - 'sslice': { - 'kind': 'op', - 'type': 'StridedSlice', - 'op': 'StridedSlice', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0]), - }, - 'sslice_data': {'kind': 'data', 'shape': None}, - 'scales': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': np.array([2], dtype=np.float32), - 'shape': int64_array([1]) - }, - 'scales_data': {'kind': 'data', 'shape': None}, - 'cast_shape_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float32}, - 'cast_shape_to_float_data': {'kind': 'data', 'shape': None}, - 'axes': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([3]), - 'shape': int64_array([1]) - }, - 'axes_data': {'kind': 'data', 'shape': None}, - 'mul': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'}, - 'mul_data': {'kind': 'data', 'shape': None}, - 'floor': {'kind': 'op', 'op': 'Floor', 'type': 'Floor'}, - 'floor_data': {'kind': 'data', 'shape': None}, - 'cast_mul_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}, - 'cast_mul_to_float_data': {'kind': 'data', 'shape': None}, - 'interpolate_data': {'value': None, 'shape': int64_array([1, 100, 120, 300]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': 
None, 'shape': int64_array([1, 100, 120, 300]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -ref_graph_node_attrs_for_2d_spatial_case_1 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 100, 120, 150]), - 'kind': 'data', - 'data_type': None - }, - 'interpolate': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'axes': int64_array([3]), - 'mode': 'nearest' - }, - 'shape': {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}, - 'shape_data': {'kind': 'data', 'shape': None, 'value': None}, - 'slice_begin': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([3]), - 'shape': int64_array([1]) - }, - 'slice_begin_data': {'kind': 'data', 'shape': None, 'value': None}, - 'slice_end': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'value': int64_array([4])}, - 'slice_end_data': {'kind': 'data', 'shape': None, 'value': None}, - 'sslice': { - 'kind': 'op', - 'type': 'StridedSlice', - 'op': 'StridedSlice', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0]), - }, - 'sslice_data': {'kind': 'data', 'shape': None}, - 'scales': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([2]), - 'shape': int64_array([1]) - }, - 'scales_data': {'kind': 'data', 'shape': None}, - 'axes': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([3]), - 'shape': int64_array([1]) - }, - 'axes_data': {'kind': 'data', 'shape': None}, - 'mul': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'}, - 'mul_data': {'kind': 'data', 'shape': None}, - 'interpolate_data': {'value': None, 'shape': int64_array([1, 100, 120, 300]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 100, 120, 300]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - -ref_graph_node_attrs_for_2d_spatial_case_2 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 100, 120, 150]), - 'kind': 'data', - 'data_type': None - }, - 'interpolate': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'antialias': 0, - 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), - 'coordinate_transformation_mode': 'half_pixel', - 'nearest_mode': 'round_prefer_floor', - 'cube_coeff': -0.75, - 'version': 'opset4', - 'shape_calculation_mode': 'scales' - }, - 'shape': {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}, - 'shape_data': {'kind': 'data', 'shape': None, 'value': None}, - 'slice_begin': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([2]), - 'shape': int64_array([1]) - }, - 'slice_begin_data': {'kind': 'data', 'shape': None, 'value': None}, - 'slice_end': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'value': int64_array([3])}, - 'slice_end_data': {'kind': 'data', 'shape': None, 'value': None}, - 'sslice': { - 'kind': 'op', - 'type': 'StridedSlice', - 'op': 'StridedSlice', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0]), - }, - 'sslice_data': {'kind': 'data', 'shape': None}, - 'scales': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': 
np.array([2], dtype=np.float32), - 'shape': int64_array([1]) - }, - 'scales_data': {'kind': 'data', 'shape': None}, - 'cast_shape_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float32}, - 'cast_shape_to_float_data': {'kind': 'data', 'shape': None}, - 'axes': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([3]), - 'shape': int64_array([1]) - }, - 'axes_data': {'kind': 'data', 'shape': None}, - 'mul': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'}, - 'mul_data': {'kind': 'data', 'shape': None}, - 'floor': {'kind': 'op', 'op': 'Floor', 'type': 'Floor'}, - 'floor_data': {'kind': 'data', 'shape': None}, - 'cast_mul_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}, - 'cast_mul_to_float_data': {'kind': 'data', 'shape': None}, - 'interpolate_data': {'value': None, 'shape': int64_array([1, 100, 240, 150]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 100, 240, 150]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - - -ref_graph_node_attrs_for_3d_spatial_case_1 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 3, 100, 120, 150]), - 'kind': 'data', - 'data_type': None - }, - 'interpolate': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'antialias': 0, - 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), - 'coordinate_transformation_mode': 'half_pixel', - 'nearest_mode': 'round_prefer_floor', - 'cube_coeff': -0.75, - 'version': 'opset4', - 'shape_calculation_mode': 'scales' - }, - 'shape': {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}, - 'shape_data': {'kind': 'data', 'shape': None, 'value': None}, - 'slice_begin': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([4]), - 'shape': int64_array([1]) - }, - 'slice_begin_data': {'kind': 'data', 'shape': None, 'value': None}, - 'slice_end': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'value': int64_array([5])}, - 'slice_end_data': {'kind': 'data', 'shape': None, 'value': None}, - 'sslice': { - 'kind': 'op', - 'type': 'StridedSlice', - 'op': 'StridedSlice', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0]), - }, - 'sslice_data': {'kind': 'data', 'shape': None}, - 'scales': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': np.array([2], dtype=np.float32), - 'shape': int64_array([1]) - }, - 'scales_data': {'kind': 'data', 'shape': None}, - 'cast_shape_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float32}, - 'cast_shape_to_float_data': {'kind': 'data', 'shape': None}, - 'axes': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([3]), - 'shape': int64_array([1]) - }, - 'axes_data': {'kind': 'data', 'shape': None}, - 'mul': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'}, - 'mul_data': {'kind': 'data', 'shape': None}, - 'floor': {'kind': 'op', 'op': 'Floor', 'type': 'Floor'}, - 'floor_data': {'kind': 'data', 'shape': None}, - 'cast_mul_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}, - 'cast_mul_to_float_data': {'kind': 'data', 'shape': None}, - 'interpolate_data': {'value': None, 'shape': int64_array([1, 3, 100, 120, 300]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 
'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 3, 100, 120, 300]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - - -ref_graph_node_attrs_for_3d_spatial_case_2 = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 3, 100, 120, 150]), - 'kind': 'data', - 'data_type': None - }, - 'interpolate': { - 'type': 'Interpolate', - 'kind': 'op', - 'op': 'Interpolate', - 'mode': 'nearest', - 'antialias': 0, - 'pads_begin': int64_array([0]), - 'pads_end': int64_array([0]), - 'coordinate_transformation_mode': 'half_pixel', - 'nearest_mode': 'round_prefer_floor', - 'cube_coeff': -0.75, - 'version': 'opset4', - 'shape_calculation_mode': 'scales' - }, - 'shape': {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}, - 'shape_data': {'kind': 'data', 'shape': None, 'value': None}, - 'slice_begin': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([4]), - 'shape': int64_array([1]) - }, - 'slice_begin_data': {'kind': 'data', 'shape': None, 'value': None}, - 'slice_end': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'value': int64_array([5])}, - 'slice_end_data': {'kind': 'data', 'shape': None, 'value': None}, - 'sslice': { - 'kind': 'op', - 'type': 'StridedSlice', - 'op': 'StridedSlice', - 'begin_mask': int64_array([1]), - 'end_mask': int64_array([1]), - 'new_axis_mask': int64_array([0]), - 'shrink_axis_mask': int64_array([0]), - 'ellipsis_mask': int64_array([0]), - }, - 'sslice_data': {'kind': 'data', 'shape': None}, - 'scales': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': np.array([2], dtype=np.float32), - 'shape': int64_array([1]) - }, - 'scales_data': {'kind': 'data', 'shape': None}, - 'cast_shape_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float32}, - 'cast_shape_to_float_data': {'kind': 'data', 'shape': None}, - 'axes': { - 'type': 'Const', - 'op': 'Const', - 'kind': 'op', - 'value': int64_array([3]), - 'shape': int64_array([1]) - }, - 'axes_data': {'kind': 'data', 'shape': None}, - 'mul': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'}, - 'mul_data': {'kind': 'data', 'shape': None}, - 'floor': {'kind': 'op', 'op': 'Floor', 'type': 'Floor'}, - 'floor_data': {'kind': 'data', 'shape': None}, - 'cast_mul_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.int64}, - 'cast_mul_to_float_data': {'kind': 'data', 'shape': None}, - 'interpolate_data': {'value': None, 'shape': int64_array([1, 3, 100, 240, 150]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 3, 100, 240, 150]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - - -graph_node_attrs_when_there_are_two_splits_one_concat = { - 'placeholder1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder1_data': { - 'value': None, - 'shape': int64_array([1, 13, 13, 3, 2]), - 'kind': 'data', - 'data_type': None - }, - 'placeholder2': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder2_data': { - 'value': None, - 'shape': int64_array([1, 13, 13, 3, 2]), - 'kind': 'data', - 'data_type': None - }, - 'split1': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 2}, - 'split1_axis_const': { - 'kind': 'op', - 'value': np.array(4, dtype=np.int64), - 'op': 'Const', - 'type': 'Const' - }, - 'split1_axis_const_data': { - 'value': np.array(4, dtype=np.int64), - 'shape': np.array(4, 
dtype=np.int64).shape, - 'kind': 'data' - }, - 'split2': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 2}, - 'split2_axis_const': { - 'kind': 'op', - 'value': np.array(4, dtype=np.int64), - 'op': 'Const', - 'type': 'Const' - }, - 'split2_axis_const_data': { - 'value': np.array(4, dtype=np.int64), - 'shape': np.array(4, dtype=np.int64).shape, - 'kind': 'data' - }, - 'split1_data_0': {'value': None, 'shape': int64_array([1, 13, 13, 3, 1]), 'kind': 'data'}, - 'split1_data_1': {'value': None, 'shape': int64_array([1, 13, 13, 3, 1]), 'kind': 'data'}, - 'split2_data_0': {'value': None, 'shape': int64_array([1, 13, 13, 3, 1]), 'kind': 'data'}, - 'split2_data_1': {'value': None, 'shape': int64_array([1, 13, 13, 3, 1]), 'kind': 'data'}, - 'concat': {'type': 'Concat', 'kind': 'op', 'axis': 4}, - 'concat_data': {'value': None, 'shape': int64_array([1, 13, 13, 3, 4]), 'kind': 'data'}, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 13, 13, 3, 4]), 'kind': 'data'}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - - -graph_edges_when_there_are_two_splits_one_concat = [ - ('placeholder1', 'placeholder1_data'), - ('placeholder2', 'placeholder2_data'), - ('placeholder1_data', 'split1', {'in': 0}), - ('split1_axis_const', 'split1_axis_const_data'), - ('split1_axis_const_data', 'split1', {'in': 1}), - ('split1', 'split1_data_0', {'out': 0}), - ('split1', 'split1_data_1', {'out': 1}), - ('placeholder2_data', 'split2', {'in': 0}), - ('split2_axis_const', 'split2_axis_const_data'), - ('split2_axis_const_data', 'split2', {'in': 1}), - ('split2', 'split2_data_0', {'out': 0}), - ('split2', 'split2_data_1', {'out': 1}), - ('split1_data_0', 'concat', {'in': 0}), - ('split1_data_1', 'concat', {'in': 1}), - ('split2_data_0', 'concat', {'in': 2}), - ('split2_data_1', 'concat', {'in': 3}), - ('concat', 'concat_data'), - ('concat_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output') -] - - -class SplitConcatPairToInterpolateTest(unittest.TestCase): - def test_spatial_2d_split_concat_1(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_spatial_case, - edges=graph_edges - ) - ref_graph = build_graph( - nodes_attrs=ref_graph_node_attrs_for_2d_spatial_case_1_opset4, - edges=ref_graph_edges_opset4 - ) - SplitConcatPairToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_spatial_2d_split_concat_2(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_2d_spatial_case, - edges=graph_edges, - update_attributes={ - 'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 3}, - 'split_axis_const': { - 'kind': 'op', - 'value': np.array(2, dtype=np.int64), - 'op': 'Const', - 'type': 'Const' - }, - 'split_axis_const_data': { - 'value': np.array(2, dtype=np.int64), - 'shape': np.array(2, dtype=np.int64).shape, - 'kind': 'data' - }, - 'concat': {'type': 'Concat', 'kind': 'op', 'axis': 2}, - 'split_data_0': {'value': None, 'shape': int64_array([1, 100, 40, 150]), 'kind': 'data'}, - 'split_data_1': {'value': None, 'shape': int64_array([1, 100, 40, 150]), 'kind': 'data'}, - 'split_data_2': {'value': None, 'shape': int64_array([1, 100, 40, 150]), 'kind': 'data'}, - 'concat_data': {'value': None, 'shape': int64_array([1, 100, 240, 150]), 'kind': 'data'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 100, 240, 150]), 'kind': 'data'}, - } - ) - ref_graph = build_graph( - 
nodes_attrs=ref_graph_node_attrs_for_2d_spatial_case_2, - edges=ref_graph_edges_opset4, - update_attributes={ - 'axes': {'shape': int64_array([1]), 'value': int64_array([2])} - } - ) - SplitConcatPairToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_spatial_3d_split_concat_1(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_3d_spatial_case, - edges=graph_edges - ) - ref_graph = build_graph( - nodes_attrs=ref_graph_node_attrs_for_3d_spatial_case_1, - edges=ref_graph_edges_opset4, - update_attributes={ - 'axes': {'shape': int64_array([1]), 'value': int64_array([4])} - } - ) - SplitConcatPairToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_spatial_3d_split_concat_2(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_for_3d_spatial_case, - edges=graph_edges, - update_attributes={ - 'split': {'type': 'Split', 'kind': 'op', 'op': 'Split', 'num_splits': 3}, - 'split_axis_const': { - 'kind': 'op', - 'value': np.array(3, dtype=np.int64), - 'op': 'Const', - 'type': 'Const' - }, - 'split_axis_const_data': { - 'value': np.array(3, dtype=np.int64), - 'shape': np.array(3, dtype=np.int64).shape, - 'kind': 'data' - }, - 'concat': {'type': 'Concat', 'kind': 'op', 'axis': 3}, - 'split_data_0': {'value': None, 'shape': int64_array([1, 3, 100, 40, 150]), 'kind': 'data'}, - 'split_data_1': {'value': None, 'shape': int64_array([1, 3, 100, 40, 150]), 'kind': 'data'}, - 'split_data_2': {'value': None, 'shape': int64_array([1, 3, 100, 40, 150]), 'kind': 'data'}, - 'concat_data': {'value': None, 'shape': int64_array([1, 3, 100, 240, 150]), 'kind': 'data'}, - 'abs_data': {'value': None, 'shape': int64_array([1, 3, 100, 240, 150]), 'kind': 'data'}, - } - ) - ref_graph = build_graph( - nodes_attrs=ref_graph_node_attrs_for_3d_spatial_case_2, - edges=ref_graph_edges_opset4 - ) - SplitConcatPairToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_two_splits_one_concat(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_when_there_are_two_splits_one_concat, - edges=graph_edges_when_there_are_two_splits_one_concat - ) - ref_graph = build_graph( - nodes_attrs=graph_node_attrs_when_there_are_two_splits_one_concat, - edges=graph_edges_when_there_are_two_splits_one_concat - ) - SplitConcatPairToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/StridedSliceNormalizer_test.py b/tools/mo/unit_tests/mo/middle/StridedSliceNormalizer_test.py deleted file mode 100644 index 8c81768a75b092..00000000000000 --- a/tools/mo/unit_tests/mo/middle/StridedSliceNormalizer_test.py +++ /dev/null @@ -1,2072 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import numpy.testing as npt - -from openvino.tools.mo.middle.StridedSliceNormalizer import StridedSliceNormalizer -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.ops.split import VariadicSplit -from openvino.tools.mo.front.common.partial_infer.concat import concat_infer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from 
openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.ops.strided_slice import StridedSlice -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, valued_const_with_data, regular_op_with_empty_data, \ - connect, regular_op, empty_data, regular_op_with_shaped_data - -edges = ( - *connect('input', '0:strided_slice'), - *connect('begin', '1:strided_slice'), - *connect('end', '2:strided_slice'), - *connect('strides', '3:strided_slice'), - *connect('strided_slice', 'res') -) - -edges_without_strides = ( - *connect('input', '0:strided_slice'), - *connect('begin', '1:strided_slice'), - *connect('end', '2:strided_slice'), - *connect('strided_slice', 'res') -) - - -class TestStridedSliceNormalizer(unittest.TestCase): - - def test_strided_slice_extend_inputs(self): - input_shape = (16, 100, 100, 3) - nodes = { - **valued_const_with_data('input', np.arange(np.product(input_shape)).reshape(*input_shape)), - **regular_op_with_empty_data('strided_slice', {'op': 'StridedSlice', - 'type': 'StridedSlice', - 'begin_mask': [1, 1, 1], - 'end_mask': [1, 1, 1], - 'shrink_axis_mask': [0, 0, 0], - 'new_axis_mask': [0, 0, 0], - 'ellipsis_mask': [0, 0, 0], - 'infer': StridedSlice.infer}), - - **regular_op_with_empty_data('strided_slice_ref', {'op': 'StridedSlice', - 'type': 'StridedSlice', - 'begin_mask': [1, 1, 1, 0], - 'end_mask': [1, 1, 1, 0], - 'new_axis_mask': [0, 0, 0, 0], - 'shrink_axis_mask': [0, 0, 0, 0], - 'ellipsis_mask': [0, 0, 0, 0], - 'infer': StridedSlice.infer}), - **valued_const_with_data('begin', int64_array([0, 0, 0])), - **valued_const_with_data('begin_placeholder', int64_array([0])), - **regular_op_with_empty_data('begin_concat', - {'op': 'Concat', 'infer': concat_infer, 'axis': 0, 'dim_attrs': {}}), - **valued_const_with_data('end', int64_array([4, 25, 50])), - **valued_const_with_data('end_placeholder', int64_array([0])), - **regular_op_with_empty_data('end_concat', - {'op': 'Concat', 'infer': concat_infer, 'axis': 0, 'dim_attrs': {}}), - **valued_const_with_data('strides', int64_array([1, 1, 1])), - **valued_const_with_data('strides_placeholder', int64_array([1])), - **regular_op_with_empty_data('strides_concat', - {'op': 'Concat', 'infer': concat_infer, 'axis': 0, 'dim_attrs': {}}), - **regular_op('res', {'kind': 'op', 'type': 'Result', 'op': 'Result', 'infer': lambda x: None}) - } - - edges_ref_extended_inputs = ( - *connect('input', '0:strided_slice_ref'), - - *connect('begin', '0:begin_concat'), - *connect('begin_placeholder', '1:begin_concat'), - *connect('begin_concat', '1:strided_slice_ref'), - - *connect('end', '0:end_concat'), - *connect('end_placeholder', '1:end_concat'), - *connect('end_concat', '2:strided_slice_ref'), - - *connect('strides', '0:strides_concat'), - *connect('strides_placeholder', '1:strides_concat'), - *connect('strides_concat', '3:strided_slice_ref'), - - *connect('strided_slice_ref', 'res') - ) - - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - graph_ref = build_graph(nodes, edges_ref_extended_inputs, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref.stage = 'middle' - - graph = partial_infer(graph) - StridedSliceNormalizer().find_and_replace_pattern(graph) - graph = partial_infer(graph) - graph_ref = partial_infer(graph_ref) - - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=False) - self.assertTrue(flag, 'Graphs after StridedSliceNormalizer do not match to reference: {}'.format(resp)) - - def 
test_strided_slice_extend_inputs_without_strides(self): - input_shape = (16, 100, 100, 3) - nodes = { - **valued_const_with_data('input', np.arange(np.product(input_shape)).reshape(*input_shape)), - **regular_op_with_empty_data('strided_slice', {'op': 'StridedSlice', - 'type': 'StridedSlice', - 'begin_mask': [1, 1, 1], - 'end_mask': [1, 1, 1], - 'shrink_axis_mask': [1, 0, 0], - 'new_axis_mask': [0, 0, 0], - 'ellipsis_mask': [0, 0, 0], - 'infer': StridedSlice.infer}), - - **regular_op_with_empty_data('strided_slice_ref', {'op': 'StridedSlice', - 'type': 'StridedSlice', - 'begin_mask': [1, 1, 1, 0], - 'end_mask': [1, 1, 1, 0], - 'new_axis_mask': [0, 0, 0, 0], - 'shrink_axis_mask': [1, 0, 0, 0], - 'ellipsis_mask': [0, 0, 0, 0], - 'infer': StridedSlice.infer}), - **valued_const_with_data('begin', int64_array([0, 0, 0])), - **valued_const_with_data('begin_placeholder', int64_array([0])), - **regular_op_with_empty_data('begin_concat', - {'op': 'Concat', 'infer': concat_infer, 'axis': 0, 'dim_attrs': {}}), - **valued_const_with_data('end', int64_array([4, 25, 50])), - **valued_const_with_data('end_placeholder', int64_array([0])), - **regular_op_with_empty_data('end_concat', - {'op': 'Concat', 'infer': concat_infer, 'axis': 0, 'dim_attrs': {}}), - **regular_op('res', {'kind': 'op', 'type': 'Result', 'op': 'Result', 'infer': lambda x: None}) - } - - edges_ref_extended_inputs = ( - *connect('input', '0:strided_slice_ref'), - - *connect('begin', '0:begin_concat'), - *connect('begin_placeholder', '1:begin_concat'), - *connect('begin_concat', '1:strided_slice_ref'), - - *connect('end', '0:end_concat'), - *connect('end_placeholder', '1:end_concat'), - *connect('end_concat', '2:strided_slice_ref'), - - *connect('strided_slice_ref', 'res') - ) - - graph = build_graph(nodes, edges_without_strides, nodes_with_edges_only=True) - graph_ref = build_graph(nodes, edges_ref_extended_inputs, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref.stage = 'middle' - - graph = partial_infer(graph) - StridedSliceNormalizer().find_and_replace_pattern(graph) - graph = partial_infer(graph) - graph_ref = partial_infer(graph_ref) - - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=False) - self.assertTrue(flag, 'Graphs after StridedSliceNormalizer do not match to reference: {}'.format(resp)) - - def test_strided_slice_unrooll_ellipsis(self): - input_shape = (10, 10, 10, 10) - # out = inp[1:4, ..., 0:5] -> inp[1:4, :, :, 0:5] => out_shape = (3, 10, 10, 5) - ellipsis_start = 1 - - nodes = { - **valued_const_with_data('input', np.arange(np.product(input_shape)).reshape(*input_shape)), - **regular_op_with_empty_data('strided_slice', {'op': 'StridedSlice', 'type': 'StridedSlice', - 'begin_mask': [1, 1, 1], 'end_mask': [1, 1, 1], - 'shrink_axis_mask': [0, 0, 0], - 'new_axis_mask': [0, 0, 0], - 'ellipsis_mask': [0, 1, 0], - 'infer': StridedSlice.infer}), - - **regular_op_with_empty_data('strided_slice_ref', {'op': 'StridedSlice', 'begin_mask': [1, 0, 0, 1], - 'end_mask': [1, 0, 0, 1], 'ellipsis_mask': [0, 0, 0, 0], - 'new_axis_mask': [0, 0, 0, 0], - 'shrink_axis_mask': [0, 0, 0, 0], - 'infer': StridedSlice.infer}), - - **valued_const_with_data('begin', int64_array([1, 0, 0])), - **valued_const_with_data('split_axis_begin', int64_array(0)), - **valued_const_with_data('splits_lengths_begin', int64_array([ellipsis_start, -1])), - **regular_op_with_empty_data('split_for_begin', {'op': 'VariadicSplit', 'infer': VariadicSplit.infer}), - **empty_data('split_for_begin_data_1'), - 
**valued_const_with_data('begin_placeholder', int64_array([0])), - **regular_op_with_empty_data('begin_concat', - {'op': 'Concat', 'infer': concat_infer, 'axis': 0, 'dim_attrs': {}}), - - **valued_const_with_data('end', int64_array([4, 0, 5])), - **valued_const_with_data('split_axis_end', int64_array(0)), - **valued_const_with_data('splits_lengths_end', int64_array([ellipsis_start, -1])), - **regular_op_with_empty_data('split_for_end', {'op': 'VariadicSplit', 'infer': VariadicSplit.infer}), - **empty_data('split_for_end_data_1'), - **valued_const_with_data('end_placeholder', int64_array([0])), - **regular_op_with_empty_data('end_concat', - {'op': 'Concat', 'infer': concat_infer, 'axis': 0, 'dim_attrs': {}}), - - **valued_const_with_data('strides', int64_array([1, 1, 1])), - **valued_const_with_data('split_axis_strides', int64_array(0)), - **valued_const_with_data('splits_lengths_strides', int64_array([ellipsis_start, -1])), - **regular_op_with_empty_data('split_for_strides', {'op': 'VariadicSplit', 'infer': VariadicSplit.infer}), - **empty_data('split_for_strides_data_1'), - **valued_const_with_data('strides_placeholder', int64_array([1])), - **regular_op_with_empty_data('strides_concat', - {'op': 'Concat', 'infer': concat_infer, 'axis': 0, 'dim_attrs': {}}), - - **regular_op('res', {'kind': 'op', 'type': 'Result', 'op': 'Result', 'infer': lambda x: None}) - } - - edges_ref_ellipsis_unrolled = ( - *connect('input', '0:strided_slice_ref'), - - *connect('begin', '0:split_for_begin'), - *connect('split_axis_begin', '1:split_for_begin'), - *connect('splits_lengths_begin', '2:split_for_begin'), - *connect('split_for_begin:0', '0:begin_concat'), - *connect('begin_placeholder', '1:begin_concat'), - ('split_for_begin', 'split_for_begin_data_1', {'out': 1, 'in': 2}), - ('split_for_begin_data_1', 'begin_concat', {'out': 1, 'in': 2}), - *connect('begin_concat', '1:strided_slice_ref'), - - *connect('end', '0:split_for_end'), - *connect('split_axis_end', '1:split_for_end'), - *connect('splits_lengths_end', '2:split_for_end'), - *connect('split_for_end:0', '0:end_concat'), - *connect('end_placeholder', '1:end_concat'), - ('split_for_end', 'split_for_end_data_1', {'out': 1, 'in': 2}), - ('split_for_end_data_1', 'end_concat', {'out': 1, 'in': 2}), - *connect('end_concat', '2:strided_slice_ref'), - - *connect('strides', '0:split_for_strides'), - *connect('split_axis_strides', '1:split_for_strides'), - *connect('splits_lengths_strides', '2:split_for_strides'), - *connect('split_for_strides:0', '0:strides_concat'), - *connect('strides_placeholder', '1:strides_concat'), - ('split_for_strides', 'split_for_strides_data_1', {'out': 1, 'in': 2}), - ('split_for_strides_data_1', 'strides_concat', {'out': 1, 'in': 2}), - *connect('strides_concat', '3:strided_slice_ref'), - - *connect('strided_slice_ref', 'res') - ) - - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - graph_ref = build_graph(nodes, edges_ref_ellipsis_unrolled, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref.stage = 'middle' - graph = partial_infer(graph) - StridedSliceNormalizer().find_and_replace_pattern(graph) - graph = partial_infer(graph) - graph_ref = partial_infer(graph_ref) - - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=False) - self.assertTrue(flag, 'Graphs after StridedSliceNormalizer do not match to reference: {}'.format(resp)) - - def test_strided_slice_unrooll_ellipsis_without_strides(self): - input_shape = (10, 10, 10, 10) - # out = inp[1:4, ..., 0:5] -> inp[1:4, :, :, 0:5] => 
out_shape = (3, 10, 10, 5) - ellipsis_start = 1 - - nodes = { - **valued_const_with_data('input', np.arange(np.product(input_shape)).reshape(*input_shape)), - **regular_op_with_empty_data('strided_slice', {'op': 'StridedSlice', 'type': 'StridedSlice', - 'begin_mask': [1, 1, 1], 'end_mask': [1, 1, 1], - 'shrink_axis_mask': [0, 0, 0], - 'new_axis_mask': [0, 0, 0], - 'ellipsis_mask': [0, 1, 0], - 'infer': StridedSlice.infer}), - - **regular_op_with_empty_data('strided_slice_ref', {'op': 'StridedSlice', 'begin_mask': [1, 0, 0, 1], - 'end_mask': [1, 0, 0, 1], 'ellipsis_mask': [0, 0, 0, 0], - 'new_axis_mask': [0, 0, 0, 0], - 'shrink_axis_mask': [0, 0, 0, 0], - 'infer': StridedSlice.infer}), - - **valued_const_with_data('begin', int64_array([1, 0, 0])), - **valued_const_with_data('split_axis_begin', int64_array(0)), - **valued_const_with_data('splits_lengths_begin', int64_array([ellipsis_start, -1])), - **regular_op_with_empty_data('split_for_begin', {'op': 'VariadicSplit', 'infer': VariadicSplit.infer}), - **empty_data('split_for_begin_data_1'), - **valued_const_with_data('begin_placeholder', int64_array([0])), - **regular_op_with_empty_data('begin_concat', - {'op': 'Concat', 'infer': concat_infer, 'axis': 0, 'dim_attrs': {}}), - - **valued_const_with_data('end', int64_array([4, 0, 5])), - **valued_const_with_data('split_axis_end', int64_array(0)), - **valued_const_with_data('splits_lengths_end', int64_array([ellipsis_start, -1])), - **regular_op_with_empty_data('split_for_end', {'op': 'VariadicSplit', 'infer': VariadicSplit.infer}), - **empty_data('split_for_end_data_1'), - **valued_const_with_data('end_placeholder', int64_array([0])), - **regular_op_with_empty_data('end_concat', - {'op': 'Concat', 'infer': concat_infer, 'axis': 0, 'dim_attrs': {}}), - - **regular_op('res', {'kind': 'op', 'type': 'Result', 'op': 'Result', 'infer': lambda x: None}) - } - - edges_ref_ellipsis_unrolled = ( - *connect('input', '0:strided_slice_ref'), - - *connect('begin', '0:split_for_begin'), - *connect('split_axis_begin', '1:split_for_begin'), - *connect('splits_lengths_begin', '2:split_for_begin'), - *connect('split_for_begin:0', '0:begin_concat'), - *connect('begin_placeholder', '1:begin_concat'), - ('split_for_begin', 'split_for_begin_data_1', {'out': 1, 'in': 2}), - ('split_for_begin_data_1', 'begin_concat', {'out': 1, 'in': 2}), - *connect('begin_concat', '1:strided_slice_ref'), - - *connect('end', '0:split_for_end'), - *connect('split_axis_end', '1:split_for_end'), - *connect('splits_lengths_end', '2:split_for_end'), - *connect('split_for_end:0', '0:end_concat'), - *connect('end_placeholder', '1:end_concat'), - ('split_for_end', 'split_for_end_data_1', {'out': 1, 'in': 2}), - ('split_for_end_data_1', 'end_concat', {'out': 1, 'in': 2}), - *connect('end_concat', '2:strided_slice_ref'), - - *connect('strided_slice_ref', 'res') - ) - - graph = build_graph(nodes, edges_without_strides, nodes_with_edges_only=True) - graph_ref = build_graph(nodes, edges_ref_ellipsis_unrolled, nodes_with_edges_only=True) - graph.stage = 'middle' - graph_ref.stage = 'middle' - graph = partial_infer(graph) - StridedSliceNormalizer().find_and_replace_pattern(graph) - graph = partial_infer(graph) - graph_ref = partial_infer(graph_ref) - - (flag, resp) = compare_graphs(graph, graph_ref, 'res', check_op_attrs=False) - self.assertTrue(flag, 'Graphs after StridedSliceNormalizer do not match to reference: {}'.format(resp)) - - -class TestStridedSliceShapeInferAfterNormalizer(unittest.TestCase): - # check that after inserting Splits and 
Concats we still get the same shape - - def run_infer_test(self, inp, ref_res, begin, end, strides, begin_mask, end_mask, - shrink_axis_mask, new_axis_mask, ellipsis_mask): - nodes = { - **valued_const_with_data('input', np.arange(np.product(inp)).reshape(*inp)), - **valued_const_with_data('begin', int64_array(begin)), - **valued_const_with_data('end', int64_array(end)), - **valued_const_with_data('strides', int64_array(strides)), - **regular_op_with_empty_data('strided_slice', {'op': 'StridedSlice', 'type': 'StridedSlice', - 'begin_mask': begin_mask, 'end_mask': end_mask, - 'shrink_axis_mask': shrink_axis_mask, - 'new_axis_mask': new_axis_mask, - 'ellipsis_mask': ellipsis_mask, - 'infer': StridedSlice.infer}), - **regular_op('res', {'kind': 'op', 'type': 'Result', 'op': 'Result', 'infer': lambda x: None}) - } - - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - graph.stage = 'middle' - graph = partial_infer(graph) - StridedSliceNormalizer().find_and_replace_pattern(graph) - graph = partial_infer(graph) - - node = Node(graph, 'strided_slice') - res = node.out_port(0).data.get_shape() - npt.assert_array_equal(res, ref_res) - - def test_strided_slice_infer_after_normalizer_1( - self, # inp[0, :34, 20, :2] - inp=(1, 35, 35, 3), ref_res=(34, 2), - begin=(0, 0, 0, 0), end=(1, 34, 20, 2), strides=(1, 1, 1, 1), - begin_mask=(0,), end_mask=(0,), - shrink_axis_mask=(1, 0, 1, 0), new_axis_mask=(0,), - ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_2( - self, # inp[0:3, 0:1, 5:0:-1] - inp=(10, 10, 10, 10), ref_res=(3, 1, 5, 10), - begin=(0, 0, 5), end=(3, 1, 0), strides=(1, 1, -1), begin_mask=(1, 1, 1), end_mask=(1, 1, 1), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,)): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_3( - self, # inp[1:34, 0, :, :2] - inp=(1, 35, 35, 3), ref_res=(1, 35, 2), - begin=(0, 0, 0, 0), end=(1, 34, 0, 2), strides=(1, 1, 1, 1), begin_mask=(1, 1, 0, 0), end_mask=(1, 0, 0, 1), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 0, 0), ellipsis_mask=(0, 0, 0, 0) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_4( - self, # inp[1:34, :, :, :2] begin mask is (1,) so only one value can be specified - inp=(1, 35, 35, 3), ref_res=(1, 35, 2), - begin=(0, 0, 0, 0), end=(1, 34, 20, 2), strides=(1, 1, 1, 1), begin_mask=(1, 0, 0,), end_mask=(1, 0, 0, 1), - shrink_axis_mask=(0, 1), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_5( - self, # inp[:, :, :, :] since all begin and end masks are zero - inp=(1, 35, 35, 3), ref_res=(1, 35, 35, 3), - begin=(1, 10, 10, 0), end=(1, 34, 20, 2), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), - end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_6( - self, # inp[0] - inp=(1, 35, 35, 3), ref_res=(35, 35, 3), - 
begin=(0,), end=(1,), strides=(1,), begin_mask=(1,), end_mask=(0,), - shrink_axis_mask=(1,), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_7( - self, # inp[0, 20], ends can be of any value - inp=(1, 35, 35, 3), ref_res=(35, 3), - begin=(0, 20), end=(1, 9999), strides=(1, 1), begin_mask=(0,), end_mask=(0,), - shrink_axis_mask=(1, 1), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_8( - self, # inp[0, 0:34, 20:22, new_axis], both new_axis and shrink_axis are present - inp=(1, 35, 35, 3), ref_res=(34, 2, 1, 3), - begin=(0, 0, 20, 0), end=(1, 34, 22, 2), strides=(1, 1, 1, 1), begin_mask=(0,), end_mask=(0,), - shrink_axis_mask=(1,), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_9( - self, # inp[:, 0:4, 20, new_axis], both new_axis and shrink_axis are present - inp=(1, 35, 35, 3), ref_res=(1, 4, 1, 3), - begin=(0, 0, 20, 0), end=(0, 4, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 1, 0, 0), end_mask=(0, 1, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_10( - self, # inp[:, 0:4, new_axis, 20], both new_axis and shrink_axis are present - inp=(1, 35, 35, 3), ref_res=(1, 4, 1, 3), - begin=(0, 0, 0, 20), end=(0, 4, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 1, 0, 0), end_mask=(0, 1, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_11( - self, # inp[0, :, 0:34, 20:22, new_axis], both new_axis and shrink_axis are present - inp=(1, 3, 35, 35), ref_res=(3, 34, 2, 1), - begin=(0, 0, 0, 20, 0), end=(1, 0, 34, 22, 0), strides=(1, 1, 1, 1, 1), - begin_mask=(1, 0, 1, 1, 1), end_mask=(1, 0, 1, 1, 1), - shrink_axis_mask=(1,), new_axis_mask=(0, 0, 0, 0, 1), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_12( - self, # inp[0, :34, 20, :2] - inp=(1, 35, 35, 3), ref_res=(34, 2), - begin=(0, 0, 0, 0), end=(1, 34, 20, 2), strides=(1, 1, 1, 1), begin_mask=(0, 1, 1, 1), - end_mask=(0, 1, 1, 1), - shrink_axis_mask=(1, 0, 1, 0), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_13( - self, # inp[0, 0, 0], since it's shrink_axis ends can be of any value - inp=(1, 35, 35, 3), ref_res=(3,), - begin=(0, 0, 0), end=(1, 34444, 20), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(1, 1, 1), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, 
end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_14( - self, # inp[0, 0, 0], since begin_mask is [0], begin can be of any value - inp=(1, 35, 35, 3), ref_res=(1, 18, 18, 3), - begin=(0, 0, 0), end=(1, 35, 35), strides=(2, 2, 2), begin_mask=(1, 1, 1), end_mask=(1, 1, 1), - shrink_axis_mask=(0, 0, 0), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - # with ellipsis - def test_strided_slice_infer_after_normalizer_15( - self, # inp[..., np.newaxis] - inp=(1, 35, 35), ref_res=(1, 35, 35, 1), - begin=(101, 0), end=(0, 0), strides=(-1, -1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(1, 0) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_16( - self, # inp_shape = (1, 720, 1080), out = inp[..., :100, None] => out_shape = (1, 720, 100, 1) - inp=(1, 720, 1080), ref_res=(1, 720, 100, 1), - begin=(0, 0, 0), end=(0, 100, 0), strides=(1, 1, 1), begin_mask=(0, 1, 0), end_mask=(0, 1, 0), - shrink_axis_mask=(0,), new_axis_mask=(0, 0, 1), ellipsis_mask=(1,) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_17( - self, # inp_shape = (1, 720, 1080, 3), out = inp[..., :-1] => out_shape = (1, 720, 100, 2) - inp=(1, 720, 1080, 3), ref_res=(1, 720, 1080, 2), - begin=(0, 0), end=(0, -1), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 1), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_infer_after_normalizer_18( - self, # inp_shape = (1, 720, 1080, 3), out = inp[..., 2] => out_shape = (1, 720, 1080) - inp=(1, 720, 1080, 3), ref_res=(1, 720, 1080), - begin=(0, 2), end=(0, 0), strides=(1, 1), begin_mask=(0, 1), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ): - self.run_infer_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - # automatically generated the whole range of 2d slices over 2d, 3d and 4d input tensors - def test_normalizer_auto_infer_strided_slice_2d_over_2d_0(self): - """ - inp_shape = (1, 100), out = inp[:, :] => out_shape = (1, 100) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_1(self): - """ - inp_shape = (1, 100), out = inp[:, None] => out_shape = (1, 1, 100) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(1, 1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_2(self): - """ - inp_shape = (1, 100), out = inp[:, 0] => out_shape = (1,) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(1,), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), 
new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_3(self): - """ - inp_shape = (1, 100), out = inp[..., :] => out_shape = (1, 100) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_4(self): - """ - inp_shape = (1, 100), out = inp[..., None] => out_shape = (1, 100, 1) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(1, 100, 1), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_5(self): - """ - inp_shape = (1, 100), out = inp[..., 0] => out_shape = (1,) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(1,), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_6(self): - """ - inp_shape = (1, 100), out = inp[None, :] => out_shape = (1, 1, 100) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(1, 1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_7(self): - """ - inp_shape = (1, 100), out = inp[None, None] => out_shape = (1, 1, 1, 100) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(1, 1, 1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 1), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_8(self): - """ - inp_shape = (1, 100), out = inp[None, 0] => out_shape = (1, 100) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_9(self): - """ - inp_shape = (1, 100), out = inp[0, :] => out_shape = (100,) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(100,), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_10(self): - """ - inp_shape = (1, 100), out = inp[0, None] => out_shape = (1, 100) - """ - self.run_infer_test( - inp=(1, 100), ref_res=(1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_2d_11(self): - """ - inp_shape = (1, 100), out = inp[0, 0] => out_shape = () - """ - self.run_infer_test( - inp=(1, 100), ref_res=(), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_0(self): - """ - inp_shape = (1, 100, 200), out = inp[:, :] => out_shape = (1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), 
begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_1(self): - """ - inp_shape = (1, 100, 200), out = inp[:, None] => out_shape = (1, 1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_2(self): - """ - inp_shape = (1, 100, 200), out = inp[:, 0] => out_shape = (1, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_3(self): - """ - inp_shape = (1, 100, 200), out = inp[..., :] => out_shape = (1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_4(self): - """ - inp_shape = (1, 100, 200), out = inp[..., None] => out_shape = (1, 100, 200, 1) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 100, 200, 1), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_5(self): - """ - inp_shape = (1, 100, 200), out = inp[..., 0] => out_shape = (1, 100) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_6(self): - """ - inp_shape = (1, 100, 200), out = inp[None, :] => out_shape = (1, 1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_7(self): - """ - inp_shape = (1, 100, 200), out = inp[None, None] => out_shape = (1, 1, 1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 1, 1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 1), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_8(self): - """ - inp_shape = (1, 100, 200), out = inp[None, 0] => out_shape = (1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_9(self): - """ - inp_shape = (1, 100, 200), out = inp[0, :] => out_shape = (100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def 
test_normalizer_auto_infer_strided_slice_2d_over_3d_10(self): - """ - inp_shape = (1, 100, 200), out = inp[0, None] => out_shape = (1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_3d_11(self): - """ - inp_shape = (1, 100, 200), out = inp[0, 0] => out_shape = (200,) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(200,), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_0(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, :] => out_shape = (1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_1(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, None] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_2(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, 0] => out_shape = (1, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_3(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., :] => out_shape = (1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_4(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., None] => out_shape = (1, 100, 200, 3, 1) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3, 1), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_5(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., 0] => out_shape = (1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_6(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, :] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_7(self): - """ - inp_shape 
= (1, 100, 200, 3), out = inp[None, None] => out_shape = (1, 1, 1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 1), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_8(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, 0] => out_shape = (1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_9(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, :] => out_shape = (100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_10(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, None] => out_shape = (1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_2d_over_4d_11(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, 0] => out_shape = (200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - # automatically generated slices from 3d to 5d d input tensors - # fixed number of ellipsis, newaxis and shrink_axis - def test_normalizer_auto_infer_strided_slice_3d_over_3d_0(self): - """ - inp_shape = (1, 100, 200), out = inp[None, ..., 0] => out_shape = (1, 1, 100) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 1, 100), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(0, 0, 1), new_axis_mask=(1, 0, 0), ellipsis_mask=(0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_3d_1(self): - """ - inp_shape = (1, 100, 200), out = inp[..., None, 0] => out_shape = (1, 100, 1) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 100, 1), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(0, 0, 1), new_axis_mask=(0, 1, 0), ellipsis_mask=(1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_3d_2(self): - """ - inp_shape = (1, 100, 200), out = inp[0, None, ...] 
=> out_shape = (1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 100, 200), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(1, 0, 0), new_axis_mask=(0, 1, 0), ellipsis_mask=(0, 0, 1) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_3d_3(self): - """ - inp_shape = (1, 100, 200), out = inp[0, ..., None] => out_shape = (100, 200, 1) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(100, 200, 1), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(1, 0, 0), new_axis_mask=(0, 0, 1), ellipsis_mask=(0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_3d_4(self): - """ - inp_shape = (1, 100, 200), out = inp[None, 0, ...] => out_shape = (1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 100, 200), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(0, 1, 0), new_axis_mask=(1, 0, 0), ellipsis_mask=(0, 0, 1) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_3d_5(self): - """ - inp_shape = (1, 100, 200), out = inp[..., 0, None] => out_shape = (1, 100, 1) - """ - self.run_infer_test( - inp=(1, 100, 200), ref_res=(1, 100, 1), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(0, 1, 0), new_axis_mask=(0, 0, 1), ellipsis_mask=(1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_4d_0(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, ..., 0, :] => out_shape = (1, 1, 100, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_4d_1(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., None, 0, :] => out_shape = (1, 100, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_4d_2(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, None, ..., :] => out_shape = (1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_4d_3(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, ..., None, :] => out_shape = (100, 200, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(100, 200, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_4d_4(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, 0, ..., :] => out_shape = (1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 
0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_4d_5(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., 0, None, :] => out_shape = (1, 100, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_5d_0(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, ..., 0, :, :] => out_shape = (1, 1, 100, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 1, 100, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_5d_1(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., None, 0, :, :] => out_shape = (1, 100, 1, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_5d_2(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, None, ..., :, :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_5d_3(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, ..., None, :, :] => out_shape = (100, 200, 1, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(100, 200, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_5d_4(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, 0, ..., :, :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_3d_over_5d_5(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., 0, None, :, :] => out_shape = (1, 100, 1, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) 
- - def test_normalizer_auto_infer_strided_slice_4d_over_4d_0(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, ..., 0, :] => out_shape = (1, 1, 100, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_1(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., None, 0, :] => out_shape = (1, 100, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_2(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, None, ..., :] => out_shape = (1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_3(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, ..., None, :] => out_shape = (100, 200, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(100, 200, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_4(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, 0, ..., :] => out_shape = (1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_5(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., 0, None, :] => out_shape = (1, 100, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_6(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, ..., :, 0] => out_shape = (1, 1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 200), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_7(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., None, :, 0] => out_shape = (1, 100, 1, 200) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 1, 200), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(1, 0, 
0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_8(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, None, :, ...] => out_shape = (1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_9(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, ..., :, None] => out_shape = (100, 200, 3, 1) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(100, 200, 3, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_10(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, 0, :, ...] => out_shape = (1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_11(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., 0, :, None] => out_shape = (1, 100, 3, 1) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 3, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_12(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, :, ..., 0] => out_shape = (1, 1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 200), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_13(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., :, None, 0] => out_shape = (1, 100, 200, 1) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_14(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, :, None, ...] 
=> out_shape = (100, 1, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(100, 1, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_15(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, :, ..., None] => out_shape = (100, 200, 3, 1) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(100, 200, 3, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_16(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, :, 0, ...] => out_shape = (1, 1, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_17(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., :, 0, None] => out_shape = (1, 100, 200, 1) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_18(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, None, ..., 0] => out_shape = (1, 1, 100, 200) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 200), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_19(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, ..., None, 0] => out_shape = (1, 100, 200, 1) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_20(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, 0, None, ...] 
=> out_shape = (1, 1, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_21(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, 0, ..., None] => out_shape = (1, 200, 3, 1) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 200, 3, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_22(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, None, 0, ...] => out_shape = (1, 1, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_4d_23(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, ..., 0, None] => out_shape = (1, 100, 200, 1) - """ - self.run_infer_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_0(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, ..., 0, :, :] => out_shape = (1, 1, 100, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 1, 100, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_1(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., None, 0, :, :] => out_shape = (1, 100, 1, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_2(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, None, ..., :, :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_3(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, ..., None, :, :] => out_shape = (100, 200, 1, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(100, 200, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), 
new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_4(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, 0, ..., :, :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_5(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., 0, None, :, :] => out_shape = (1, 100, 1, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_6(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, ..., :, 0, :] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 1, 100, 200, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_7(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., None, :, 0, :] => out_shape = (1, 100, 1, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 1, 200, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_8(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, None, :, ..., :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_9(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, ..., :, None, :] => out_shape = (100, 200, 10, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(100, 200, 10, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_10(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, 0, :, ..., :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def 
test_normalizer_auto_infer_strided_slice_4d_over_5d_11(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., 0, :, None, :] => out_shape = (1, 100, 10, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 10, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_12(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, :, ..., 0, :] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 1, 100, 200, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_13(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., :, None, 0, :] => out_shape = (1, 100, 200, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 200, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_14(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, :, None, ..., :] => out_shape = (100, 1, 200, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(100, 1, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_15(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, :, ..., None, :] => out_shape = (100, 200, 10, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(100, 200, 10, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_16(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, :, 0, ..., :] => out_shape = (1, 1, 200, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 1, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_17(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., :, 0, None, :] => out_shape = (1, 100, 200, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 200, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_18(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, 
None, ..., 0, :] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 1, 100, 200, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_19(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, ..., None, 0, :] => out_shape = (1, 100, 200, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 200, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_20(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, 0, None, ..., :] => out_shape = (1, 1, 200, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 1, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_21(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, 0, ..., None, :] => out_shape = (1, 200, 10, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 200, 10, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_22(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, None, 0, ..., :] => out_shape = (1, 1, 200, 10, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 1, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_normalizer_auto_infer_strided_slice_4d_over_5d_23(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, ..., 0, None, :] => out_shape = (1, 100, 200, 1, 3) - """ - self.run_infer_test( - inp=(1, 100, 200, 10, 3), ref_res=(1, 100, 200, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), - end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - -class TestStridedSlicePermute(unittest.TestCase): - def run_permute_test(self, inp, ref_res, begin, end, strides, begin_mask, end_mask, - shrink_axis_mask, new_axis_mask, ellipsis_mask): - from openvino.tools.mo.middle.ApplyPermutations import ApplyPermutation - from openvino.tools.mo.middle.MergeNodesPermutations import MergeNodesPermutations - from openvino.tools.mo.middle.ApplyNHWCtoNCHWpermutation import ApplyNHWCtoNCHWpermutation - from openvino.tools.mo.middle.InsertLayoutPropagationTransposes import InsertLayoutPropagationTranspose - from openvino.tools.mo.middle.MarkSubgraphsWithCorrectLayout import MarkSubGraphsWithCorrectLayout - nodes = { - **regular_op_with_shaped_data('input', int64_array(inp), {'op': 
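# --- Illustrative NumPy check (not part of the deleted test file) -----------
# The docstrings above express each mask combination through its NumPy slice
# equivalent (shrink_axis ~ integer index, new_axis ~ None, ellipsis ~ ...),
# so the expected output shapes can be sanity-checked with plain NumPy.
# A minimal sketch, assuming nothing beyond the shapes quoted in the tests:
import numpy as np
x4 = np.zeros((1, 100, 200, 3))
assert x4[:, 0, ..., None].shape == (1, 200, 3, 1)         # mirrors ..._4d_over_4d_21
x5 = np.zeros((1, 100, 200, 10, 3))
assert x5[:, 0, ..., None, :].shape == (1, 200, 10, 1, 3)  # mirrors ..._4d_over_5d_21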
'Parameter', 'type': 'Parameter', - # need to specify shape in 2 places - 'shape': int64_array(inp), - 'infer': Parameter.infer}), - **valued_const_with_data('begin', int64_array(begin)), - **valued_const_with_data('end', int64_array(end)), - **valued_const_with_data('strides', int64_array(strides)), - **regular_op_with_empty_data('strided_slice', - {'op': 'StridedSlice', 'type': 'StridedSlice', # need for permute - 'begin_mask': begin_mask, 'end_mask': end_mask, - 'shrink_axis_mask': shrink_axis_mask, - 'new_axis_mask': new_axis_mask, - 'ellipsis_mask': ellipsis_mask, - 'infer': StridedSlice.infer}), - **regular_op('res', {'kind': 'op', 'type': 'Result', 'op': 'Result', 'infer': lambda x: None}) - } - - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - graph.stage = 'middle' - graph.graph['layout'] = 'NHWC' - - graph = partial_infer(graph) - StridedSliceNormalizer().find_and_replace_pattern(graph) - graph = partial_infer(graph) - MarkSubGraphsWithCorrectLayout().find_and_replace_pattern(graph) - InsertLayoutPropagationTranspose().find_and_replace_pattern(graph) - ApplyNHWCtoNCHWpermutation().find_and_replace_pattern(graph) - MergeNodesPermutations().find_and_replace_pattern(graph) - ApplyPermutation().find_and_replace_pattern(graph) - graph = partial_infer(graph) - - node = Node(graph, 'strided_slice') - res = node.out_port(0).data.get_shape() - npt.assert_array_equal(res, ref_res) - - def test_strided_slice_permute_1( - self, # inp[0, :34, 20, :2] - inp=(1, 35, 35, 3), ref_res=(34, 2), - begin=(0, 0, 20, 0), end=(1, 34, 21, 2), strides=(1, 1, 1, 1), - begin_mask=(0,), end_mask=(0,), - shrink_axis_mask=(1, 0, 1, 0), new_axis_mask=(0,), - ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_2( - self, # inp[0:3, 0:1, 5:0:-1] - inp=(10, 10, 10, 10), ref_res=(3, 10, 1, 5), - begin=(0, 0, 5), end=(3, 1, 0), strides=(1, 1, -1), begin_mask=(1, 1, 1), end_mask=(1, 1, 1), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,)): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_3( - self, # inp[1:34, 0, :, :2] - inp=(1, 35, 35, 3), ref_res=(1, 35, 2), - begin=(0, 0, 0, 0), end=(1, 34, 0, 2), strides=(1, 1, 1, 1), begin_mask=(1, 1, 0, 0), end_mask=(1, 0, 0, 1), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 0, 0), ellipsis_mask=(0, 0, 0, 0) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_4( - # no shrink/new axis therefore will be permuted - # inp[0:1, :, :, :2] begin mask is (1,) so only one begin value need to be specified - self, - inp=(16, 35, 35, 3), ref_res=(1, 2, 35, 35), - begin=(0, 0, 0, 0), end=(1, 34, 20, 2), strides=(1, 1, 1, 1), begin_mask=(1, 0, 0,), end_mask=(1, 0, 0, 1), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_5( - self, # inp[:, :, :, :] since all begin and end masks are zero - inp=(1, 35, 35, 3), ref_res=(1, 3, 35, 35), - begin=(1, 10, 10, 0), end=(1, 34, 20, 2), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), - end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0,), new_axis_mask=(0,), 
ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_6( - self, # inp[0] - inp=(1, 35, 35, 3), ref_res=(35, 35, 3), - begin=(0,), end=(1,), strides=(1,), begin_mask=(1,), end_mask=(0,), - shrink_axis_mask=(1,), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_7( - self, # inp[0, 20], ends can be of any value - inp=(1, 35, 35, 3), ref_res=(35, 3), - begin=(0, 20), end=(1, 9999), strides=(1, 1), begin_mask=(0,), end_mask=(0,), - shrink_axis_mask=(1, 1), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_8( - self, # inp[0, 0:34, 20:22, new_axis], both new_axis and shrink_axis are present - inp=(1, 35, 35, 3), ref_res=(34, 2, 1, 3), - begin=(0, 0, 20, 0), end=(1, 34, 22, 2), strides=(1, 1, 1, 1), begin_mask=(0,), end_mask=(0,), - shrink_axis_mask=(1,), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_9( - self, # inp[:, 0:4, 20, new_axis], both new_axis and shrink_axis are present - inp=(1, 35, 35, 3), ref_res=(1, 4, 1, 3), - begin=(0, 0, 20, 0), end=(0, 4, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 1, 0, 0), end_mask=(0, 1, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_10( - self, # inp[:, 0:4, new_axis, 20], both new_axis and shrink_axis are present - inp=(1, 35, 35, 3), ref_res=(1, 4, 1, 3), - begin=(0, 0, 0, 20), end=(0, 4, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 1, 0, 0), end_mask=(0, 1, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_11( - self, # inp[0, :, 0:34, 1:3, new_axis], both new_axis and shrink_axis are present - inp=(1, 35, 35, 3), ref_res=(35, 34, 2, 1), - begin=(0, 0, 0, 1, 0), end=(1, 0, 34, 3, 0), strides=(1, 1, 1, 1, 1), - begin_mask=(1, 0, 1, 1, 1), end_mask=(1, 0, 1, 1, 1), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 0, 0, 1), ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_12( - self, # inp[0, :34, 20, :2] - inp=(1, 35, 35, 3), ref_res=(34, 2), - begin=(0, 0, 0, 0), end=(1, 34, 20, 2), strides=(1, 1, 1, 1), begin_mask=(0, 1, 1, 1), - end_mask=(0, 1, 1, 1), - shrink_axis_mask=(1, 0, 1, 0), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_13( - self, # inp[0, 0, 0], since it's shrink_axis ends can be of any value - inp=(1, 35, 35, 3), ref_res=(3,), - begin=(0, 0, 0), end=(1, 34444, 20), strides=(1, 1, 1), begin_mask=(0, 0, 0), 
end_mask=(0, 0, 0), - shrink_axis_mask=(1, 1, 1), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_14( - self, # inp[0, 0, 0], since begin_mask is [0], begin can be of any value - inp=(1, 35, 35, 3), ref_res=(1, 3, 18, 18), - begin=(0, 0, 0), end=(1, 35, 35), strides=(2, 2, 2), begin_mask=(1, 1, 1), end_mask=(1, 1, 1), - shrink_axis_mask=(0, 0, 0), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - # with ellipsis - - def test_strided_slice_permute_15( - self, # inp[..., np.newaxis] - inp=(1, 35, 35), ref_res=(1, 35, 35, 1), - begin=(101, 0), end=(0, 0), strides=(-1, -1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(1, 0) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_16( - self, # inp_shape = (1, 720, 1080), out = inp[..., :100, None] => out_shape = (1, 720, 100, 1) - inp=(1, 720, 1080), ref_res=(1, 720, 100, 1), - begin=(0, 0, 0), end=(0, 100, 0), strides=(1, 1, 1), begin_mask=(0, 1, 0), end_mask=(0, 1, 0), - shrink_axis_mask=(0,), new_axis_mask=(0, 0, 1), ellipsis_mask=(1,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_17( - self, # inp_shape = (1, 720, 1080, 3), out = inp[..., :-1] => out_shape = (1, 720, 100, 2) - inp=(1, 720, 1080, 3), ref_res=(1, 2, 720, 1080), - begin=(0, 0), end=(0, -1), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 1), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_18( - self, # inp_shape = (1, 720, 1080, 3), out = inp[..., 2] => out_shape = (1, 720, 1080) - inp=(1, 720, 1080, 3), ref_res=(1, 720, 1080), - begin=(0, 2), end=(0, 0), strides=(1, 1), begin_mask=(0, 1), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_permute_19( - self, # inp_shape = (1, 720, 1080, 3), out = input[..., 0:10, 0:3] => out_shape = (1, 720, 10, 3) - inp=(1, 720, 1080, 3), ref_res=(1, 3, 720, 10), - begin=(0, 0, 0), end=(0, 10, 3), strides=(1, 1, 1), begin_mask=(0, 1, 1), end_mask=(0, 1, 1), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(1,) - ): - self.run_permute_test(inp, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - # automatically generated permutation tests - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_0(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC, (1, 3, 100, 200) in NCHW, - out_nhwc = inp[:, :], - out_nchw = inp[:, :, :, :] => out_shape = (1, 3, 100, 200) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(1, 3, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def 
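# --- Illustrative NumPy check (not part of the deleted test file) -----------
# The permute tests expect that, when the slice preserves the input rank,
# the NHWC result is reported in NCHW order after the layout passes run.
# A minimal sketch of that expectation, assuming only the shapes quoted in
# test_strided_slice_permute_19 and ..._2d_slice_over_4d_0 above:
import numpy as np
x = np.zeros((1, 720, 1080, 3), dtype=np.float32)           # NHWC input
y = x[..., 0:10, 0:3]                                       # slice from permute test 19
assert y.shape == (1, 720, 10, 3)                           # NHWC result
assert y.transpose(0, 3, 1, 2).shape == (1, 3, 720, 10)     # ref_res once layout is NCHW
z = np.zeros((1, 100, 200, 3), dtype=np.float32)
assert z.transpose(0, 3, 1, 2).shape == (1, 3, 100, 200)    # full slice, auto permute test 0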
test_permute_auto_infer_strided_slice_2d_slice_over_4d_1(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC, (1, 3, 100, 200) in NCHW, - out_nhwc = inp[:, None] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_2(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC, (1, 3, 100, 200) in NCHW, - out_nhwc = inp[:, 0] => out_shape = (1, 200, 3) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(1, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_3(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC, (1, 3, 100, 200) in NCHW, - out_nhwc = inp[..., :], - out_nchw = inp[:, :, :, ...] => out_shape = (1, 3, 100, 200) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(1, 3, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_4(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC, (1, 3, 100, 200) in NCHW, - out_nhwc = inp[..., None] => out_shape = (1, 100, 200, 3, 1) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3, 1), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(1, 0) - ) - - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_5(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC, (1, 3, 100, 200) in NCHW, - out_nhwc = inp[..., 0], - out_nchw = inp[:, 0, :, ...] 
=> out_shape = (1, 100, 200) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_6(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC, (1, 3, 100, 200) in NCHW, - out_nhwc = inp[None, :] => out_shape = (1, 1, 100, 200, 3) - out_nchw = inp[None, :, :, :, :] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_7(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC, (1, 3, 100, 200) in NCHW, - out_nhwc = inp[None, None] => out_shape = (1, 1, 1, 100, 200, 3) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(1, 1, 1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 1), ellipsis_mask=(0, 0) - ) - - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_8(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC, (1, 3, 100, 200) in NCHW, - out_nhwc = inp[None, 0] => out_shape = (1, 100, 200, 3) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_9(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC - out_nhwc = inp[0, :] => out_shape = (100, 200, 3) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_10(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC - out_nhwc = inp[0, None] => out_shape = (1, 100, 200, 3) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_permute_auto_infer_strided_slice_2d_slice_over_4d_11(self): - """ - inp_shape = (1, 100, 200, 3) in NHWC - out_nhwc = inp[0, 0] => out_shape = (200, 3) - """ - self.run_permute_test( - inp=(1, 100, 200, 3), ref_res=(200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - -class TestStridedSliceMaskAlignment(unittest.TestCase): - def run_align_test(self, inp, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask, - begin_mask_ref, end_mask_ref, shrink_axis_mask_ref, new_axis_mask_ref, ellipsis_mask_ref): - nodes = { - **regular_op_with_shaped_data('input', int64_array(inp), {'op': 'Parameter', 'type': 'Parameter', - # need to specify shape in 2 places - 'shape': int64_array(inp), - 'infer': Parameter.infer}), - **valued_const_with_data('begin', int64_array(begin)), - **valued_const_with_data('end', int64_array(end)), - **valued_const_with_data('strides', int64_array(strides)), - 
**regular_op_with_empty_data('strided_slice', - {'op': 'StridedSlice', 'type': 'StridedSlice', # need for permute - 'begin_mask': begin_mask, 'end_mask': end_mask, - 'shrink_axis_mask': shrink_axis_mask, - 'new_axis_mask': new_axis_mask, - 'ellipsis_mask': ellipsis_mask}), - **regular_op('res', {'kind': 'op', 'type': 'Result', 'op': 'Result', 'infer': lambda x: None}) - } - - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - graph.stage = 'middle' - graph.graph['layout'] = 'NHWC' - - nodes_ref = nodes.copy() - nodes_ref.update({ - **regular_op_with_empty_data('strided_slice', - {'op': 'StridedSlice', 'type': 'StridedSlice', # need for permute - 'begin_mask': begin_mask_ref, 'end_mask': end_mask_ref, - 'shrink_axis_mask': shrink_axis_mask_ref, - 'new_axis_mask': new_axis_mask_ref, - 'ellipsis_mask': ellipsis_mask_ref}), - }) - - graph_ref = build_graph(nodes_ref, edges, nodes_with_edges_only=True) - res, msg = compare_graphs(graph, graph_ref, 'res', check_op_attrs=True) - assert res, msg - - def test_mask_align_compare_graphs_1(self): - self.run_align_test( - inp=(1, 100, 200, 3), begin=(0, 0), end=(0, 0), strides=(1, 1), - begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0), - begin_mask_ref=(0, 0), end_mask_ref=(0, 0), - shrink_axis_mask_ref=(1, 1), new_axis_mask_ref=(0, 0), ellipsis_mask_ref=(0, 0), - - ) - - def test_mask_align_compare_graphs_2(self): - # begin_masks have different values, but they alight to the same mask - self.run_align_test( - inp=(1, 100, 200, 3), begin=(0, 0), end=(0, 0), strides=(1, 1), - begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0), - begin_mask_ref=(0), end_mask_ref=(0, 0), - shrink_axis_mask_ref=(1, 1), new_axis_mask_ref=(0, 0), ellipsis_mask_ref=(0, 0), - - ) - - def test_mask_align_compare_graphs_3(self): - # begin_masks have different values, but they alight to the same mask - self.run_align_test( - inp=(1, 100, 200, 3), begin=(0, 0), end=(0, 0), strides=(1, 1), - begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0), - begin_mask_ref=(0, 0), end_mask_ref=(0), - shrink_axis_mask_ref=(1, 1), new_axis_mask_ref=(0), ellipsis_mask_ref=(0), - ) - - def test_mask_align_compare_graphs_4(self): - # begin_masks have different values, but they alight to the same mask - self.run_align_test( - inp=(1, 100, 200, 3), begin=(0, 0), end=(0, 0), strides=(1, 1), - begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0), - begin_mask_ref=(0, 0), end_mask_ref=(0), - shrink_axis_mask_ref=(1), new_axis_mask_ref=(0), ellipsis_mask_ref=(0), - ) - - # corner case with and empty slice - def test_mask_align_compare_graphs_5(self): - self.run_align_test( - inp=(1, 100, 200, 3), begin=[], end=[], strides=[], - begin_mask=[], end_mask=[], - shrink_axis_mask=[], new_axis_mask=[], ellipsis_mask=[], - begin_mask_ref=[], end_mask_ref=[], - shrink_axis_mask_ref=[], new_axis_mask_ref=[], ellipsis_mask_ref=[] - ) - - # emppty mask [] should be aligned into the length of begin - def test_mask_align_compare_graphs_6(self): - # begin_masks have different values, but they alight to the same mask - self.run_align_test( - inp=(1, 100, 200, 3), begin=(0, 0), end=(0, 0), strides=(1, 1), - begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0), - begin_mask_ref=[], end_mask_ref=(0, 0), - shrink_axis_mask_ref=(1, 1), 
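# --- Illustrative sketch (not the MO implementation) ------------------------
# The alignment tests expect masks given as a shorter tuple, a bare scalar
# such as (1), an empty list, or an empty string to compare equal to a mask
# right-padded with zeros up to len(begin). Where exactly MO performs this
# alignment is not visible in this diff; the helper below only reproduces
# the behaviour the reference masks encode.
def align_mask(mask, length):
    if mask == "":                      # "" is treated like an empty mask
        mask = []
    values = list(mask) if hasattr(mask, "__iter__") else [mask]
    return values + [0] * (length - len(values))

assert align_mask((1, 0), 2) == [1, 0]
assert align_mask(1, 2) == [1, 0]       # (1) in Python is just the int 1
assert align_mask("", 2) == [0, 0]
assert align_mask([], 2) == [0, 0]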
new_axis_mask_ref=(0, 0), ellipsis_mask_ref=(0, 0), - - ) - - # empty mask "" should be transformed into [] and then aligned into the length of begin - def test_mask_align_compare_graphs_7(self): - # begin_masks have different values, but they alight to the same mask - self.run_align_test( - inp=(1, 100, 200, 3), begin=(0, 0), end=(0, 0), strides=(1, 1), - begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0), - begin_mask_ref="", end_mask_ref=(0, 0), - shrink_axis_mask_ref=(1, 1), new_axis_mask_ref=(0, 0), ellipsis_mask_ref=(0, 0), - - ) - - # negative test - def test_negative_mask_align_compare_graphs(self): - with self.assertRaisesRegex(AssertionError, 'have different attr "begin_mask"'): - self.run_align_test( - inp=(1, 100, 200, 3), begin=(0, 0), end=(0, 0), strides=(1, 1), - begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0), - begin_mask_ref=(0, 1), end_mask_ref=(0, 0), - shrink_axis_mask_ref=(1, 1), new_axis_mask_ref=(0, 0), ellipsis_mask_ref=(0, 0), - ) diff --git a/tools/mo/unit_tests/mo/middle/TensorIteratorBackEdge_test.py b/tools/mo/unit_tests/mo/middle/TensorIteratorBackEdge_test.py deleted file mode 100644 index 20051bb04be1ce..00000000000000 --- a/tools/mo/unit_tests/mo/middle/TensorIteratorBackEdge_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.TensorIteratorBackEdge import BackEdgesMatching -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_attrs - - -class BackEdgesMatchingTests(unittest.TestCase): - def test_no_exit(self): - pattern_matcher = BackEdgesMatching() - pattern = pattern_matcher.pattern() - graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], update_edge_attrs=None, - new_nodes_with_attrs=[('from_body_data', {'kind':'data'})], - new_edges_with_attrs=[('from_body_data', 'NextIteration')]) - - pattern_matcher.find_and_replace_pattern(graph) - graph_ref = build_graph_with_attrs(nodes_with_attrs=[('condition', {'kind': 'op', 'op':'TensorIteratorCondition'}), - ('condition_data', {'kind': 'data'}), - ('back_edge', {'kind': 'op', 'op': 'TensorIteratorBackEdge'}), - ('enter_data', {'kind': 'data'}), - ('from_body_data', {'kind': 'data'}), - ('Identity_1_data', {'kind': 'data'}),], - edges_with_attrs=[('condition', 'condition_data'), - ('enter_data', 'back_edge', {'in': 0}), - ('condition_data', 'back_edge', {'in': 2}), # {in:2} - ('from_body_data', 'back_edge', {'in': 1}), - ('back_edge', 'Identity_1_data')], - update_edge_attrs=None, - new_nodes_with_attrs=[], - new_edges_with_attrs=[], - ) - (flag, resp) = compare_graphs(graph, graph_ref, 'Identity_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_with_exit(self): - pattern_matcher = BackEdgesMatching() - pattern = pattern_matcher.pattern() - graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], update_edge_attrs=None, - new_nodes_with_attrs=[('from_body_data', {'kind': 'data'}), - ('exit', {'kind': 'op', 'op': 'Exit', 'name': 'exit'}), - ('exit_data', {'kind': 'data'}), - ('Switch_1_data_exit', {'kind': 'data'})], - - new_edges_with_attrs=[('from_body_data', 'NextIteration'), - ('Switch_1', 'Switch_1_data_exit', {'out': 0}), - ('Switch_1_data_exit', 'exit', {'out': 0}), - ('exit', 
'exit_data')]) - - pattern_matcher.find_and_replace_pattern(graph) - graph_ref = build_graph_with_attrs(nodes_with_attrs=[('condition', {'kind': 'op', 'op':'TensorIteratorCondition'}), - ('condition_data', {'kind': 'data'}), - ('back_edge', {'kind': 'op', 'op': 'TensorIteratorBackEdge'}), - ('enter_data', {'kind': 'data'}), - ('from_body_data', {'kind': 'data'}), - ('Identity_1_data', {'kind': 'data'}), - ('output', {'kind':'op', 'op':'TensorIteratorOutput'}), - ('exit_data', {'kind': 'data'}) - ], - edges_with_attrs=[('condition', 'condition_data'), - ('enter_data', 'back_edge', {'in': 0}), - ('condition_data', 'back_edge', {'in': 2}), - ('from_body_data', 'back_edge', {'in': 1}), - ('back_edge', 'Identity_1_data'), - ('condition_data', 'output'), - ('output', 'exit_data'), - ('from_body_data', 'output')], - update_edge_attrs=None, - new_nodes_with_attrs=[], - new_edges_with_attrs=[], - ) - (flag, resp) = compare_graphs(graph, graph_ref, 'Identity_1_data', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/TensorIteratorCondition_test.py b/tools/mo/unit_tests/mo/middle/TensorIteratorCondition_test.py deleted file mode 100644 index d4d2c48d2a6ffa..00000000000000 --- a/tools/mo/unit_tests/mo/middle/TensorIteratorCondition_test.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.TensorIteratorCondition import LoopConditionMatcher -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_attrs, regular_op_with_empty_data, connect, build_graph - - -class TensorIteratorConditionTests(unittest.TestCase): - def test_not_dynamic_1(self): - pattern_matcher = LoopConditionMatcher() - pattern = pattern_matcher.pattern(1) - - graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], - new_nodes_with_attrs=[ - ('TensorIteratorInput', {'kind': 'op', 'op': 'TensorIteratorInput'})], - new_edges_with_attrs=[ - ('Identity_1_data', 'TensorIteratorInput')], - update_nodes_attributes=[ - ('init_1_data', {'value': np.array([0])}), - ('init_2_data', {'value': np.array([0])}), - ('add_1_y_data', {'value': np.array(1)}), - ('add_2_y_data', {'value': np.array(1)}), - ('loop_cond_data', {'value': None}), - ('Identity_2_data', {'value': None},), - ('Enter_1_less_data', {'value': None},), - ]) - - pattern_matcher.find_and_replace_pattern(graph) - nodes_attributes = { - **regular_op_with_empty_data('StridedSlice', {'op': 'StridedSlice', 'type': None}), - 'TensorIteratorCondition': {'kind': 'op', 'op': 'TensorIteratorCondition'}, - 'loop_cond_data': {'kind': 'data'}, - 'identity_data': {'kind': 'data'}, - 'minimum_data': {'kind': 'data'}, - 'TensorIteratorInput': {'kind': 'op', 'op': 'TensorIteratorInput'} - } - edges = [ - *connect('StridedSlice', '0:TensorIteratorCondition'), - ('minimum_data', 'TensorIteratorCondition', {'in':1}), - ('TensorIteratorCondition', 'loop_cond_data'), - ('TensorIteratorCondition', 'identity_data'), - ('identity_data', 'TensorIteratorInput') - ] - graph_ref = build_graph(nodes_attributes, edges) - (flag, resp) = compare_graphs(graph, graph_ref, 'loop_cond_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_not_dynamic_2(self): - pattern_matcher = LoopConditionMatcher() - pattern = pattern_matcher.pattern(2) - - graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], 
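# --- Conceptual sketch (not from the MO sources) -----------------------------
# What TensorIteratorBackEdge models in the reference graphs above: a value
# entering the loop (port 0, 'enter_data') is updated by the body result
# (port 1, 'from_body_data') while a condition (port 2, 'condition_data')
# stays true. A plain-Python analogue, purely for illustration:
def run_loop(init, condition, body):
    value = init                        # enter_data     (in: 0)
    while condition(value):             # condition_data (in: 2)
        value = body(value)             # from_body_data (in: 1) feeds the back edge
    return value

assert run_loop(0, lambda v: v < 5, lambda v: v + 1) == 5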
edges_with_attrs=pattern['edges'], - new_nodes_with_attrs=[ - ('TensorIteratorInput', {'kind': 'op', 'op': 'TensorIteratorInput'}), - ('some_op', {'kind': 'op', 'op': 'Add'})], - new_edges_with_attrs=[ - ('Identity_1_data', 'TensorIteratorInput'), - ('loop_cond_data', 'some_op'), - ], - update_nodes_attributes=[ - ('init_1_data', {'value': np.array([0])}), - ('init_2_data', {'value': np.array([0])}), - ('add_1_y_data', {'value': np.array(1)}), - ('add_2_y_data', {'value': np.array(1)}), - ('loop_cond_data', {'value': None}), - ('Identity_2_data', {'value': None},), - ('Enter_1_less_data', {'value': None},), - ]) - - pattern_matcher.find_and_replace_pattern(graph) - nodes_attributes = { - **regular_op_with_empty_data('loop_cond', {'op': 'TensorIteratorCondition', 'type': None}), - **regular_op_with_empty_data('StridedSlice', {'op': 'StridedSlice', 'type': None}), - 'some_op': {'kind': 'op', 'op': 'Add'}, - 'identity_data': {'kind': 'data'}, - 'TensorIteratorInput': {'kind': 'op', 'op': 'TensorIteratorInput'} - } - edges = [ - *connect('StridedSlice', 'loop_cond'), - *connect('loop_cond', 'some_op'), - ('loop_cond', 'identity_data'), - ('identity_data', 'TensorIteratorInput') - ] - graph_ref = build_graph(nodes_attributes, edges) - (flag, resp) = compare_graphs(graph, graph_ref, 'some_op', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/TensorIteratorInput_test.py b/tools/mo/unit_tests/mo/middle/TensorIteratorInput_test.py deleted file mode 100644 index f64ccb87c37d54..00000000000000 --- a/tools/mo/unit_tests/mo/middle/TensorIteratorInput_test.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.TensorIteratorInput import SmartInputMatcher, SimpleInputMatcher, BackEdgeSimpleInputMatcher -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_attrs - - -class SmartInputMatcherTests(unittest.TestCase): - def test(self): - pattern_matcher = SmartInputMatcher() - pattern = pattern_matcher.pattern() - - graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], - update_edge_attrs={('range_data', 'TensorArrayScatter', 0): {'in': 1}, - ('TensorArray_handle', 'TensorArrayScatter', 0): {'in': 0}, - ('TensorArray_flow', 'TensorArrayScatter', 0): {'in': 3}}, - new_nodes_with_attrs=[('ta_size', {'kind': 'data'}), - ('ta_size_op', {'kind': 'op'}), - ('value', {'kind': 'data'}), - ], - new_edges_with_attrs=[ - ('ta_size_op', 'ta_size'), - ('ta_size', 'TensorArray'), - ('value', 'TensorArrayScatter', {'in':2}), - ], - update_nodes_attributes=[('Enter_data', {'value': np.array([1])}), - ('stack_data', {'value': np.array([0])}), - ('stack_1_data', {'value': np.array([1])}), - ('stack_2_data', {'value': np.array([1])}), - ('start_data', {'value': np.array([0])}), - ('delta_data', {'value': np.array([1])}) - ]) - - pattern_matcher.find_and_replace_pattern(graph) - graph_ref = build_graph_with_attrs( - nodes_with_attrs=[('condition_data', {'kind': 'data'}), - ('TensorIteratorInput', {'kind': 'op', 'op': 'TensorIteratorInput'}), - ('TensorArrayRead_data', {'kind': 'data'}), - ('condition_data', {'kind': 'data'}), - ('value', {'kind': 'data'}), - ('ta_size', {'kind': 'data'}), - ('ta_size_op', {'kind': 'op'})], - edges_with_attrs=[('ta_size', 'TensorIteratorInput', {'in': 0}), - ('condition_data', 
'TensorIteratorInput', {'in': 2}), - ('value', 'TensorIteratorInput', {'in': 1}), - ('TensorIteratorInput', 'TensorArrayRead_data'), - ('ta_size_op', 'ta_size')], - update_edge_attrs=None, - new_nodes_with_attrs=[], - new_edges_with_attrs=[], - ) - (flag, resp) = compare_graphs(graph, graph_ref, 'TensorArrayRead_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - -class SimpleInputMatcherTest(unittest.TestCase): - def test(self): - pattern_matcher = SimpleInputMatcher() - pattern = pattern_matcher.pattern() - - graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], - update_edge_attrs=None, - new_nodes_with_attrs=[('in_node', {'kind': 'data'}), - ('Enter_data', {'kind': 'data'})], - new_edges_with_attrs=[('in_node', 'Enter'), ('Enter', 'Enter_data')], - update_nodes_attributes=[]) - - pattern_matcher.find_and_replace_pattern(graph) - - graph_ref = build_graph_with_attrs( - nodes_with_attrs=[('TensorIteratorInput', {'kind': 'op', 'op': 'TensorIteratorInput'}), - ('in_node', {'kind': 'data'}), - ('Enter_data', {'kind': 'data'}) - ], - edges_with_attrs=[('in_node', 'TensorIteratorInput'), ('TensorIteratorInput', 'Enter_data')], - ) - (flag, resp) = compare_graphs(graph, graph_ref, 'Enter_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - -class BackEdgeInputMatcherTest(unittest.TestCase): - def test1(self): - """ - Case with constant input to init - """ - pattern_matcher = BackEdgeSimpleInputMatcher() - pattern = pattern_matcher.pattern() - - graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], - new_nodes_with_attrs=[('cycle_data', {'kind': 'data'}), - ('condition', {'kind': 'data'}), - ('init', {'kind': 'data', 'shape': np.array([1,3])}), - ], - new_edges_with_attrs=[('condition', 'BackEdge', {'in': 2}), - ('init', 'BackEdge', {'in': 0}), - ('cycle_data', 'BackEdge', {'in': 1})],) - - pattern_matcher.find_and_replace_pattern(graph) - graph_ref = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], - new_nodes_with_attrs=[('cycle_data', {'kind': 'data'}), - ('condition', {'kind': 'data'}), - ('init', {'kind': 'data', 'shape': np.array([1,3])}), - ('TensorIteratorInput', {'kind': 'op', 'op': 'TensorIteratorInput'}), - ('TensorIteratorInput_data', {'kind': 'data', 'shape': np.array([1,3])}), - ], - new_edges_with_attrs=[('TensorIteratorInput_data', 'TensorIteratorInput'), - ('TensorIteratorInput', 'init'), - ('condition', 'BackEdge', {'in': 2}), - ('init', 'BackEdge', {'in': 0}), - ('cycle_data', 'BackEdge', {'in': 1})],) - (flag, resp) = compare_graphs(graph, graph_ref, 'BackEdge', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test2(self): - """ - Case with non-constant input to init. - Nothing should happen with graph. 
- """ - pattern_matcher = BackEdgeSimpleInputMatcher() - pattern = pattern_matcher.pattern() - - graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], - new_nodes_with_attrs=[('cycle_data', {'kind': 'data'}), - ('condition', {'kind': 'data'}), - ('init', {'kind': 'data', 'shape': np.array([1, 3])}), - ('Enter', {'kind': 'op', 'op': 'Enter'}), - ], - new_edges_with_attrs=[('Enter', 'init'), - ('condition', 'BackEdge', {'in': 2}), - ('init', 'BackEdge', {'in': 0}), - ('cycle_data', 'BackEdge', {'in': 1})]) - - pattern_matcher.find_and_replace_pattern(graph) - - graph_ref = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], - new_nodes_with_attrs=[('cycle_data', {'kind': 'data'}), - ('condition', {'kind': 'data'}), - ('init', {'kind': 'data', 'shape': np.array([1, 3])}), - ('Enter', {'kind': 'op', 'op': 'Enter'}), - ], - new_edges_with_attrs=[('Enter', 'init'), - ('condition', 'BackEdge', {'in': 2}), - ('init', 'BackEdge', {'in': 0}), - ('cycle_data', 'BackEdge', {'in': 1})], ) - - (flag, resp) = compare_graphs(graph, graph_ref, 'BackEdge', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/TensorIteratorOutput_test.py b/tools/mo/unit_tests/mo/middle/TensorIteratorOutput_test.py deleted file mode 100644 index 33ba540f223ad7..00000000000000 --- a/tools/mo/unit_tests/mo/middle/TensorIteratorOutput_test.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.TensorIteratorOutput import SmartOutputMatcher -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_attrs - - -class SmartOutputMatcherTests(unittest.TestCase): - def test(self): - pattern_matcher = SmartOutputMatcher() - pattern = pattern_matcher.pattern() - - graph = build_graph_with_attrs(nodes_with_attrs=pattern['nodes'], edges_with_attrs=pattern['edges'], - # update_edge_attrs=None, - new_nodes_with_attrs=[('index', {'kind': 'data'}), - ('value', {'kind': 'data'}), - ('ta_size', {'kind': 'data'}), - ], - new_edges_with_attrs=[('index', 'TensorArrayWrite', {'in':1}), - ('value', 'TensorArrayWrite', {'in': 2}), - ('ta_size', 'TensorArray') - ], - update_nodes_attributes=[('WriteEnter_data', {'value': np.array([1, 1])}), - - ('start_data', {'value': np.array([0])}), - ('delta_data', {'value': np.array([1])}), - ]) - - pattern_matcher.find_and_replace_pattern(graph) - - graph_ref = build_graph_with_attrs( - nodes_with_attrs=[ - ('TensorIteratorOutput', {'kind': 'op', 'op': 'TensorIteratorOutput'}), - ('TensorArrayGather_data', {'kind': 'data'}), - ('index', {'kind': 'data'}), - ('value', {'kind': 'data'}), - ('ta_size', {'kind': 'data'}), ], - edges_with_attrs=[('ta_size', 'TensorIteratorOutput', {'in': 0}), - ('index', 'TensorIteratorOutput', {'in': 2}), - ('value', 'TensorIteratorOutput', {'in': 1}), - ('TensorIteratorOutput', 'TensorArrayGather_data')], - update_edge_attrs=None, - new_nodes_with_attrs=[], - new_edges_with_attrs=[], - ) - (flag, resp) = compare_graphs(graph, graph_ref, 'TensorArrayGather_data', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/UnsqueezeTileReshapeBlockToInterpolate_test.py b/tools/mo/unit_tests/mo/middle/UnsqueezeTileReshapeBlockToInterpolate_test.py deleted file mode 100644 index 72755b9d4aa853..00000000000000 --- 
a/tools/mo/unit_tests/mo/middle/UnsqueezeTileReshapeBlockToInterpolate_test.py +++ /dev/null @@ -1,399 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -import numpy as np - -from openvino.tools.mo.middle.UnsqueezeTileReshapeBlockToInterpolate import UnsqueezeTileReshapeBlockToInterpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -graph_node_attrs = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 8, 32, 32, 64]), - 'kind': 'data', - 'data_type': None - }, - 'unsqueeze': {'type': 'Unsqueeze', 'kind': 'op', 'op': 'Unsqueeze'}, - 'dim': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': int64_array([2]), - 'shape': int64_array([1]), - }, - 'dim_data': { - 'kind': 'data', - 'value': int64_array([2]), - 'shape': int64_array([1]), - }, - 'unsqueeze_data': { - 'kind': 'data', - 'shape': int64_array([1, 8, 1, 32, 32, 64]), - 'value': None, - }, - 'tile': {'type': 'Tile', 'kind': 'op', 'op': 'Tile'}, - 'multipliers': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': int64_array([1, 1, 2, 1, 1, 1]), - 'shape': int64_array([6]), - }, - 'multipliers_data': { - 'kind': 'data', - 'value': int64_array([1, 1, 2, 1, 1, 1]), - 'shape': int64_array([6]), - }, - 'tile_data': { - 'kind': 'data', - 'shape': int64_array([1, 8, 2, 32, 32, 64]), - 'value': None, - }, - 'reshape': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'}, - 'reshape_data': { - 'kind': 'data', - 'shape': int64_array([1, 16, 32, 32, 64]), - 'value': None, - }, - 'shape': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': int64_array([1, 16, 32, 32, 64]), - 'shape': int64_array([5]), - }, - 'shape_data': { - 'kind': 'data', - 'value': int64_array([1, 16, 32, 32, 64]), - 'shape': int64_array([5]), - }, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': { - 'kind': 'data', - 'shape': int64_array([1, 16, 32, 32, 64]), - 'value': None, - }, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, -} - -graph_edges = [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'unsqueeze', {'in': 0}), - ('dim', 'dim_data'), - ('dim_data', 'unsqueeze', {'in': 1}), - ('unsqueeze', 'unsqueeze_data'), - ('unsqueeze_data', 'tile', {'in': 0}), - ('multipliers', 'multipliers_data'), - ('multipliers_data', 'tile', {'in': 1}), - ('tile', 'tile_data'), - ('tile_data', 'reshape', {'in': 0}), - ('reshape', 'reshape_data'), - ('shape', 'shape_data'), - ('shape_data', 'reshape', {'in': 1}), - ('reshape_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), -] - - -ref_graph_node_attrs_with_4_inputs_interpolate = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 8, 32, 32, 64]), - 'kind': 'data', - 'data_type': None - }, - 'shapeof': {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}, - 'shapeof_data': { - 'kind': 'data', - 'shape': None, - 'value': None, - }, - 'gather': { - 'type': 'Gather', - 'kind': 'op', - 'op': 'Gather' - }, - 'gather_data': { - 'kind': 'data', - 'shape': None, - 'value': None, - }, - 'indices': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': int64_array([1]), - 'shape': int64_array([1]), - }, - 'indices_data': { - 'kind': 'data', - 'value': 
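# --- Illustrative NumPy check (not part of the deleted test file) -----------
# Why the Unsqueeze(axis=2) -> Tile([1, 1, 2, 1, 1, 1]) -> Reshape block above
# can be replaced by an Interpolate: the three ops together duplicate every
# channel, i.e. nearest-neighbour upsampling by 2 along axis 1 (the reference
# graph uses axes=[1] and scales=[2]). Small shapes are used here only to keep
# the check cheap; the structure matches the 5D pattern in graph_node_attrs.
import numpy as np
x = np.random.rand(1, 8, 4, 4, 2).astype(np.float32)
y = np.tile(np.expand_dims(x, 2), (1, 1, 2, 1, 1, 1)).reshape(1, 16, 4, 4, 2)
assert np.array_equal(y, np.repeat(x, 2, axis=1))           # channel-wise nearest upsample x2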
None, - 'shape': None, - }, - 'gather_axis': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': np.array(0, dtype=np.int64), - 'shape': np.array(0, dtype=np.int64).shape, - }, - 'gather_axis_data': { - 'kind': 'data', - 'value': None, - 'shape': None, - }, - 'scales_m': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': np.array([2], dtype=np.int64), - 'shape': int64_array([1]), - }, - 'scales_m_data': { - 'kind': 'data', - 'value': np.array([2], dtype=np.float32), - 'shape': int64_array([1]), - }, - 'mul': {'type': 'Mul', 'kind': 'op', 'op': 'Mul'}, - 'mul_data': { - 'kind': 'data', - 'value': None, - 'shape': None, - }, - 'scales': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': np.array([2], dtype=np.float32), - 'shape': int64_array([1]), - }, - 'scales_data': { - 'kind': 'data', - 'value': np.array([2], dtype=np.float32), - 'shape': int64_array([1]), - }, - 'axes': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': int64_array([1]), - 'shape': int64_array([1]), - }, - 'axes_data': { - 'kind': 'data', - 'value': int64_array([1]), - 'shape': int64_array([1]), - }, - 'interpolate': {'type': 'Interpolate', 'kind': 'op', 'op': 'Interpolate'}, - 'interpolate_data': { - 'kind': 'data', - 'value': None, - 'shape': int64_array([1, 16, 32, 32, 64]), - }, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': { - 'kind': 'data', - 'shape': int64_array([1, 16, 32, 32, 64]), - 'value': None, - }, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, -} - - -ref_graph_edges_attrs_with_4_inputs_interpolate = [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'shapeof'), - ('shapeof', 'shapeof_data'), - ('shapeof_data', 'gather', {'in': 0}), - ('gather', 'gather_data'), - ('indices', 'indices_data'), - ('indices_data', 'gather', {'in': 1}), - ('gather_axis', 'gather_axis_data'), - ('gather_axis_data', 'gather', {'in': 2}), - ('scales_m', 'scales_m_data'), - ('gather_data', 'mul', {'in': 0}), - ('scales_m_data', 'mul', {'in': 1}), - ('mul', 'mul_data'), - ('scales', 'scales_data'), - ('axes', 'axes_data'), - ('scales_data', 'interpolate', {'out': 0, 'in': 2}), - ('mul_data', 'interpolate', {'in': 1}), - ('placeholder_data', 'interpolate', {'in': 0}), - ('axes_data', 'interpolate', {'in': 3}), - ('interpolate', 'interpolate_data'), - ('interpolate_data', 'abs'), - ('abs', 'abs_data'), - ('abs_data', 'output'), -] - - -graph_node_attrs_when_transformation_is_not_applicable = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': { - 'value': None, - 'shape': int64_array([1, 8, 32]), - 'kind': 'data', - 'data_type': None - }, - 'unsqueeze': {'type': 'Unsqueeze', 'kind': 'op', 'op': 'Unsqueeze'}, - 'dim': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': int64_array([2]), - 'shape': int64_array([1]), - }, - 'dim_data': { - 'kind': 'data', - 'value': int64_array([2]), - 'shape': int64_array([1]), - }, - 'unsqueeze_data': { - 'kind': 'data', - 'shape': int64_array([1, 8, 1, 32]), - 'value': None, - }, - 'tile': {'type': 'Tile', 'kind': 'op', 'op': 'Tile'}, - 'multipliers': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': int64_array([1, 1, 2, 1]), - 'shape': int64_array([4]), - }, - 'multipliers_data': { - 'kind': 'data', - 'value': int64_array([1, 1, 2, 1]), - 'shape': int64_array([4]), - }, - 'tile_data': { - 'kind': 'data', - 'shape': int64_array([1, 8, 2, 32]), - 'value': None, - }, - 'reshape': {'type': 'Reshape', 'kind': 'op', 'op': 
'Reshape'}, - 'reshape_data': { - 'kind': 'data', - 'shape': int64_array([1, 16, 32]), - 'value': None, - }, - 'shape': { - 'kind': 'op', - 'op': 'Const', - 'type': 'Const', - 'value': int64_array([1, 16, 32]), - 'shape': int64_array([3]), - }, - 'shape_data': { - 'kind': 'data', - 'value': int64_array([1, 16, 32]), - 'shape': int64_array([3]), - }, - 'abs': {'type': 'Abs', 'kind': 'op', 'op': 'Abs'}, - 'abs_data': { - 'kind': 'data', - 'shape': int64_array([1, 16, 32]), - 'value': None, - }, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, -} - -graph_edges_when_transformation_is_not_applicable = graph_edges - - -class UnsqueezeTileReshapeBlockToInterpolateTest(unittest.TestCase): - def test_5d(self): - graph = build_graph(nodes_attrs=graph_node_attrs, edges=graph_edges) - ref_graph = build_graph(nodes_attrs=ref_graph_node_attrs_with_4_inputs_interpolate, - edges=ref_graph_edges_attrs_with_4_inputs_interpolate) - UnsqueezeTileReshapeBlockToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_4d(self): - graph = build_graph( - nodes_attrs=graph_node_attrs, - edges=graph_edges, - update_attributes={ - 'placeholder_data': {'shape': int64_array([1, 8, 32, 32])}, - 'unsqueeze_data': {'shape': int64_array([1, 8, 1, 32, 32])}, - 'multipliers': {'value': int64_array([1, 1, 2, 1, 1]), 'shape': int64_array([5])}, - 'multipliers_data': {'value': int64_array([1, 1, 2, 1, 1]), 'shape': int64_array([5])}, - 'tile_data': {'shape': int64_array([1, 8, 2, 32, 32])}, - 'reshape_data': {'shape': int64_array([1, 16, 32, 32]), 'value': None}, - 'shape': {'value': int64_array([1, 16, 32, 32]), 'shape': int64_array([4])}, - 'shape_data': {'value': int64_array([1, 16, 32, 32]), 'shape': int64_array([4])}, - 'abs_data': {'shape': int64_array([1, 16, 32, 32])}, - } - ) - ref_graph = build_graph( - nodes_attrs=ref_graph_node_attrs_with_4_inputs_interpolate, - edges=ref_graph_edges_attrs_with_4_inputs_interpolate, - update_attributes={ - 'placeholder_data': {'shape': int64_array([1, 8, 32, 32])}, - 'interpolate_data': {'shape': int64_array([1, 16, 32, 32])}, - 'abs_data': {'shape': int64_array([1, 16, 32, 32])}, - 'axes': {'shape': int64_array([1]), 'value': int64_array([1])}, - } - ) - UnsqueezeTileReshapeBlockToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_3d(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_when_transformation_is_not_applicable, - edges=graph_edges_when_transformation_is_not_applicable - ) - ref_graph = build_graph( - nodes_attrs=graph_node_attrs_when_transformation_is_not_applicable, - edges=graph_edges_when_transformation_is_not_applicable - ) - UnsqueezeTileReshapeBlockToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) - - def test_2d(self): - graph = build_graph( - nodes_attrs=graph_node_attrs_when_transformation_is_not_applicable, - edges=graph_edges_when_transformation_is_not_applicable, - update_attributes={ - 'placeholder_data': {'shape': int64_array([5, 8])}, - 'dim': {'value': int64_array([1])}, - 'dim_data': {'value': int64_array([1])}, - 'unsqueeze_data': {'shape': int64_array([5, 1, 8])}, - 'multipliers': {'value': int64_array([1, 10, 1])}, - 'multipliers_data': {'value': int64_array([1, 10, 1]), 'shape': int64_array([3])}, - 'tile_data': {'shape': int64_array([5, 10, 8])}, - 
'reshape_data': {'shape': int64_array([50, 8])}, - 'shape': {'value': int64_array([50, 8]), 'shape': int64_array([2])}, - 'shape_data': {'value': int64_array([50, 8]), 'shape': int64_array([2])}, - 'abs_data': {'shape': int64_array([50, 8])}, - } - ) - ref_graph = build_graph( - nodes_attrs=graph_node_attrs_when_transformation_is_not_applicable, - edges=graph_edges_when_transformation_is_not_applicable, - update_attributes={ - 'placeholder_data': {'shape': int64_array([5, 8])}, - 'dim': {'value': int64_array([1])}, - 'dim_data': {'value': int64_array([1])}, - 'unsqueeze_data': {'shape': int64_array([5, 1, 8])}, - 'multipliers': {'value': int64_array([1, 10, 1])}, - 'multipliers_data': {'value': int64_array([1, 10, 1]), 'shape': int64_array([3])}, - 'tile_data': {'shape': int64_array([5, 10, 8])}, - 'reshape_data': {'shape': int64_array([50, 8])}, - 'shape': {'value': int64_array([50, 8]), 'shape': int64_array([2])}, - 'shape_data': {'value': int64_array([50, 8]), 'shape': int64_array([2])}, - 'abs_data': {'shape': int64_array([50, 8])}, - } - ) - UnsqueezeTileReshapeBlockToInterpolate().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py b/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py deleted file mode 100644 index 2129781e771fcf..00000000000000 --- a/tools/mo/unit_tests/mo/middle/UpsampleToResample_test.py +++ /dev/null @@ -1,215 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.middle.UpsampleToResample import UpsampleToResample -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -graph_node_attrs = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': np.float32}, - 'scales': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': None, 'shape': None}, - 'scales_data': {'kind': 'data', 'value': None, 'shape': None}, - 'upsample': {'type': None, 'kind': 'op', 'op': 'Upsample', 'mode': 'linear'}, - 'upsample_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, -} - -graph_edges = [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'upsample', {'in': 0}), - ('scales', 'scales_data'), - ('scales_data', 'upsample', {'in': 1}), - ('upsample', 'upsample_data'), - ('upsample_data', 'output'), -] - -new_ref_graph_node_attr = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': np.float32}, - 'ss_begin': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2]), 'shape': int64_array([1])}, - 'ss_begin_data': {'kind': 'data', 'value': int64_array([2]), 'shape': int64_array([1])}, - 'ss_end': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4]), 'shape': int64_array([1])}, - 'ss_end_data': {'kind': 'data', 'value': int64_array([4]), 'shape': int64_array([1])}, - 'ss_stride': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1]), 'shape': int64_array([1])}, - 'ss_stride_data': {'kind': 'data', 'value': int64_array([1]), 'shape': int64_array([1])}, - 
'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice'}, - 'strided_slice_data': {'kind': 'data', 'shape': None, 'value': None}, - 'cast_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float32}, - 'cast_to_float_d': {'kind': 'data', 'value': None, 'shape': None}, - 'factor': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([5, 5]), 'shape': int64_array([2])}, - 'factor_data': {'kind': 'data', 'value': int64_array([5, 5]), 'shape': int64_array([2])}, - 'shapeof': {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}, - 'shapeof_data': {'kind': 'data', 'shape': None, 'value': None}, - 'mul': {'type': 'Multiply', 'kind': 'op', 'op': 'Multiply'}, - 'mul_data': {'kind': 'data', 'shape': None, 'value': None}, - 'cast_to_int': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.int32}, - 'cast_to_int_d': {'kind': 'data', 'shape': None, 'value': None}, - 'axes_const': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': None, 'shape': None}, - 'axes_const_data': {'kind': 'data', 'value': None, 'shape': None}, - 'scales': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([5, 5]), 'shape': int64_array([2])}, - 'scales_data': {'kind': 'data', 'value': None, 'shape': None}, - 'interpolate': {'type': 'Interpolate', 'kind': 'op', 'op': 'Interpolate', 'axes': None}, - 'interpolate_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, -} - -new_ref_graph_edges = [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'shapeof', {'in': 0, 'out': 0}), - ('placeholder_data', 'interpolate', {'in': 0, 'out': 0}), - ('ss_begin', 'ss_begin_data'), - ('ss_begin_data', 'strided_slice', {'in': 1, 'out': 0}), - ('ss_end', 'ss_end_data'), - ('ss_end_data', 'strided_slice', {'in': 2, 'out': 0}), - ('ss_stride', 'ss_stride_data'), - ('ss_stride_data', 'strided_slice', {'in': 3, 'out': 0}), - ('strided_slice', 'strided_slice_data'), - ('strided_slice_data', 'cast_to_float'), - ('cast_to_float', 'cast_to_float_d'), - ('shapeof', 'shapeof_data'), - ('shapeof_data', 'strided_slice', {'in': 0, 'out': 0}), - ('factor', 'factor_data'), - ('cast_to_float_d', 'mul', {'in': 0, 'out': 0}), - ('factor_data', 'mul', {'in': 1, 'out': 0}), - ('mul', 'mul_data'), - ('mul_data', 'cast_to_int'), - ('cast_to_int', 'cast_to_int_d'), - ('cast_to_int_d', 'interpolate', {'in': 1, 'out': 0}), - ('axes_const', 'axes_const_data'), - ('axes_const_data', 'interpolate', {'in': 3, 'out': 0}), - ('scales', 'scales_data'), - ('scales_data', 'interpolate', {'in': 2, 'out': 0}), - ('interpolate', 'interpolate_data'), - ('interpolate_data', 'output') -] - -ref_graph_node_attrs = { - 'placeholder': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': np.float32}, - 'factor': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([5, 5]), 'shape': int64_array([2])}, - 'factor_data': {'kind': 'data', 'value': None, 'shape': None}, - 'shapeof': {'type': 'ShapeOf', 'kind': 'op', 'op': 'ShapeOf'}, - 'shapeof_data': {'kind': 'data', 'shape': None, 'value': None}, - 'strided_slice': {'type': 'StridedSlice', 'kind': 'op', 'op': 'StridedSlice'}, - 'strided_slice_data': {'kind': 'data', 'shape': None, 'value': None}, - 'ss_begin': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([2]), 'shape': int64_array([1])}, - 'ss_begin_data': {'kind': 'data', 'value': None, 'shape': None}, - 
'ss_end': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([4]), 'shape': int64_array([1])}, - 'ss_end_data': {'kind': 'data', 'value': None, 'shape': None}, - 'ss_stride': {'kind': 'op', 'op': 'Const', 'type': 'Const', 'value': int64_array([1]), 'shape': int64_array([1])}, - 'ss_stride_data': {'kind': 'data', 'value': None, 'shape': None}, - 'cast_to_float': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.float32}, - 'cast_to_float_d': {'kind': 'data', 'value': None, 'shape': None}, - 'mul': {'type': 'Multiply', 'kind': 'op', 'op': 'Multiply'}, - 'mul_data': {'kind': 'data', 'shape': None, 'value': None}, - 'cast_to_int': {'kind': 'op', 'op': 'Cast', 'type': 'Convert', 'dst_type': np.int32}, - 'cast_to_int_d': {'kind': 'data', 'shape': None, 'value': None}, - 'interpolate': {'type': 'Interpolate', 'kind': 'op', 'op': 'Interpolate', 'axes': None}, - 'interpolate_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output': {'kind': 'op', 'op': 'Result', 'type': 'Result'}, -} - -ref_graph_edges = [ - ('placeholder', 'placeholder_data'), - ('placeholder_data', 'interpolate', {'in': 0, 'out': 0}), - ('placeholder_data', 'shapeof', {'in': 0, 'out': 0}), - ('shapeof', 'shapeof_data'), - ('interpolate', 'interpolate_data'), - ('factor', 'factor_data'), - ('shapeof_data', 'strided_slice', {'in': 0, 'out': 0}), - ('ss_begin', 'ss_begin_data'), - ('ss_begin_data', 'strided_slice', {'in': 1, 'out': 0}), - ('ss_end', 'ss_end_data'), - ('ss_end_data', 'strided_slice', {'in': 2, 'out': 0}), - ('ss_stride', 'ss_stride_data'), - ('ss_stride_data', 'strided_slice', {'in': 3, 'out': 0}), - ('strided_slice', 'strided_slice_data'), - ('strided_slice_data', 'cast_to_float'), - ('cast_to_float', 'cast_to_float_d'), - ('cast_to_float_d', 'mul', {'in': 0, 'out': 0}), - ('factor_data', 'mul', {'in': 1, 'out': 0}), - ('mul', 'mul_data'), - ('mul_data', 'cast_to_int'), - ('cast_to_int', 'cast_to_int_d'), - ('cast_to_int_d', 'interpolate', {'in': 1, 'out': 0}), - ('interpolate_data', 'output'), -] - - -class TestUpsampleToResampleTest(): - @pytest.mark.parametrize("input_shape, scales, axes",[([2, 10, 20, 30], [1, 1, 5, 5], [2, 3]), - ([2, 20, 30, 40], [1, 1, 3, 3], [2, 3]), - ([2, 10, 20, 30], [1, 1, 6, 5], [2, 3]), - ([2, 20, 30, 40], [1, 1, 3, 4], [2, 3]), - ([2, 3, 20, 30, 40], [1, 1, 3, 3, 3], [2, 3, 4]), - ([2, 3, 20, 30, 40], [1, 1, 3, 4, 3], [2, 3, 4]), - ([2, 3, 20, 30, 40], [1, 1, 4, 3, 3], [2, 3, 4]), - ([2, 3, 20, 30, 40], [1, 1, 3, 3, 4], [2, 3, 4]), - ([2, 10, 20, 30], [1, 1, 5.5, 5.7], [2, 3]), - ([2, 20, 30, 40], [1, 1, 3.3, 3.1], [2, 3]), - ([2, 10, 20, 30], [1, 1, 6.18, 5.34], [2, 3]), - ([2, 20, 30, 40], [1, 1, 3.79, 4.16], [2, 3]), - ([2, 3, 20, 30, 40], [1, 1, 3.12, 3.87, 3.92], [2, 3, 4]), - ([2, 3, 20, 30, 40], [1, 1, 3.74, 4.873, 3.287], [2, 3, 4]), - ([2, 3, 20, 30, 40], [1, 1, 4.8, 3.6, 3.11], [2, 3, 4]), - ([2, 3, 20, 30, 40], [1, 1, 3.33, 3.73, 4.765], [2, 3, 4]), - ]) - def test_conversion(self, input_shape, scales, axes): - input_shape_as_array = int64_array(input_shape) - scales_as_array = float32_array(scales) - graph = build_graph(graph_node_attrs, - graph_edges, - { - 'placeholder_data': {'shape': input_shape_as_array}, - 'scales': {'value': scales_as_array, 'shape': scales_as_array.shape}, - 'scales_data': {'value': scales_as_array, 'shape': scales_as_array.shape}, - 'upsample_data': - {'shape': ((input_shape_as_array + 1.e-5) * scales_as_array).astype(np.int64)} - }) - graph.graph['layout'] = 'NCHW' - ref_graph = 
build_graph(new_ref_graph_node_attr, - new_ref_graph_edges, - { - 'placeholder_data': {'shape': int64_array(input_shape)}, - 'ss_begin': {'value': int64_array([axes[0]])}, - 'ss_end': {'value': int64_array([axes[-1] + 1])}, - 'ss_begin_data': {'value': int64_array([axes[0]])}, - 'ss_end_data': {'value': int64_array([axes[-1] + 1])}, - 'factor': {'value': scales_as_array[2:], - 'shape': scales_as_array[2:].shape}, - 'factor_data': {'value': scales_as_array[2:], - 'shape': scales_as_array[2:].shape}, - 'axes_const': {'value': int64_array(axes), 'shape': int64_array(axes).shape}, - 'interpolate_data': { - 'shape': (input_shape_as_array * scales_as_array + 1e-5).astype(np.int64)}, - }) - UpsampleToResample().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - assert flag, resp - - @pytest.mark.parametrize("input_shape, scales",[([2, 10, 20, 30], [1, 2, 5, 5],), - ([2, 3, 20, 30, 40], [1, 2, 3, 3, 3],), - ]) - def test_pattern_does_not_satisfy(self, input_shape, scales): - graph = build_graph(graph_node_attrs, graph_edges, - {'placeholder_data': {'shape': int64_array(input_shape)}, - 'scales': {'value': int64_array(scales), 'shape': int64_array(scales).shape}, - 'scales_data': {'value': int64_array(scales), 'shape': int64_array(scales).shape}, - 'upsample_data': {'shape': int64_array(input_shape) * int64_array(scales)}}) - graph.graph['layout'] = 'NCHW' - - ref_graph = build_graph(graph_node_attrs, graph_edges, - {'placeholder_data': {'shape': int64_array(input_shape)}, - 'scales': {'value': int64_array(scales), 'shape': int64_array(scales).shape}, - 'scales_data': {'value': int64_array(scales), 'shape': int64_array(scales).shape}, - 'upsample_data': {'shape': int64_array(input_shape) * int64_array(scales)}}) - - UpsampleToResample().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'output') - assert flag, resp diff --git a/tools/mo/unit_tests/mo/middle/__init__.py b/tools/mo/unit_tests/mo/middle/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/middle/dequantize_linear_resolver_test.py b/tools/mo/unit_tests/mo/middle/dequantize_linear_resolver_test.py deleted file mode 100644 index 0c64f153ed3cd0..00000000000000 --- a/tools/mo/unit_tests/mo/middle/dequantize_linear_resolver_test.py +++ /dev/null @@ -1,237 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.dequantize_linear_resolver import DequantizeLinearResolver -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph -import pytest - -nodes1_attributes = { - 'input': {'kind': 'op', 'op': 'AnyOp'}, - 'input_data': {'kind': 'data', 'shape': None}, - 'dequantize': {'kind': 'op', 'op': 'DequantizeLinear', 'axis': 1}, - 'dequantize_data': {'kind': 'data', 'shape': None}, - 'scale_param_dq': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'scale_param_dq_data': {'kind': 'data', 'shape': None}, - 'zerop_param_dq': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'zerop_param_dq_data': {'kind': 'data', 'shape': None}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, - 'out_data': {'kind': 'data', 'shape': None}, - 'result': {'kind': 'op', 'op': 'Result'}, -} - -nodes_ref_attributes = { - 'input': {'kind': 'op', 'op': 'AnyOp'}, - 'input_data': {'kind': 'data', 'shape': 
None}, - 'cast': {'kind': 'op', 'op': 'Cast', 'type': 'Convert'}, - 'cast_data': {'kind': 'data', 'shape': None}, - 'sub': {'kind': 'op', 'op': 'Sub', 'type': 'Subtract'}, - 'sub_data': {'kind': 'data', 'shape': None}, - 'mul': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'}, - 'mul_data': {'kind': 'data', 'shape': None}, - 'scale_param_dq': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'scale_param_dq_data': {'kind': 'data', 'shape': None}, - 'zerop_param_dq': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'zerop_param_dq_data': {'kind': 'data', 'shape': None}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, - 'out_data': {'kind': 'data', 'shape': None}, - 'result': {'kind': 'op', 'op': 'Result'}, - - 'sub_reshape_const': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'sub_reshape_const_data': {'kind': 'data', 'shape': None}, - 'sub_reshape': {'kind': 'op', 'type': 'Reshape', 'op': 'Reshape'}, - 'sub_reshape_data': {'kind': 'data', 'shape': None}, - - 'mul_reshape_const': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'mul_reshape_const_data': {'kind': 'data', 'shape': None}, - 'mul_reshape': {'kind': 'op', 'type': 'Reshape', 'op': 'Reshape'}, - 'mul_reshape_data': {'kind': 'data', 'shape': None}, -} - - -class TestDequantizeLinearResolver(unittest.TestCase): - - def test_dequantize(self): - graph = build_graph(nodes1_attributes, - [('input', 'input_data'), - ('input_data', 'dequantize'), - ('dequantize', 'dequantize_data'), - ('scale_param_dq', 'scale_param_dq_data'), - ('zerop_param_dq', 'zerop_param_dq_data'), - ('scale_param_dq_data', 'dequantize'), - ('zerop_param_dq_data', 'dequantize'), - ('dequantize_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'input_data': {'shape': int64_array([1, 3, 224, 224])}, - 'scale_param_dq': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'scale_param_dq_data': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'zerop_param_dq': {'shape': np.array([]), 'value': np.uint8(0)}, - 'zerop_param_dq_data': {'shape': np.array([]), 'value': np.uint8(0)}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'input_data'), - ('input_data', 'cast'), - ('cast', 'cast_data'), - ('cast_data', 'sub'), - ('zerop_param_dq', 'zerop_param_dq_data'), - ('zerop_param_dq_data', 'sub'), - ('sub', 'sub_data'), - ('sub_data', 'mul'), - ('scale_param_dq', 'scale_param_dq_data'), - ('scale_param_dq_data', 'mul'), - ('mul', 'mul_data'), - ('mul_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'input_data': {'shape': int64_array([1, 3, 224, 224])}, - 'scale_param_dq': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'scale_param_dq_data': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'zerop_param_dq': {'shape': np.array([]), 'value': np.uint8(0)}, - 'zerop_param_dq_data': {'shape': np.array([]), 'value': np.uint8(0)}, - }, nodes_with_edges_only=True) - - graph.stage = 'middle' - DequantizeLinearResolver().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_dequantize_no_zerop(self): - graph = build_graph(nodes1_attributes, - [('input', 'input_data'), - ('input_data', 'dequantize'), - ('dequantize', 'dequantize_data'), - ('scale_param_dq', 'scale_param_dq_data'), - ('scale_param_dq_data', 'dequantize'), - ('dequantize', 'dequantize_data'), - ('dequantize_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'input_data': 
{'shape': int64_array([1, 3, 224, 224])}, - 'scale_param_dq': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'scale_param_dq_data': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'input_data'), - ('input_data', 'cast'), - ('cast', 'cast_data'), - ('cast_data', 'mul'), - ('scale_param_dq', 'scale_param_dq_data'), - ('scale_param_dq_data', 'mul'), - ('mul', 'mul_data'), - ('mul_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'input_data': {'shape': int64_array([1, 3, 224, 224])}, - 'scale_param_dq': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'scale_param_dq_data': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - }, nodes_with_edges_only=True) - - graph.stage = 'middle' - DequantizeLinearResolver().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True) - self.assertTrue(flag, resp) - -class TestDequantizeWithAxis(): - @pytest.mark.parametrize("input_shape, scale_param_value, zero_param_value, target_shape, axis", - [(int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 4, 1]), 2), - (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]), - np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 3, 1, 1]), 1), - (int64_array([2, 3, 4, 4]), int64_array([2, 3, 4, 5]), - np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([2, 1, 1, 1]), 0), - (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]), - np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 4, 1]), -2), - (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]), - np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 1, 4]), -1), - (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]), - np.array([2, 3, 4, 5], dtype=np.int32), int64_array([1, 1, 4, 1]), 2), - (int64_array([1, 3, 4, 4]), int64_array([2, 3, 4, 5]), - np.array([2, 3, 4, 5], dtype=np.int32), int64_array([1, 3, 1, 1]), 1), - (int64_array([2, 3, 4, 4]), int64_array([2, 3, 4, 5]), - np.array([2, 3, 4, 5], dtype=np.int32), int64_array([2, 1, 1, 1]), 0), - ]) - def test_dequantize_with_axis(self, input_shape, scale_param_value, zero_param_value, target_shape, axis): - graph = build_graph(nodes1_attributes, - [('input', 'input_data'), - ('input_data', 'dequantize'), - ('dequantize', 'dequantize_data'), - ('scale_param_dq', 'scale_param_dq_data'), - ('zerop_param_dq', 'zerop_param_dq_data'), - ('scale_param_dq_data', 'dequantize'), - ('zerop_param_dq_data', 'dequantize'), - ('dequantize_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'input_data': {'shape': input_shape}, - 'dequantize': {'axis': axis}, - 'scale_param_dq': {'shape': scale_param_value.shape, - 'value': scale_param_value}, - 'scale_param_dq_data': {'shape': scale_param_value.shape, - 'value': scale_param_value}, - 'zerop_param_dq': {'shape': zero_param_value.shape, - 'value': zero_param_value}, - 'zerop_param_dq_data': {'shape': zero_param_value.shape, - 'value': zero_param_value}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'input_data'), - ('input_data', 'cast'), - ('cast', 'cast_data'), - ('cast_data', 'sub'), - ('zerop_param_dq', 'zerop_param_dq_data'), - - ('zerop_param_dq_data', 'sub_reshape'), - ('sub_reshape_const', 'sub_reshape_const_data'), - ('sub_reshape_const_data', 'sub_reshape'), - ('sub_reshape', 'sub_reshape_data'), - 
('sub_reshape_data', 'sub'), - - ('sub', 'sub_data'), - ('sub_data', 'mul'), - ('scale_param_dq', 'scale_param_dq_data'), - - ('scale_param_dq_data', 'mul_reshape'), - ('mul_reshape_const', 'mul_reshape_const_data'), - ('mul_reshape_const_data', 'mul_reshape'), - ('mul_reshape', 'mul_reshape_data'), - ('mul_reshape_data', 'mul'), - - ('mul', 'mul_data'), - ('mul_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'input_data': {'shape': input_shape}, - 'scale_param_dq': {'shape': scale_param_value.shape, - 'value': scale_param_value}, - 'scale_param_dq_data': {'shape': scale_param_value.shape, - 'value': scale_param_value}, - 'zerop_param_dq': {'shape': zero_param_value.shape, - 'value': zero_param_value}, - 'zerop_param_dq_data': {'shape': zero_param_value.shape, - 'value': zero_param_value}, - 'sub_reshape_const_data': {'shape': target_shape.shape, 'value': target_shape}, - 'mul_reshape_const_data': {'shape': target_shape.shape, 'value': target_shape}, - }, nodes_with_edges_only=True) - - graph.stage = 'middle' - DequantizeLinearResolver().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'out', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/middle/layer_normalization_test.py b/tools/mo/unit_tests/mo/middle/layer_normalization_test.py deleted file mode 100644 index 92b7d7e746d203..00000000000000 --- a/tools/mo/unit_tests/mo/middle/layer_normalization_test.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.middle.layer_normalization import LayerNormalization -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, shaped_parameter, regular_op_with_empty_data, shaped_const_with_data, \ - result, connect - - -class LayerNormalizationTest(unittest.TestCase): - - def test_1(self): - graph = build_graph( - nodes_attrs={ - **shaped_parameter('input', int64_array([1, 3, 15, 15])), - **regular_op_with_empty_data('layer_norm', {'op': 'LayerNorm', 'epsilon': 0.001, 'axis': -1, - 'output_mean_var': False}), - **shaped_const_with_data('gamma', None), - **shaped_const_with_data('beta', None), - **result('result') - }, - edges=[ - *connect('input', '0:layer_norm'), - *connect('gamma', '1:layer_norm'), - *connect('beta', '2:layer_norm'), - *connect('layer_norm', 'result') - ] - ) - - ref_graph = build_graph( - nodes_attrs={ - **shaped_parameter('input', int64_array([1, 3, 15, 15])), - **shaped_const_with_data('mvn_const', None), - **regular_op_with_empty_data('mvn', {'eps': 0.001, 'across_channels': 1, 'normalize_variance': 1, - 'eps_mode': 'inside_sqrt', 'op': 'MVN', 'type': 'MVN'}), - **shaped_const_with_data('gamma', None), - **regular_op_with_empty_data('gamma_unsqueeze', {'op': 'Unsqueeze', 'type': 'Unsqueeze'}), - **shaped_const_with_data('gamma_unsqueeze_const', None), - **regular_op_with_empty_data('beta_unsqueeze', {'op': 'Unsqueeze', 'type': 'Unsqueeze'}), - **shaped_const_with_data('beta_unsqueeze_const', None), - **regular_op_with_empty_data('mul', {'op': 'Mul', 'type': 'Multiply'}), - **shaped_const_with_data('beta', None), - **regular_op_with_empty_data('add', {'op': 'Add', 'type': 'Add'}), - **result('result') - }, - edges=[ - *connect('input', '0:mvn'), - *connect('mvn_const', '1:mvn'), - *connect('mvn', 
'0:mul'), - *connect('gamma', 'gamma_unsqueeze'), - *connect('gamma_unsqueeze_const', '1:gamma_unsqueeze'), - *connect('gamma_unsqueeze', '1:mul'), - *connect('mul', '0:add'), - *connect('beta', 'beta_unsqueeze'), - *connect('beta_unsqueeze_const', '1:beta_unsqueeze'), - *connect('beta_unsqueeze', '1:add'), - *connect('add', 'result') - ], - update_attributes={ - 'mvn_const': {'value': int64_array([-1]), 'shape': int64_array([1])}, - 'gamma_unsqueeze_const': {'value': int64_array([0, 1, 2]), 'shape': int64_array([3])}, - 'beta_unsqueeze_const': {'value': int64_array([0, 1, 2]), 'shape': int64_array([3])} - } - ) - LayerNormalization().find_and_replace_pattern(graph) - flag, resp = compare_graphs(graph, ref_graph, 'result', 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_2(self): - graph = build_graph( - nodes_attrs={ - **shaped_parameter('input', int64_array([1, 3, 15, 15])), - **regular_op_with_empty_data('layer_norm', {'op': 'LayerNorm', 'epsilon': 0.001, 'axis': 1, - 'output_mean_var': False}), - **shaped_const_with_data('gamma', None), - **shaped_const_with_data('beta', None), - **result('result') - }, - edges=[ - *connect('input', '0:layer_norm'), - *connect('gamma', '1:layer_norm'), - *connect('beta', '2:layer_norm'), - *connect('layer_norm', 'result') - ] - ) - - ref_graph = build_graph( - nodes_attrs={ - **shaped_parameter('input', int64_array([1, 3, 15, 15])), - **shaped_const_with_data('mvn_const', None), - **regular_op_with_empty_data('mvn', {'eps': 0.001, 'across_channels': 1, 'normalize_variance': 1, - 'eps_mode': 'inside_sqrt', 'op': 'MVN', 'type': 'MVN'}), - **shaped_const_with_data('gamma', None), - **regular_op_with_empty_data('gamma_unsqueeze', {'op': 'Unsqueeze', 'type': 'Unsqueeze'}), - **shaped_const_with_data('gamma_unsqueeze_const', None), - **regular_op_with_empty_data('beta_unsqueeze', {'op': 'Unsqueeze', 'type': 'Unsqueeze'}), - **shaped_const_with_data('beta_unsqueeze_const', None), - **regular_op_with_empty_data('mul', {'op': 'Mul', 'type': 'Multiply'}), - **shaped_const_with_data('beta', None), - **regular_op_with_empty_data('add', {'op': 'Add', 'type': 'Add'}), - **result('result') - }, - edges=[ - *connect('input', '0:mvn'), - *connect('mvn_const', '1:mvn'), - *connect('mvn', '0:mul'), - *connect('gamma', 'gamma_unsqueeze'), - *connect('gamma_unsqueeze_const', '1:gamma_unsqueeze'), - *connect('gamma_unsqueeze', '1:mul'), - *connect('mul', '0:add'), - *connect('beta', 'beta_unsqueeze'), - *connect('beta_unsqueeze_const', '1:beta_unsqueeze'), - *connect('beta_unsqueeze', '1:add'), - *connect('add', 'result') - ], - update_attributes={ - 'mvn_const': {'value': int64_array([1]), 'shape': int64_array([1])}, - 'gamma_unsqueeze_const': {'value': int64_array([0, 2, 3]), 'shape': int64_array([3])}, - 'beta_unsqueeze_const': {'value': int64_array([0, 2, 3]), 'shape': int64_array([3])} - } - ) - LayerNormalization().find_and_replace_pattern(graph) - flag, resp = compare_graphs(graph, ref_graph, 'result', 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_negative(self): - graph = build_graph( - nodes_attrs={ - **shaped_parameter('input', int64_array([1, 3, 15, 15])), - **regular_op_with_empty_data('layer_norm', {'op': 'LayerNorm', 'epsilon': 0.001, 'axis': -1, - 'output_mean_var': True}), - **shaped_const_with_data('gamma', None), - **shaped_const_with_data('beta', None), - **result('result'), - **result('result_1'), - **result('result_2') - }, - edges=[ - *connect('input', '0:layer_norm'), - *connect('gamma', 
'1:layer_norm'), - *connect('beta', '2:layer_norm'), - *connect('layer_norm:0', 'result'), - *connect('layer_norm:1', 'result_1'), - *connect('layer_norm:2', 'result_2') - ] - ) - - with self.assertRaises(Error): - LayerNormalization().find_and_replace_pattern(graph) diff --git a/tools/mo/unit_tests/mo/middle/passes/__init__.py b/tools/mo/unit_tests/mo/middle/passes/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/middle/passes/convert_data_type_test.py b/tools/mo/unit_tests/mo/middle/passes/convert_data_type_test.py deleted file mode 100644 index 500bd4b74edaca..00000000000000 --- a/tools/mo/unit_tests/mo/middle/passes/convert_data_type_test.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.middle.passes.convert_data_type import convert_blobs, SUPPORTED_DATA_TYPES -from openvino.tools.mo.utils.error import Error -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'data_node': {'kind': 'data', 'value': None, 'shape': int64_array([5])}, - 'op_node': { 'kind': 'op', 'op': 'Result'}} - - -class TestConvertBlob(UnitTestWithMockedTelemetry): - def test_convert_blob_to_fp32_from_fp64(self): - graph = build_graph(nodes_attributes, - [('data_node', 'op_node', {'bin': 1})], - {'data_node': {'value': np.array([4.0, 3.0, 2.0, 1.0], dtype=np.float64)}}) - - convert_blobs(graph, "FP32") - result_value = graph.node['data_node']['value'] - self.assertTrue(result_value.dtype == np.float32) - self.assertListEqual(list(result_value), [4, 3, 2, 1]) - - def test_convert_blob_to_fp16_from_fp64(self): - graph = build_graph(nodes_attributes, - [('data_node', 'op_node', {'bin': 1})], - {'data_node': {'value': np.array([4.0, 3.0, 2.0, 1.0], dtype=np.float64)}}) - - convert_blobs(graph, "FP16") - result_value = graph.node['data_node']['value'] - self.assertTrue(result_value.dtype == np.float16) - self.assertListEqual(list(result_value), [4, 3, 2, 1]) - - def test_convert_blob_to_fp16_from_fp64_overflow(self): - graph = build_graph(nodes_attributes, - [('data_node', 'op_node', {'bin': 1})], - {'data_node': {'value': np.array([4.0, 3.0, 2.0, 1e10], dtype=np.float64)}}) - - convert_blobs(graph, "FP16") - result_value = graph.node['data_node']['value'] - self.assertTrue(result_value.dtype == np.float16) - self.assertListEqual(list(result_value), [4, 3, 2, np.inf]) - - def test_convert_blob_to_int32_with_force_precision(self): - graph = build_graph(nodes_attributes, - [('data_node', 'op_node', {'bin': 1})], - {'data_node': {'value': np.array([4.0, 3.0, 2.0, 1.0], dtype=np.float64)}}) - - convert_blobs(graph, "I32") - result_value = graph.node['data_node']['value'] - self.assertTrue(result_value.dtype == np.int32) - self.assertListEqual(list(result_value), [4, 3, 2, 1]) - - def test_convert_blob_to_int32_with_force_precision_error(self): - graph = build_graph(nodes_attributes, - [('data_node', 'op_node', {'bin': 1})], - {'data_node': {'value': np.array([4.0, 3.0, 2.0, 1.1], dtype=np.float64)}}) - - with self.assertRaisesRegex(Error, '.*results in rounding.*'): - convert_blobs(graph, "I32") - - -class TestUI8(unittest.TestCase): - def test_supported_data_types_uint8_once(self): - i = 0 - for data_type_str, values in SUPPORTED_DATA_TYPES.items(): - 
np_dt, precision, element_type = values - if np_dt == np.uint8: - i += 1 - - self.assertEqual(i, 1, 'uint8 data type should be mentioned in SUPPORTED_DATA_TYPES only once, {} entries ' - 'found'.format(i)) diff --git a/tools/mo/unit_tests/mo/middle/passes/eliminate_test.py b/tools/mo/unit_tests/mo/middle/passes/eliminate_test.py deleted file mode 100644 index 90f5bb4f6ae47a..00000000000000 --- a/tools/mo/unit_tests/mo/middle/passes/eliminate_test.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.eliminate import mark_output_reachable_nodes, mark_const_producer_nodes -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'placeholder_1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'node_1': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Identity'}, - 'node_2': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Identity'}, - 'node_3': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Identity'}, - 'node_4': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Identity'}, - 'node_5': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Identity'}, - 'node_6': {'type': 'Identity', 'value': None, 'kind': 'op', 'op': 'Identity'}, - 'placeholder_1_data_node': {'value': None, 'kind': 'data'}, - 'placeholder_2_data_node': {'value': None, 'kind': 'data'}, - 'data_node_1': {'value': None, 'kind': 'data'}, - 'data_node_2': {'value': None, 'kind': 'data'}, - 'data_node_3': {'value': None, 'kind': 'data'}, - 'data_node_3_2': {'value': None, 'kind': 'data'}, - 'data_node_4': {'value': None, 'kind': 'data'}, - 'data_node_5': {'value': None, 'shape': None, 'kind': 'data'}, - 'data_node_6': {'value': None, 'shape': None, 'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - 'op_output_1': {'kind': 'op', 'op': 'Result'}, - 'op_output_2': {'kind': 'op', 'op': 'Result'} - } - - -class TestEliminatePass(unittest.TestCase): - def test_mark_output_unreachable_nodes(self): - """ - Checks that all nodes that are unreachable from output nodes are marked correspondingly. - The graph doesn't contain data nodes yet. - "node_4" is output. - - placeholder_1->node_1->node_2 - \ - -> node_3->node_4 - - :return: None - """ - graph = build_graph(nodes_attributes, - [('placeholder_1', 'node_1'), - ('node_1', 'node_2'), - ('placeholder_1', 'node_3'), - ('node_3', 'node_4'), - ('node_4', 'op_output') - ], - {'node_4': {}}, - nodes_with_edges_only=True) - mark_output_reachable_nodes(graph) - - self.assertListEqual(sorted(['placeholder_1', 'node_3', 'op_output', 'node_4']), - sorted(graph.get_nodes_with_attributes(is_output_reachable=True))) - self.assertListEqual(sorted(['node_1', 'node_2']), - sorted(graph.get_nodes_with_attributes(is_output_reachable=False))) - - def test_mark_output_unreachable_nodes_behind_output(self): - """ - Checks case when unreachable node is 'behind' (i.e. is the child) of the output node. - The graph doesn't contain data nodes yet. - "node_2" is output. 
- - placeholder_1->node_1->node_2->node_3 - - :return: None - """ - graph = build_graph(nodes_attributes, - [('placeholder_1', 'node_1'), - ('node_1', 'node_2'), - ('node_2', 'node_3'), - ('node_2', 'op_output') - ], - {'node_2': {}}, - nodes_with_edges_only=True) - mark_output_reachable_nodes(graph) - - self.assertListEqual(sorted(['node_1', 'node_2', 'op_output', 'placeholder_1']), - sorted(graph.get_nodes_with_attributes(is_output_reachable=True))) - self.assertFalse(graph.node['node_3']['is_output_reachable']) - - def test_mark_ops_producing_constant_values(self): - """ - Checks case when operation produces only constant tensors so it could be removed. If the node produces several - tensors and at least one of them is not constant then we should not mark this node. - The graph contains data nodes. - "data_node_2" and "data_node_5" are output. - "node_3" produces constant tensor "data_node_3" and non-constant tensor "data_node_3_2". - "node_6" produces constant tensor "data_node_6". - "node_4" could be eliminated since it gets constant input. - - node_6->data_node_6-> - \ - placeholder_1->placeholder_1_data_node->node_1->data_node_1->node_2->data_node_2 - / - node_3->data_node_3->node_4->data_node_4-> - \ - ->data_node_3_2->node_5->data_node_5 - - :return: None - """ - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data_node'), - ('placeholder_1_data_node', 'node_1'), - ('node_1', 'data_node_1'), - ('data_node_1', 'node_2'), - ('node_2', 'data_node_2'), - ('node_3', 'data_node_3'), - ('node_3', 'data_node_3_2'), - ('node_6', 'data_node_6'), - ('data_node_6', 'node_1'), - ('data_node_3_2', 'node_5'), - ('node_5', 'data_node_5'), - ('data_node_3', 'node_4'), - ('data_node_4', 'node_1'), - ('data_node_2', 'op_output'), - ('data_node_5', 'op_output_1') - ], - {'data_node_2': {}, - 'data_node_5': {}, - 'data_node_3': {'value': np.array(1)}, - 'data_node_6': {'value': np.array(1)}}, - nodes_with_edges_only=True) - mark_const_producer_nodes(graph) - self.assertTrue((graph.node['node_6']['is_const_producer'])) - self.assertListEqual(sorted(['node_1', 'node_2', 'node_3', 'node_5', 'placeholder_1']), - sorted(graph.get_nodes_with_attributes(is_const_producer=False, kind='op'))) - - graph.clean_up() - self.assertTrue('node_3' in graph.nodes()) - self.assertTrue('node_4' not in graph.nodes()) - self.assertTrue('node_6' not in graph.nodes()) - - def test_undead_nodes_with_constant_inputs(self): - """ - Checks that if node of 'undead' type has constant inputs it is not removed from the graph. - :return: None - """ - pass - - def test_remove_node_from_graph(self): - """ - Checks case when remove node from graph. - The graph doesn't contain removed node yet. - "node_2" should be removed. 
- - placeholder_1->node_1->node_2->node_3 - - :return: None - """ - graph = build_graph(nodes_attributes, - [('placeholder_1', 'node_1'), - ('node_1', 'node_2'), - ('node_2', 'node_3')], - nodes_with_edges_only=True) - graph.erase_node(Node(graph, 'node_2')) - - self.assertListEqual(sorted(['placeholder_1', 'node_1', 'node_3']), sorted(graph.nodes())) diff --git a/tools/mo/unit_tests/mo/middle/passes/fusing/__init__.py b/tools/mo/unit_tests/mo/middle/passes/fusing/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/middle/passes/fusing/decomposition_test.py b/tools/mo/unit_tests/mo/middle/passes/fusing/decomposition_test.py deleted file mode 100644 index 8cb464727469f9..00000000000000 --- a/tools/mo/unit_tests/mo/middle/passes/fusing/decomposition_test.py +++ /dev/null @@ -1,661 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.passes.fusing.decomposition import convert_scale_shift_to_mul_add, convert_batch_norm -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # ScaleShift layer - 'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift', 'axis': 0}, - 'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'}, - 'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'}, - 'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Mul and Add operations - 'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'}, - 'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'}, - 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'add_1': {'type': None, 'kind': 'op', 'op': 'Add'}, - 'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'}, - 'add_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'add_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Mul and Add operations - 'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'}, - 'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'}, - 'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'add_2': {'type': None, 'kind': 'op', 'op': 'Add'}, - 'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'}, - 'add_2_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'add_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Reshape - 'placeholder_2/Reshape_': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'}, - 'placeholder_2/Reshape_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'placeholder_2/Reshape_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None}, - 'placeholder_2/Reshape_const_data': {'kind': 'data', 'value': None, 'shape': 
None}, - # BatchNorm operation - 'bn_op': {'type': None, 'kind': 'op', 'op': 'BatchNorm', 'can_be_fused': True}, - 'const_bn_const': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'}, - 'bn_const': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_bn_beta': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'}, - 'bn_beta': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_bn_mean': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'}, - 'bn_mean': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_bn_var': {'value': None, 'shape': None, 'kind': 'op', 'op': 'Const'}, - 'bn_var': {'value': None, 'shape': None, 'kind': 'data'}, - 'bn_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Concat1 operation - 'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'}, - 'concat_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'} -} - - -class ScaleShiftToMulAdd(unittest.TestCase): - # ScaleShift -> Mul - def test_scaleshift_to_mul_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('const_scaleshift_1_w', 'scaleshift_1_w'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'scaleshift_1_data': {} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1': {'can_be_fused': True}, - 'scaleshift_1_data': {} - }) - - graph.graph['layout'] = 'NHWC' - convert_scale_shift_to_mul_add(graph) - graph.clean_up() - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # ScaleShift 2 inputs-> Mul - def test_scaleshift2_to_mul(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('placeholder_2_data', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227])}, - 'scaleshift_1_data': {} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'placeholder_2/Reshape_'), - ('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'), - ('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'), - ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'), - ('placeholder_1_data', 'mul_1'), - ('placeholder_2/Reshape_data', 'mul_1'), - ('mul_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227])}, - 'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]}, - 'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 
1]), 'shape': [4]}, - 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])}, - 'mul_1': {'can_be_fused': True}, - 'scaleshift_1_data': {} - }) - - graph.graph['layout'] = 'NHWC' - convert_scale_shift_to_mul_add(graph) - graph.clean_up() - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # ScaleShift 2 inputs-> Mul (axis = 1) - def test_scaleshift2_axis1_to_mul(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('placeholder_2_data', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([227])}, - 'scaleshift_1': {'axis': 1}, - 'scaleshift_1_data': {} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'placeholder_2/Reshape_'), - ('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'), - ('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'), - ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'), - ('placeholder_1_data', 'mul_1'), - ('placeholder_2/Reshape_data', 'mul_1'), - ('mul_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([227])}, - 'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]}, - 'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]}, - 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])}, - 'mul_1': {'can_be_fused': True}, - 'scaleshift_1_data': {} - }) - - graph.graph['layout'] = 'NHWC' - convert_scale_shift_to_mul_add(graph) - graph.clean_up() - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # ScaleShift -> Mul (Zero biases) - def test_scaleshift_to_mul_2(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('const_scaleshift_1_w', 'scaleshift_1_w'), - ('const_scaleshift_1_b', 'scaleshift_1_b'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1_b', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])}, - 'scaleshift_1_data': {} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1': {'can_be_fused': True}, - 'scaleshift_1_data': {} - }) - - graph.graph['layout'] = 'NHWC' - convert_scale_shift_to_mul_add(graph) - graph.clean_up() - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # ScaleShift -> Mul->Add - def test_scaleshift_to_mul_add(self): - graph 
= build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('const_scaleshift_1_w', 'scaleshift_1_w'), - ('const_scaleshift_1_b', 'scaleshift_1_b'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1_b', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])}, - 'scaleshift_1_data': {} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])}, - 'add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1': {'can_be_fused': True}, - 'mul_1': {'can_be_fused': True}, - 'scaleshift_1_data': {} - }) - - graph.graph['layout'] = 'NHWC' - convert_scale_shift_to_mul_add(graph) - graph.clean_up() - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # ScaleShift -> None (Zero weights and biases) - def test_scaleshift_to_nothing(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('const_scaleshift_1_w', 'scaleshift_1_w'), - ('const_scaleshift_1_b', 'scaleshift_1_b'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1_b', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])}, - 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])}, - 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])} - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}} - ,nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - convert_scale_shift_to_mul_add(graph) - graph.clean_up() - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # ScaleShift -> ScaleShift (can_be_fused=False) - def test_scaleshift_can_be_fused(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('const_scaleshift_1_w', 'scaleshift_1_w'), - ('const_scaleshift_1_b', 'scaleshift_1_b'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1_b', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])}, - 'scaleshift_1_b': {'shape': np.array([3]), 'value': 
np.array([0, 0, 0])}, - 'scaleshift_1': {'can_be_fused': False}, - 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('const_scaleshift_1_w', 'scaleshift_1_w'), - ('const_scaleshift_1_b', 'scaleshift_1_b'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1_b', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])}, - 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])}, - 'const_scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])}, - 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])}, - 'scaleshift_1': {'can_be_fused': False}, - 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])} - }) - - convert_scale_shift_to_mul_add(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data') - self.assertTrue(flag, resp) - - -class BatchNormDecomposition(unittest.TestCase): - def test_bn_decomposition_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'bn_op'), - ('const_bn_const', 'bn_const'), - ('const_bn_beta', 'bn_beta'), - ('const_bn_mean', 'bn_mean'), - ('const_bn_var', 'bn_var'), - ('bn_const', 'bn_op'), - ('bn_beta', 'bn_op'), - ('bn_mean', 'bn_op'), - ('bn_var', 'bn_op'), - ('bn_op', 'bn_data'), - ('concat', 'concat_data'), - ('bn_data', 'concat'), - ('concat_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'bn_op': {'eps': 1.2}, - 'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_data': {'shape': np.array([1, 227, 227, 3])}, - 'concat_data': {} - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'add_2'), - ('const_add_2_w', 'add_2_w'), - ('add_2_w', 'add_2'), - ('add_2', 'add_2_data'), - ('concat', 'concat_data'), - ('add_2_data', 'concat'), - ('concat_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), - 'value': np.array([0.67419986, 0.55901699, 0.48795004])}, - 'mul_1_w': {'shape': np.array([3]), - 'value': np.array([0.67419986, 0.55901699, 0.48795004])}, - 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_add_1_w': {'shape': np.array([3]), - 'value': np.array([-0.67419986, -1.11803399, -1.46385011])}, - 'add_1_w': {'shape': np.array([3]), - 'value': np.array([-0.67419986, -1.11803399, -1.46385011])}, - 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'add_2_w': {'shape': np.array([3]), 
'value': np.array([1, 2, 3])}, - 'add_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1': {'can_be_fused': True}, - 'mul_2': {'can_be_fused': True}, - 'add_1': {'can_be_fused': True}, - 'add_2': {'can_be_fused': True}, - 'concat_data': {} - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - convert_batch_norm(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data') - self.assertTrue(flag, resp) - - # 'can_be_fused': False for BatchNorm - def test_bn_decomposition_2(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'bn_op'), - ('const_bn_const', 'bn_const'), - ('const_bn_beta', 'bn_beta'), - ('const_bn_mean', 'bn_mean'), - ('const_bn_var', 'bn_var'), - ('bn_const', 'bn_op'), - ('bn_beta', 'bn_op'), - ('bn_mean', 'bn_op'), - ('bn_var', 'bn_op'), - ('bn_op', 'bn_data'), - ('concat', 'concat_data'), - ('bn_data', 'concat'), - ('concat_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'bn_op': {'eps': 1.2, 'can_be_fused': False}, - 'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_data': {'shape': np.array([1, 227, 227, 3])}, - 'concat_data': {} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'add_2'), - ('const_add_2_w', 'add_2_w'), - ('add_2_w', 'add_2'), - ('add_2', 'add_2_data'), - ('concat', 'concat_data'), - ('add_2_data', 'concat'), - ('concat_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), - 'value': np.array([0.67419986, 0.55901699, 0.48795004])}, - 'mul_1_w': {'shape': np.array([3]), - 'value': np.array([0.67419986, 0.55901699, 0.48795004])}, - 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_add_1_w': {'shape': np.array([3]), - 'value': np.array([-0.67419986, -1.11803399, -1.46385011])}, - 'add_1_w': {'shape': np.array([3]), - 'value': np.array([-0.67419986, -1.11803399, -1.46385011])}, - 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'add_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1': {'can_be_fused': False}, - 'mul_2': {'can_be_fused': False}, - 'add_1': {'can_be_fused': False}, - 'add_2': {'can_be_fused': False}, - 'concat_data': {} - }) - - graph.graph['layout'] = 'NHWC' - convert_batch_norm(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data') - self.assertTrue(flag, resp) - - # graph - NCHW - # BatchNorm - NHWC - def test_bn_decomposition_different_layouts_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'bn_op'), - ('const_bn_const', 'bn_const'), - ('const_bn_beta', 'bn_beta'), - ('const_bn_mean', 
'bn_mean'), - ('const_bn_var', 'bn_var'), - ('bn_const', 'bn_op'), - ('bn_beta', 'bn_op'), - ('bn_mean', 'bn_op'), - ('bn_var', 'bn_op'), - ('bn_op', 'bn_data'), - ('concat', 'concat_data'), - ('bn_data', 'concat'), - ('concat_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'bn_op': {'eps': 1.2, 'data_format': 'NHWC'}, - 'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_data': {'shape': np.array([1, 227, 227, 3])}, - 'concat_data': {} - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'add_2'), - ('const_add_2_w', 'add_2_w'), - ('add_2_w', 'add_2'), - ('add_2', 'add_2_data'), - ('concat', 'concat_data'), - ('add_2_data', 'concat'), - ('concat_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), - 'value': np.array([0.67419986, 0.55901699, 0.48795004])}, - 'mul_1_w': {'shape': np.array([3]), - 'value': np.array([0.67419986, 0.55901699, 0.48795004])}, - 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_add_1_w': {'shape': np.array([3]), - 'value': np.array([-0.67419986, -1.11803399, -1.46385011])}, - 'add_1_w': {'shape': np.array([3]), - 'value': np.array([-0.67419986, -1.11803399, -1.46385011])}, - 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'add_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1': {'can_be_fused': True}, - 'mul_2': {'can_be_fused': True}, - 'add_1': {'can_be_fused': True}, - 'add_2': {'can_be_fused': True}, - 'concat_data': {} - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - convert_batch_norm(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data') - self.assertTrue(flag, resp) - - # graph - NHWC - # BatchNorm - NCHW - def test_bn_decomposition_different_layouts_2(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'bn_op'), - ('const_bn_const', 'bn_const'), - ('const_bn_beta', 'bn_beta'), - ('const_bn_mean', 'bn_mean'), - ('const_bn_var', 'bn_var'), - ('bn_const', 'bn_op'), - ('bn_beta', 'bn_op'), - ('bn_mean', 'bn_op'), - ('bn_var', 'bn_op'), - ('bn_op', 'bn_data'), - ('concat', 'concat_data'), - ('bn_data', 'concat'), - ('concat_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 227, 227])}, - 'bn_op': {'eps': 1.2, 'data_format': 'NCHW'}, - 'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'bn_data': {'shape': np.array([1, 3, 227, 
227])}, - 'concat_data': {} - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'add_2'), - ('const_add_2_w', 'add_2_w'), - ('add_2_w', 'add_2'), - ('add_2', 'add_2_data'), - ('concat', 'concat_data'), - ('add_2_data', 'concat'), - ('concat_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 227, 227])}, - 'const_mul_1_w': {'shape': np.array([3, 1, 1]), - 'value': np.array([[[0.67419986]], [[0.55901699]], [[0.48795004]]])}, - 'mul_1_w': {'shape': np.array([3, 1, 1]), - 'value': np.array([[[0.67419986]], [[0.55901699]], [[0.48795004]]])}, - 'const_mul_2_w': {'shape': np.array([3, 1, 1]), 'value': np.array([[[1]], [[2]], [[3]]])}, - 'mul_2_w': {'shape': np.array([3, 1, 1]), 'value': np.array([[[1]], [[2]], [[3]]])}, - 'const_add_1_w': {'shape': np.array([3, 1, 1]), - 'value': np.array([[[-0.67419986]], [[-1.11803399]], [[-1.46385011]]])}, - 'add_1_w': {'shape': np.array([3, 1, 1]), - 'value': np.array([[[-0.67419986]], [[-1.11803399]], [[-1.46385011]]])}, - 'const_add_2_w': {'shape': np.array([3, 1, 1]), 'value': np.array([[[1]], [[2]], [[3]]])}, - 'add_2_w': {'shape': np.array([3, 1, 1]), 'value': np.array([[[1]], [[2]], [[3]]])}, - 'add_2_data': {'shape': np.array([1, 3, 227, 227])}, - 'mul_1': {'can_be_fused': True}, - 'mul_2': {'can_be_fused': True}, - 'add_1': {'can_be_fused': True}, - 'add_2': {'can_be_fused': True}, - 'concat_data': {} - }, nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - convert_batch_norm(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/passes/fusing/fuse_grouped_conv_test.py b/tools/mo/unit_tests/mo/middle/passes/fusing/fuse_grouped_conv_test.py deleted file mode 100644 index 0030a716e35a37..00000000000000 --- a/tools/mo/unit_tests/mo/middle/passes/fusing/fuse_grouped_conv_test.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.fusing.fuse_grouped_conv import grouped_convolutions_fusing -from openvino.tools.mo.ops.op import PermuteAttrs -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, result, connect, regular_op_with_shaped_data, regular_op, shaped_data, \ - valued_const_with_data, shaped_const_with_data, valued_data - -nodes = { - **regular_op_with_shaped_data('placeholder1', [1, 16, 10, 10], {'type': 'Parameter'}), - - **valued_const_with_data('split_1_axis', int64_array(1), {'type': 'Const'}), - **regular_op('split_1', {'type': 'Split', 'can_be_fused': True}), - **shaped_data('split_1_data1', [1, 4, 10, 10]), - **shaped_data('split_1_data2', [1, 4, 10, 10]), - **shaped_data('split_1_data3', [1, 4, 10, 10]), - **shaped_data('split_1_data4', [1, 4, 10, 10]), - - **shaped_const_with_data('split_2_in_const_weights', int64_array([3, 3, 4, 16]), 
{'type': 'Const'}), - **regular_op('split_2', {'type': 'Split'}), - **valued_data('split_2_data1', np.zeros([3, 3, 4, 4])), - **valued_data('split_2_data2', np.zeros([3, 3, 4, 4])), - **valued_data('split_2_data3', np.zeros([3, 3, 4, 4])), - **valued_data('split_2_data4', np.zeros([3, 3, 4, 4])), - - **regular_op_with_shaped_data('conv2d_1', [1, 4, 8, 8], - {'type': 'Convolution', 'channel_dims': np.array([1]), 'pad': np.array([2, 2]), - 'stride': np.array([2, 2]), - 'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([3, 2, 0, 1]), - inv=int64_array([2, 3, 1, 0])), - 'group': 1, 'output': 4, 'output_shape': [1, 4, 8, 8], 'can_be_fused': True}), - **regular_op_with_shaped_data('conv2d_2', [1, 4, 8, 8], - {'type': 'Convolution', 'pad': np.array([2, 2]), 'stride': np.array([2, 2]), - 'can_be_fused': True}), - **regular_op_with_shaped_data('conv2d_3', [1, 4, 8, 8], - {'type': 'Convolution', 'pad': np.array([2, 2]), 'stride': np.array([2, 2]), - 'can_be_fused': True}), - **regular_op_with_shaped_data('conv2d_4', [1, 4, 8, 8], - {'type': 'Convolution', 'pad': np.array([2, 2]), 'stride': np.array([2, 2]), - 'can_be_fused': True}), - - **regular_op_with_shaped_data('concat', [1, 16, 8, 8], {'type': 'Concat', 'axis': np.array(1)}), - - **regular_op_with_shaped_data('fused_group_conv', [1, 16, 8, 8], - {'type': 'Convolution', 'channel_dims': np.array([1]), 'pad': np.array([2, 2]), - 'stride': np.array([2, 2]), - 'get_weights_permute': PermuteAttrs.Permutation(perm=int64_array([3, 2, 0, 1]), - inv=int64_array([2, 3, 1, 0])), - 'group': 1, 'output': 4, 'output_shape': [1, 4, 8, 8], 'can_be_fused': True}), - **shaped_const_with_data('new_weights_const', int64_array([3, 3, 4, 16]), {'type': 'Const'}), - - **result('result') -} - - -class FuseGroupedConvTest(unittest.TestCase): - def test_fuse_grouped_conv(self): - graph = build_graph(nodes, [*connect('placeholder1', '0:split_1'), *connect('split_1_axis', '1:split_1'), - ('split_1', 'split_1_data1', {'out': 0}), - ('split_1', 'split_1_data2', {'out': 1}), - ('split_1', 'split_1_data3', {'out': 2}), - ('split_1', 'split_1_data4', {'out': 3}), - - *connect('split_2_in_const_weights', 'split_2'), - ('split_2', 'split_2_data1', {'out': 0}), - ('split_2', 'split_2_data2', {'out': 1}), - ('split_2', 'split_2_data3', {'out': 2}), - ('split_2', 'split_2_data4', {'out': 3}), - - ('split_1_data1', 'conv2d_1', {'in': 0}), - ('split_1_data2', 'conv2d_2', {'in': 0}), - ('split_1_data3', 'conv2d_3', {'in': 0}), - ('split_1_data4', 'conv2d_4', {'in': 0}), - - ('split_2_data1', 'conv2d_1', {'in': 1}), - ('split_2_data2', 'conv2d_2', {'in': 1}), - ('split_2_data3', 'conv2d_3', {'in': 1}), - ('split_2_data4', 'conv2d_4', {'in': 1}), - - *connect('conv2d_1', '0:concat'), - *connect('conv2d_2', '1:concat'), - *connect('conv2d_3', '2:concat'), - *connect('conv2d_4', '3:concat'), - - *connect('concat', 'result')]) - - graph_ref = build_graph(nodes, [*connect('placeholder1', '0:fused_group_conv'), - *connect('new_weights_const', '1:fused_group_conv'), - *connect('fused_group_conv', 'result')]) - - graph.graph['layout'] = 'NCHW' - grouped_convolutions_fusing(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result') - self.assertTrue(flag, resp) - - group_conv_node = Node(graph, 'conv2d_1') - group_conv_weights_shape = group_conv_node.in_node(1).shape - self.assertTrue((group_conv_weights_shape == int64_array([3, 3, 4, 16])).all()) diff --git a/tools/mo/unit_tests/mo/middle/passes/fusing/fuse_linear_ops_test.py 
b/tools/mo/unit_tests/mo/middle/passes/fusing/fuse_linear_ops_test.py deleted file mode 100644 index a16962091b00d8..00000000000000 --- a/tools/mo/unit_tests/mo/middle/passes/fusing/fuse_linear_ops_test.py +++ /dev/null @@ -1,1301 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.fusing.fuse_linear_ops import _fuse_mul, fuse_linear_ops -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # ScaleShift layer - 'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'}, - 'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Mul and Add operations - 'mul_1': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True, - 'infer': lambda node: eltwise_infer(node, lambda a, b: a * b)}, - 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_1': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True}, - 'add_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'add_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Mul2 and Add2 operations - 'mul_2': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True}, - 'mul_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'mul_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_2': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True}, - 'add_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'add_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Concat1 operation - 'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'}, - 'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Convolutions - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'}, - 'conv_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_conv_1_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'const_conv_1_b': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'conv_1_data': {'value': None, 'shape': None, 'kind': 
'data'}, - 'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'}, - 'conv_2_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_conv_2_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'const_conv_2_b': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'deconv': {'type': 'Deconvolution', 'kind': 'op', 'op': 'Deconv2D', 'layout': 'NHWC'}, - 'deconv_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'deconv_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_deconv_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'const_deconv_b': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'deconv_data': {'value': None, 'shape': None, 'kind': 'data'}, - # MatMul - 'fc_1': {'type': 'MatMul', 'kind': 'op', 'layout': 'NHWC', 'op': 'MatMul'}, - 'fc_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'fc_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_fc_1_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'const_fc_1_b': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'fc_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Placeholders - 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_3': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - 'op_output_1': {'kind': 'op', 'op': 'Result'}, - 'op_output_2': {'kind': 'op', 'op': 'Result'} -} - - -# Unit tests for fuse_mul -class FuseMulTests(unittest.TestCase): - # Mul(array)->Conv(w+b) - def test_fuse_mul_to_conv_1(self): - # Placeholder->Mul->Conv - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {} - }) - ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([1, 2, 3]), (3, 1)) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 
'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {} - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # Mul(scalar)->Conv(w+b) - def test_fuse_mul_to_conv_2(self): - # Placeholder->Mul->Conv - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([1]), 'value': 6}, - 'mul_1_w': {'shape': np.array([1]), 'value': 6}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {} - }) - ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([6, 6, 6]), (3, 1)) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {} - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # Conv(w+b)->Mul(array) - def test_fuse_mul_to_conv_3(self): - # Placeholder->Conv->Mul - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': 
np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.ones(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.ones(96)}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'mul_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'const_mul_1_w': {'shape': np.array([96]), 'value': np.array([x for x in range(96)])}, - 'mul_1_w': {'shape': np.array([96]), 'value': np.array([x for x in range(96)])}, - }) - ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([x for x in range(96)]), 96) - ref_biases = np.ones(96) * np.array([x for x in range(96)]) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': ref_biases.shape, 'value': ref_biases}, - 'conv_1_b': {'shape': ref_biases.shape, 'value': ref_biases}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])} - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=True) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1', 'placeholder_1') - self.assertTrue(flag, resp) - - # Conv(w)->Mul(scalar) - def test_fuse_mul_to_conv_4(self): - # Placeholder->Conv->Mul - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.ones(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.ones(96)}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'mul_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'const_mul_1_w': {'shape': np.array([1]), 'value': 6}, - 'mul_1_w': {'shape': np.array([1]), 'value': 6}, - }) - ref_weights = np.ones((11, 11, 3, 96)) * np.array([6]) - ref_biases = np.ones(96) * np.array([6]) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 3, 'input_channel_dim': 2, - 
'dims_number': 4}, - 'const_conv_1_b': {'shape': ref_biases.shape, 'value': ref_biases}, - 'conv_1_b': {'shape': ref_biases.shape, 'value': ref_biases}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])} - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=True) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # Op0-+->Op1--+----+-->Concat Op0-+->Op1--+--+-->Concat - # | | | | | | | | - # | +->Op2--+ | => | +->Op2--+ | - # +---->Mul->Conv-+ +---->Conv----+ - def test_fuse_mul_to_conv_5(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('placeholder_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_1_data', 'placeholder_3'), - ('placeholder_3', 'placeholder_3_data'), - ('placeholder_2_data', 'concat_1'), - ('placeholder_3_data', 'concat_1'), - ('conv_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([1]), 'value': 6}, - 'mul_1_w': {'shape': np.array([1]), 'value': 6}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'concat_1_data': {} - }) - ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([6, 6, 6]), (3, 1)) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('placeholder_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_1_data', 'placeholder_3'), - ('placeholder_3', 'placeholder_3_data'), - ('placeholder_2_data', 'concat_1'), - ('placeholder_3_data', 'concat_1'), - ('conv_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output'), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 3, - 'input_channel_dim': 2, 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {}, - 'placeholder_2_data': {}, - 'placeholder_3_data': {}, - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - def test_fuse_mul_to_conv_5_nparray(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - 
('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('placeholder_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_1_data', 'placeholder_3'), - ('placeholder_3', 'placeholder_3_data'), - ('placeholder_2_data', 'concat_1'), - ('placeholder_3_data', 'concat_1'), - ('conv_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output'), - - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([1]), 'value': np.array([6])}, - 'mul_1_w': {'shape': np.array([1]), 'value': np.array([6])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'concat_1_data': {} - }) - ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([6, 6, 6]), (3, 1)) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('placeholder_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_1_data', 'placeholder_3'), - ('placeholder_3', 'placeholder_3_data'), - ('placeholder_2_data', 'concat_1'), - ('placeholder_3_data', 'concat_1'), - ('conv_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output'), - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 3, - 'input_channel_dim': 2, 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {}, - 'placeholder_2_data': {}, - 'placeholder_3_data': {}, - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # Op->Mul(array)-+->Conv(w+b)--+->Concat Op-+->Conv1-+-->Concat - # | | => | | - # +-->Conv(w+b)-+ +->Conv2-+ - def test_fuse_mul_to_convolutions_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('mul_1_data', 'conv_2'), - ('const_conv_2_w', 'conv_2_w'), - ('const_conv_2_b', 'conv_2_b'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ('conv_1_data', 'concat_1'), - ('conv_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': 
{'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'const_conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_data': {'shape': np.array([1, 55, 55, 96])}, - 'concat_1_data': {} - }) - ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([1, 2, 3]), (3, 1)) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('placeholder_1_data', 'conv_2'), - ('const_conv_2_w', 'conv_2_w'), - ('const_conv_2_b', 'conv_2_b'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ('conv_1_data', 'concat_1'), - ('conv_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'const_conv_2_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_2_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_data': {'shape': np.array([1, 55, 55, 96])}, - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1'), Node(graph, 'conv_2')], backward=False) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # Mul(array)->FC(w+b) - def test_fuse_mul_to_fc_1(self): - # Placeholder->Mul->FC - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'fc_1'), - ('const_fc_1_w', 'fc_1_w'), - ('const_fc_1_b', 'fc_1_b'), - ('fc_1_w', 'fc_1'), - ('fc_1_b', 'fc_1'), - ('fc_1', 'fc_1_data'), - ('fc_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 2048])}, - 'mul_1_data': {'shape': np.array([1, 2048])}, - 'const_mul_1_w': {'shape': np.array([2048]), 'value': 
np.array([x for x in range(2048)])}, - 'mul_1_w': {'shape': np.array([2048]), 'value': np.array([x for x in range(2048)])}, - 'const_fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048))}, - 'fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048)), - 'output_channel_dim': 0, 'input_channel_dim': 1, - 'dims_number': 2}, - 'const_fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)}, - 'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)}, - 'fc_1_data': {'shape': np.array([1, 10260])}, - }) - ref_weights = np.ones((10260, 2048)) * np.array([x for x in range(2048)]) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'fc_1'), - ('const_fc_1_w', 'fc_1_w'), - ('const_fc_1_b', 'fc_1_b'), - ('fc_1_w', 'fc_1'), - ('fc_1_b', 'fc_1'), - ('fc_1', 'fc_1_data'), - ('fc_1_data', 'op_output') - - ], - {'placeholder_1_data': {'shape': np.array([1, 2048])}, - 'const_fc_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'fc_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 0, 'input_channel_dim': 1, - 'dims_number': 2}, - 'const_fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)}, - 'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)}, - 'fc_1_data': {'shape': np.array([1, 10260])}, - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'fc_1')], backward=False) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # Mul(scalar)->Conv(w+b) can_be_fused = False - def test_fuse_mul_to_conv_6(self): - # Placeholder->Mul->Conv - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'conv_1': {'can_be_fused': False}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'conv_1': {'can_be_fused': False}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 
96]), - 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {} - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # Mul(array)->DWConv(w+b) - def test_fuse_mul_to_dwconv_1(self): - # Placeholder->Mul->Conv - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('conv_1_w', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 112, 112, 6])}, - 'mul_1_data': {'shape': np.array([1, 112, 112, 6])}, - 'const_mul_1_w': {'shape': np.array([6]), 'value': np.array([1, 2, 3, 4, 5, 6])}, - 'mul_1_w': {'shape': np.array([6]), 'value': np.array([1, 2, 3, 4, 5, 6])}, - 'const_conv_1_w': {'shape': np.array([3, 3, 6, 1]), 'value': np.ones((3, 3, 6, 1))}, - 'conv_1_w': {'shape': np.array([3, 3, 6, 1]), 'value': np.ones((3, 3, 6, 1)), - 'output_channel_dim': 2, 'input_channel_dim': 2, - 'dims_number': 4}, - 'conv_1_data': {} - }) - ref_weights = np.ones((3, 3, 6, 1)) * np.reshape(np.array([1, 2, 3, 4, 5, 6]), (6, 1)) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('conv_1_w', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 112, 112, 6])}, - 'const_conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 2, 'input_channel_dim': 2, - 'dims_number': 4}, - 'conv_1_data': {} - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # DWConv(w)->Mul(scalar) - def test_fuse_mul_to_dwconv_2(self): - # Placeholder->Conv->Mul - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('conv_1_w', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 112, 112, 6])}, - 'mul_1_data': {'shape': np.array([1, 112, 112, 6])}, - 'const_mul_1_w': {'shape': np.array([6]), 'value': np.array([1, 2, 3, 4, 5, 6])}, - 'mul_1_w': {'shape': np.array([6]), 'value': np.array([1, 2, 3, 4, 5, 6])}, - 'const_conv_1_w': {'shape': np.array([3, 3, 6, 1]), 'value': np.ones((3, 3, 6, 1))}, - 'conv_1_w': {'shape': np.array([3, 3, 6, 1]), 'value': np.ones((3, 3, 6, 1)), - 'output_channel_dim': 2, 'input_channel_dim': 2, - 'dims_number': 4}, - 'conv_1_data': {} - }) - - ref_weights = np.ones((3, 3, 6, 1)) * np.reshape(np.array([1, 2, 3, 4, 5, 6]), (6, 1)) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 
'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('conv_1_w', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 112, 112, 6])}, - 'const_conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 2, 'input_channel_dim': 2, - 'dims_number': 4}, - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=True) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1', 'placeholder_1') - self.assertTrue(flag, resp) - - # Deconv(w)->Mul(array) - def test_fuse_mul_to_deconv_1(self): - # Placeholder->Deonv->Mul - in_shape = np.array([1, 20, 10, 10]) - w_shape = np.array([20, 2, 3, 3]) - out_shape = np.array([1, 10, 21, 21]) - mul_const = np.array(range(10)) - - edges = [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'deconv'), - ('const_deconv_w', 'deconv_w'), - ('deconv_w', 'deconv'), - ('deconv', 'deconv_data'), - ('deconv_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'op_output') - ] - attr_updates = {'placeholder_1_data': {'shape': in_shape}, - 'const_conv_1_w': {'shape': w_shape, 'value': np.ones(w_shape)}, - 'deconv': {'group': 5}, - 'deconv_w': {'shape': w_shape, 'value': np.ones(w_shape), - 'output_channel_dim': 1, 'input_channel_dim': 0, - 'dims_number': 4}, - 'deconv_data': {'shape': out_shape}, - 'mul_1_data': {'shape': mul_const.shape}, - 'const_mul_1_w': {'shape': mul_const.shape, 'value': mul_const}, - 'mul_1_w': {'shape': mul_const.shape, 'value': mul_const}, - } - graph = build_graph(nodes_attributes, edges, attr_updates) - # same graph, nothing fused - graph_ref = build_graph(nodes_attributes, edges, attr_updates) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'deconv')], backward=True) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1', 'placeholder_1') - self.assertTrue(flag, resp) - - def test_fuse_mul_data_nodes_names(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), - 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {} - }) - - _fuse_mul(graph, Node(graph, 'mul_1'), [Node(graph, 'conv_1')], backward=False) - - conv_node = Node(graph, 'conv_1') - conv_in_data_name = conv_node.in_node(1)['name'] - const_node = Node(graph, 'const_conv_1_w') - const_out_data_name = const_node.out_node(0)['name'] - mul_node = 
Node(graph, 'mul_1') - conv_in_data = conv_node.in_node(1) - - # Check that transformation doesn't produce identical data node names, - # as this may lead to appearing of Const ops with identical names. - self.assertFalse(conv_in_data_name == const_out_data_name) - - # Attributes that are required for fusing are kept on data nodes. - # These checks are needed to ensure that _fuse_mul doesn't remove any of these attributes. - self.assertTrue(conv_in_data['output_channel_dim'] == 3) - self.assertTrue(conv_in_data['input_channel_dim'] == 2) - self.assertTrue(conv_in_data['dims_number'] == 4) - self.assertTrue(mul_node['can_be_fused'] is True) - - -# Unit tests for fuse_linear_ops -class FuseLinOpsTests(unittest.TestCase): - # Op->Mul(array)-+->Conv(w+b)->Add-+->Concat Op-+->Conv1-+-->Concat - # | | => | | - # +-->Conv(w+b)-----+ +->Conv2-+ - def test_fuse_lin_ops_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('mul_1_data', 'conv_2'), - ('const_conv_2_w', 'conv_2_w'), - ('const_conv_2_b', 'conv_2_b'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ('conv_1_data', 'concat_1'), - ('conv_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'const_conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_data': {'shape': np.array([1, 55, 55, 96])}, - 'concat_1_data': {} - }) - ref_weights = np.ones((11, 11, 3, 96)) * np.reshape(np.array([1, 2, 3]), (3, 1)) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('placeholder_1_data', 'conv_2'), - ('const_conv_2_w', 'conv_2_w'), - ('const_conv_2_b', 'conv_2_b'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ('conv_1_data', 'concat_1'), - ('conv_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 
'conv_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'const_conv_2_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'conv_2_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_data': {'shape': np.array([1, 55, 55, 96])}, - }) - - fuse_linear_ops(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # Mul(array)->FC(w+b) - def test_fuse_mul_to_fc_1(self): - # Placeholder->Mul->FC - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'fc_1'), - ('const_fc_1_w', 'fc_1_w'), - ('const_fc_1_b', 'fc_1_b'), - ('fc_1_w', 'fc_1'), - ('fc_1_b', 'fc_1'), - ('fc_1', 'fc_1_data'), - ('fc_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 2048])}, - 'mul_1_data': {'shape': np.array([1, 2048])}, - 'const_mul_1_w': {'shape': np.array([2048]), 'value': np.array([x for x in range(2048)])}, - 'mul_1_w': {'shape': np.array([2048]), 'value': np.array([x for x in range(2048)])}, - 'const_fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048))}, - 'fc_1_w': {'shape': np.array([10260, 2048]), 'value': np.ones((10260, 2048)), - 'output_channel_dim': 0, 'input_channel_dim': 1, - 'dims_number': 2}, - 'const_fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)}, - 'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)}, - 'fc_1_data': {'shape': np.array([1, 10260])}, - }) - ref_weights = np.ones((10260, 2048)) * np.array([x for x in range(2048)]) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'fc_1'), - ('const_fc_1_w', 'fc_1_w'), - ('const_fc_1_b', 'fc_1_b'), - ('fc_1_w', 'fc_1'), - ('fc_1_b', 'fc_1'), - ('fc_1', 'fc_1_data'), - ('fc_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 2048])}, - 'const_fc_1_w': {'shape': ref_weights.shape, 'value': ref_weights}, - 'fc_1_w': {'shape': ref_weights.shape, 'value': ref_weights, - 'output_channel_dim': 0, 'input_channel_dim': 1, - 'dims_number': 2}, - 'const_fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)}, - 'fc_1_b': {'shape': np.array([10260]), 'value': np.ones(10260)}, - 'fc_1_data': {'shape': np.array([1, 10260])}, - }) - - fuse_linear_ops(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'fc_1_data') - self.assertTrue(flag, resp) - - # +-----------+ - # | | => Same - # Placeholder--->Add->Mul-----+->Concat - def test_fuse_lin_op_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1_data', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('concat_1', 'concat_1_data'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 
'mul_1'), - ('mul_1', 'mul_1_data'), - ('add_1_data', 'concat_1'), - ('mul_1_data', 'concat_1'), - ('add_1_data', 'mul_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': np.array([1, 1, 3, 3]), 'value': np.zeros((1, 1, 3, 3))}, - 'conv_1_w': {'shape': np.array([1, 1, 3, 3]), 'value': np.zeros((1, 1, 3, 3)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([3]), 'value': np.zeros(3)}, - 'conv_1_b': {'shape': np.array([3]), 'value': np.zeros(3)}, - 'conv_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([1]), 'value': np.array([6])}, - 'mul_1_w': {'shape': np.array([1]), 'value': np.array([6])}, - 'const_add_1_w': {'shape': np.array([1]), 'value': np.array([1])}, - 'add_1_w': {'shape': np.array([1]), 'value': np.array([1])}, - 'concat_1_data': {} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1_data', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1_data', 'concat_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('conv_1_data', 'mul_1'), - ('concat_1', 'concat_1_data'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_conv_1_w': {'shape': np.array([1, 1, 3, 3]), 'value': np.zeros((1, 1, 3, 3))}, - 'conv_1_w': {'shape': np.array([1, 1, 3, 3]), 'value': np.zeros((1, 1, 3, 3)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([3]), 'value': np.ones(3)}, - 'conv_1_b': {'shape': np.array([3]), 'value': np.ones(3)}, - 'conv_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([1]), 'value': np.array([6])}, - 'mul_1_w': {'shape': np.array([1]), 'value': np.array([6])}, - 'concat_1_data': {} - }) - - fuse_linear_ops(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - # TODO: refactor this test - # self.assertTrue(flag, resp) - - # Op->Mul(array)-+->Conv(w+b)------+->Concat - # | | => Same('can_be_fused': False) - # +-->Conv(w+b)-----+ - def test_fuse_lin_ops_2(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('mul_1_data', 'conv_2'), - ('const_conv_2_w', 'conv_2_w'), - ('const_conv_2_b', 'conv_2_b'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ('conv_1_data', 'concat_1'), - ('conv_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 
3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'conv_2': {'can_be_fused': False}, - 'const_conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_data': {'shape': np.array([1, 55, 55, 96])}, - 'concat_1_data': {} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('mul_1_data', 'conv_2'), - ('const_conv_2_w', 'conv_2_w'), - ('const_conv_2_b', 'conv_2_b'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ('conv_1_data', 'concat_1'), - ('conv_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), - 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'conv_2': {'can_be_fused': False}, - 'const_conv_2_w': {'shape': np.array([11, 11, 3, 96]), - 'value': np.ones((11, 11, 3, 96))}, - 'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_data': {'shape': np.array([1, 55, 55, 96])}, - 'concat_1_data': {} - }) - - fuse_linear_ops(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # Op->Mul(array)-+->Conv(w+b)------+->Concat - # | | => Same('can_be_fused': False) - # +-->Conv(w+b)-----+ - def test_fuse_lin_ops_3(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('mul_1_data', 'conv_2'), - ('const_conv_2_w', 'conv_2_w'), - ('const_conv_2_b', 
'conv_2_b'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ('conv_1_data', 'concat_1'), - ('conv_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1': {'can_be_fused': False}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'const_conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96))}, - 'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_data': {'shape': np.array([1, 55, 55, 96])}, - 'concat_1_data': {} - }) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'conv_1'), - ('const_conv_1_w', 'conv_1_w'), - ('const_conv_1_b', 'conv_1_b'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('mul_1_data', 'conv_2'), - ('const_conv_2_w', 'conv_2_w'), - ('const_conv_2_b', 'conv_2_b'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ('conv_1_data', 'concat_1'), - ('conv_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1': {'can_be_fused': False}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'const_conv_1_w': {'shape': np.array([11, 11, 3, 96]), - 'value': np.ones((11, 11, 3, 96))}, - 'conv_1_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_1_data': {'shape': np.array([1, 55, 55, 96])}, - 'const_conv_2_w': {'shape': np.array([11, 11, 3, 96]), - 'value': np.ones((11, 11, 3, 96))}, - 'conv_2_w': {'shape': np.array([11, 11, 3, 96]), 'value': np.ones((11, 11, 3, 96)), - 'output_channel_dim': 3, 'input_channel_dim': 2, - 'dims_number': 4}, - 'const_conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_b': {'shape': np.array([96]), 'value': np.zeros(96)}, - 'conv_2_data': {'shape': np.array([1, 55, 55, 96])}, - 'concat_1_data': {} - }) - - fuse_linear_ops(graph) - graph.clean_up() - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) diff --git 
a/tools/mo/unit_tests/mo/middle/passes/fusing/fuse_linear_seq_test.py b/tools/mo/unit_tests/mo/middle/passes/fusing/fuse_linear_seq_test.py deleted file mode 100644 index c458b79538d99e..00000000000000 --- a/tools/mo/unit_tests/mo/middle/passes/fusing/fuse_linear_seq_test.py +++ /dev/null @@ -1,993 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.passes.fusing.fuse_linear_seq import fuse_mul_add_sequence -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # ScaleShift layer - 'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'}, - 'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Mul and Add operations - 'mul_1': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True}, - 'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_1': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True}, - 'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'add_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Mul2 and Add2 operations - 'mul_2': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True}, - 'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'mul_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'mul_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_2': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True}, - 'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'add_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Mul3 and Add3 operations - 'mul_3': {'type': 'Mul', 'kind': 'op', 'op': 'Mul', 'can_be_fused': True}, - 'const_mul_3_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'mul_3_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'mul_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_3': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True}, - 'const_add_3_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'add_3_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Mul4 and Add4 operations - 'mul_4': {'type': 'Mul', 'kind': 
'op', 'op': 'Mul', 'can_be_fused': True}, - 'const_mul_4_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'mul_4_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'mul_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_4': {'type': 'Add', 'kind': 'op', 'op': 'Add', 'can_be_fused': True}, - 'const_add_4_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'add_4_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_4_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Concat1 operation - 'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'}, - 'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Convolutions - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'}, - 'const_conv_1_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'conv_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_conv_1_b': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'}, - 'const_conv_2_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'conv_2_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_conv_2_b': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - # FullyConnected - 'fc_1': {'type': 'MatMul', 'kind': 'op', 'op': 'FullyConnected', 'layout': 'NHWC'}, - 'const_fc_1_w': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'fc_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'const_fc_1_b': {'value': None, 'shape': None, 'kind': 'op', 'data_type': None, 'op': 'Const'}, - 'fc_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'fc_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Placeholders - 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_3': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'op_output': {'kind': 'op', 'op': 'Result'} -} - - -# Unit tests for fuse_mul_add_sequence -class LinSeqFusingTests(unittest.TestCase): - # Placeholder-+->Mul->Add->Mul-+->Concat - # | | - # +----------------+ - def test_fuse_lin_seq_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 
'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([1]), 'value': 6}, - 'add_1_w': {'shape': np.array([1]), 'value': 6}, - 'mul_2_w': {'shape': np.array([1]), 'value': 6}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([1]), 'value': np.array([36])}, - 'add_1_w': {'shape': np.array([1]), 'value': np.array([36])}, - 'mul_1': {'can_be_fused': True}, - 'add_1': {'can_be_fused': True}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # +----------------+ - # | | - # Placeholder-+->Mul->Add->Mul-+---------------+->Concat - # | | - # +-->Placeholder----+ - def test_fuse_lin_seq_2(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('mul_2_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([1]), 'value': 6}, - 'add_1_w': {'shape': np.array([1]), 'value': 6}, - 'mul_2_w': {'shape': np.array([1]), 'value': 6}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('add_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 
227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([1]), 'value': np.array([36])}, - 'add_1_w': {'shape': np.array([1]), 'value': np.array([36])}, - 'mul_1': {'can_be_fused': True}, - 'add_1': {'can_be_fused': True}, - }, - nodes_with_edges_only=True) - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # +----->Placeholder - # | | => The same graph - # Placeholder--->Mul->Add->Mul--+->Concat - def test_fuse_lin_seq_3(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('add_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(6)}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('add_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(6)}, - }, - nodes_with_edges_only=True) - - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1') - self.assertTrue(flag, resp) - - # +-------->Placeholder +-------->Placeholder - # | | => | | - # Placeholder--->Mul->Add->Mul-+->Concat Placeholder-+->Mul->Mul->Add-+->Concat - def test_fuse_lin_seq_4(self): 
- graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('mul_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(6)}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('mul_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array([6])}, - 'add_1_w': {'shape': np.array([1]), 'value': np.array([36])}, - 'mul_2_w': {'shape': np.array([1]), 'value': np.array([6])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'op_output') - self.assertTrue(flag, resp) - - # +-------->Placeholder +->Placeholder - # | | => | | - # Placeholder--->Mul->Add->Mul-+->Concat Placeholder--->Mul-----------+->Concat - def test_fuse_lin_seq_5(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('mul_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': 
np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(0)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(1)}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('mul_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array([6])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # +-------->Placeholder +->Placeholder - # | | => | | - # Placeholder--->Mul->Add->Mul-+->Concat Placeholder--->Mul-->Add-----+->Concat - def test_fuse_lin_seq_6(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('mul_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(1)}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('mul_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 
'add_1_w': {'shape': np.array([1]), 'value': np.array([6])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # +-------->Placeholder +->Placeholder - # | | => | | - # Placeholder--->Mul->Add->Mul-+->Concat Placeholder--->Mul-->Mul-----+->Concat - def test_fuse_lin_seq_7(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('mul_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(0)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(6)}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('mul_1_data', 'placeholder_2'), - ('placeholder_2', 'placeholder_2_data'), - ('placeholder_2_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'placeholder_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([1]), 'value': np.array([6])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # Placeholder--->Mul->Add->Mul-+->Concat Placeholder->Concat - def test_fuse_lin_seq_8(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), 
- ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(1)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(0)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(1)}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}}, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # Placeholder--->Mul->Add->Mul-+->Concat Placeholder->Mul->Add->Concat - def test_fuse_lin_seq_9(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([1]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([1]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([1]), 'value': np.array(6)}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([1]), 'value': np.array([36])}, - 'add_1_w': {'shape': np.array([1]), 'value': np.array([36])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # Placeholder--->Mul->Add->Mul-+->Concat Placeholder->Mul->Add->Concat - def test_fuse_lin_seq_10(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - 
('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([3]), 'value': np.array([6, 6, 6])}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(6)}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([36, 36, 36])}, - 'add_1_w': {'shape': np.array([3]), 'value': np.array([36, 36, 36])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # Placeholder-+->Mul->Add->Mul-+->Concat - # | | With 'can_be_fused' = False - # +----------------+ - def test_fuse_lin_seq_11(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_1': {'can_be_fused': False}, - 'add_1': {'can_be_fused': False}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - 
('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_1': {'can_be_fused': False}, - 'add_1': {'can_be_fused': False}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) - - # Placeholder-+->Mul->Add->Mul-+->Concat - # | | With 'can_be_fused' = False - # +----------------+ - def test_fuse_lin_seq_12(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1': {'can_be_fused': False}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1': {'can_be_fused': False}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 
'concat_1_data') - self.assertTrue(flag, resp) - - # Placeholder-+->Mul->Add->Mul-+->Concat - # | | - # +->Mul->Mul->----+ (This Mul ops has shared weights with upper Mul ops) - def test_fuse_lin_seq_shared_weights_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('const_mul_2_w', 'mul_2_w'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'mul_3'), - ('mul_3', 'mul_3_data'), - ('mul_1_w', 'mul_3'), - ('mul_3_data', 'mul_4'), - ('mul_2_w', 'mul_4'), - ('mul_4', 'mul_4_data'), - ('mul_4_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_3_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_4_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'add_1_w': {'shape': np.array([]), 'value': np.array(6)}, - 'mul_2_w': {'shape': np.array([]), 'value': np.array(6)}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('const_mul_1_w', 'mul_1_w'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('const_add_1_w', 'add_1_w'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'mul_3'), - ('mul_3', 'mul_3_data'), - ('const_mul_3_w', 'mul_3_w'), - ('mul_3_w', 'mul_3'), - ('mul_3_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_3_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([1]), 'value': np.array([36])}, - 'mul_3_w': {'shape': np.array([1]), 'value': np.array([36])}, - 'add_1_w': {'shape': np.array([1]), 'value': np.array([36])}, - 'mul_1': {'can_be_fused': True}, - 'add_1': {'can_be_fused': True}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NHWC' - fuse_mul_add_sequence(graph) - graph.clean_up() - self.assertTrue(len(graph.node) == len(graph_ref.node), - "Graphs has different number of nodes: {} and {}".format(len(graph.node), - len(graph_ref.node))) - - (flag, resp) = compare_graphs(graph, graph_ref, 'concat_1_data') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/passes/fusing/helpers_test.py b/tools/mo/unit_tests/mo/middle/passes/fusing/helpers_test.py deleted file mode 100644 index ee5fee48ff0c2d..00000000000000 --- a/tools/mo/unit_tests/mo/middle/passes/fusing/helpers_test.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.fusing.helpers import forward_bfs, backward_bfs, 
get_next_operation, common_bfs -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, connect, result, \ - valued_const_with_data, connect_data - -nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # ScaleShift layer - 'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'}, - 'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Mul and Add operations - 'mul_1': {'type': 'Mul', 'kind': 'op', 'op': 'Mul'}, - 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_1': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - 'add_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Mul2 and Add2 operations - 'mul_2': {'type': 'Mul', 'kind': 'op', 'op': 'Mul'}, - 'mul_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'mul_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_2': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - 'add_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Concat1 operation - 'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'}, - 'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Convolutions - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'}, - 'conv_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'}, - 'conv_2_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - # FullyConnected - 'fc_1': {'type': 'MatMul', 'kind': 'op', 'op': 'FullyConnected', 'layout': 'NHWC'}, - 'fc_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'fc_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'fc_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Placeholders - 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_3': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'op_output': { 'kind': 'op', 'op': 'Result'} -} - - -# Unit tests for forward and backward bfs (forward_bfs, backward_bfs) -class BFSTests(unittest.TestCase): - def test_forward_bfs_simple(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1', 'add_1_data'), - 
('add_1_data', 'op_output') - ]) - - res = forward_bfs(Node(graph, 'placeholder_1'), ['ScaleShift', 'Mul'], ['Add']) - self.assertTrue(len(res) == 1 and res[0].id == 'add_1', 'Add operation was not found by bfs') - - res = forward_bfs(Node(graph, 'placeholder_1'), [], ['Add'], allowed_all=True) - self.assertTrue(len(res) == 1 and res[0].id == 'add_1', 'Add operation was not found by bfs') - - res = forward_bfs(Node(graph, 'placeholder_1_data'), ['ScaleShift'], ['Add']) - self.assertTrue(len(res) == 0, 'No one node should be found! But bfs found {} nodes'.format(len(res))) - - res = forward_bfs(Node(graph, 'placeholder_1_data'), ['ScaleShift'], ['Mul', 'Add']) - self.assertTrue(len(res) == 1 and res[0].id == 'mul_1', 'BFS should find only one Mul operation') - - def test_backward_bfs_simple(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'op_output') - ]) - - res = backward_bfs(Node(graph, 'add_1_data'), ['Add', 'ScaleShift', 'Mul'], ['Parameter']) - self.assertTrue(len(res) == 1 and res[0].id == 'placeholder_1', 'Placeholder operation was not found by bfs') - - res = backward_bfs(Node(graph, 'add_1'), [], ['Parameter'], allowed_all=True) - self.assertTrue(len(res) == 1 and res[0].id == 'placeholder_1', 'Placeholder operation was not found by bfs') - - res = backward_bfs(Node(graph, 'add_1_data'), ['Add'], ['ScaleShift']) - self.assertTrue(len(res) == 0, 'No one node should be found! But bfs found {} nodes'.format(len(res))) - - res = backward_bfs(Node(graph, 'add_1_data'), ['Add', 'Mul'], ['Parameter', 'ScaleShift']) - self.assertTrue(len(res) == 1 and res[0].id == 'scaleshift_1', 'BFS should find only one ScaleShift operation') - - def test_forward_bfs_hard(self): - # Placeholder->ScaleShift->Mul1->Add1---->Concat - # `----------->Add2->Mul2--' - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('placeholder_1_data', 'add_2'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1', 'add_1_data'), - ('add_2', 'add_2_data'), - ('add_2_data', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('add_1_data', 'concat_1'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ]) - - res = forward_bfs(Node(graph, 'placeholder_1'), ['ScaleShift', 'Mul', 'Add'], ['Concat']) - self.assertTrue(len(res) == 1 and res[0].id == 'concat_1', 'Probably Concat operation was not found by bfs') - - res = forward_bfs(Node(graph, 'placeholder_1'), ['ScaleShift', 'Mul'], ['Add']) - self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]), - 'Add operations was not found by bfs') - - res = forward_bfs(Node(graph, 'placeholder_1'), ['ScaleShift'], ['Add']) - self.assertTrue(len(res) == 0, 'BFS shouldn\'t find any operations') - - res = forward_bfs(Node(graph, 'placeholder_1'), [], ['Add'], allowed_all=True) - self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]), - 'Add operations was not found by bfs') - - res = forward_bfs(Node(graph, 'placeholder_1_data'), ['ScaleShift'], 
['Concat']) - self.assertTrue(len(res) == 0, 'No one node should be found! But bfs found {} nodes'.format(len(res))) - - def test_backward_bfs_hard(self): - # Placeholder->ScaleShift->Mul1->Add1---->Concat - # `----------->Add2->Mul2--' - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('placeholder_1_data', 'add_2'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1', 'add_1_data'), - ('add_2', 'add_2_data'), - ('add_2_data', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('add_1_data', 'concat_1'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ]) - - res = backward_bfs(Node(graph, 'concat_1'), ['ScaleShift', 'Mul', 'Add'], ['Parameter']) - self.assertTrue(len(res) == 0, 'Smth went wrong with bfs') - - res = backward_bfs(Node(graph, 'concat_1'), ['Mul'], ['Add']) - self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]), - 'Add operations was not found by bfs') - - res = backward_bfs(Node(graph, 'concat_1'), ['ScaleShift'], ['Add']) - self.assertTrue(len(res) == 0, 'BFS shouldn\'t find any operations') - - res = backward_bfs(Node(graph, 'concat_1'), [], ['Add'], allowed_all=True) - self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]), - 'Add operations was not found by bfs') - - res = backward_bfs(Node(graph, 'concat_1'), ['ScaleShift'], ['ScaleShift']) - self.assertTrue(len(res) == 0, 'No one node should be found! But bfs found {} nodes'.format(len(res))) - - def test_backward_bfs_hard2(self): - # Placeholder->ScaleShift->Mul1->Add1---->Concat - # `----------->Add2->Mul2--' - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'add_2'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1', 'add_1_data'), - ('add_2', 'add_2_data'), - ('add_2_data', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('add_1_data', 'concat_1'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ]) - - res = backward_bfs(Node(graph, 'concat_1'), ['Mul', 'Add'], ['Parameter']) - self.assertTrue(len(res) == 0, 'Smth went wrong with bfs') - - res = backward_bfs(Node(graph, 'concat_1'), ['Mul'], ['Add']) - self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]), - 'Add operations was not found by bfs') - - res = backward_bfs(Node(graph, 'concat_1'), ['ScaleShift'], ['Add']) - self.assertTrue(len(res) == 0, 'BFS shouldn\'t find any operations') - - res = backward_bfs(Node(graph, 'concat_1'), [], ['Add'], allowed_all=True) - self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]), - 'Add operations was not found by bfs') - - res = backward_bfs(Node(graph, 'concat_1'), ['ScaleShift'], ['ScaleShift']) - self.assertTrue(len(res) == 0, 'No one node should be found! 
But bfs found {} nodes'.format(len(res))) - - def test_backward_bfs_cycle(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'placeholder_1'), - ('add_1_data', 'op_output') - ]) - - res = backward_bfs(Node(graph, 'add_1_data'), ['Add', 'ScaleShift', 'Mul', 'Parameter'], ['Conv2D']) - self.assertTrue(len(res) == 0, 'Sholdn\'t find any nodes due to cycle in graph') - - def test_backward_bfs_check_op_instead_of_type(self): - # Placeholder->ScaleShift->Mul1->Add1---->Concat - # `----------->Add2->Mul2--' - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'add_2'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1', 'add_1_data'), - ('add_2', 'add_2_data'), - ('add_2_data', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('add_1_data', 'concat_1'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('concat_1_data', 'op_output') - ]) - - res = common_bfs(Node(graph, 'concat_1'), ['Mul', 'Add'], ['Parameter'], is_backward=True, attr_to_check='op') - self.assertTrue(len(res) == 0, 'Smth went wrong with bfs') - - res = common_bfs(Node(graph, 'concat_1'), ['Mul'], ['Add'], is_backward=True, attr_to_check='op') - self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]), - 'Add operations was not found by bfs') - - res = common_bfs(Node(graph, 'concat_1'), ['ScaleShift'], ['Add'], is_backward=True, attr_to_check='op') - self.assertTrue(len(res) == 0, 'BFS shouldn\'t find any operations') - - res = common_bfs(Node(graph, 'concat_1'), [], ['Add'], allowed_all=True, is_backward=True, attr_to_check='op') - self.assertTrue(len(res) == 2 and all([res[x].id in ['add_1', 'add_2'] for x in range(len(res))]), - 'Add operations was not found by bfs') - - res = common_bfs(Node(graph, 'concat_1'), ['ScaleShift'], ['ScaleShift'], is_backward=True, attr_to_check='op') - self.assertTrue(len(res) == 0, 'No one node should be found! 
But bfs found {} nodes'.format(len(res))) - - def test_backward_bfs_multi_consumer_data_nodes(self): - # Placeholder-> Mul -> Result - # Const -/ \- Result2 - - graph = build_graph({**regular_op_with_shaped_data('parameter', [1], {'op': 'Parameter'}), - **valued_const_with_data('const', int64_array([5])), - **regular_op_with_shaped_data('mul', [1], {'op': 'Mul'}), - **result('result'), - **result('result2'), - }, - [*connect('parameter', '0:mul'), - *connect('const', '1:mul'), - *connect('mul:0', 'result'), - *connect_data('mul', 'result2'), - ]) - - res = common_bfs(Node(graph, 'result'), ['Mul'], ['Parameter'], is_backward=True, attr_to_check='op', - follow_multi_consumer_data_nodes=True) - self.assertTrue(len(res) == 1, 'The multi-consumer data node "mul_d" was not followed') - - res = common_bfs(Node(graph, 'result'), ['Mul'], ['Parameter'], is_backward=True, attr_to_check='op') - self.assertTrue(len(res) == 0, 'The multi-consumer data node "mul_d" was followed') - - -# Unit tests for get_next_operation -class GetNextOperationTests(unittest.TestCase): - def test_get_next_operation_1(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'scaleshift_1'), - ('scaleshift_1_w', 'scaleshift_1'), - ('scaleshift_1', 'scaleshift_1_data'), - ('scaleshift_1_data', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'op_output') - ]) - - res = get_next_operation(Node(graph, 'mul_1')) - self.assertTrue(len(res) == 1 and res[0].id == 'add_1', 'get_nex_operation returned wrong op') - - def test_get_next_operation_2(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('placeholder_1_data', 'add_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'op_output') - ]) - - res = get_next_operation(Node(graph, 'placeholder_1')) - self.assertTrue(len(res) == 2 and all([x.id in ['add_1', 'mul_1'] for x in res]), - 'get_nex_operation returned wrong op') - - def test_get_next_operation_3(self): - # Placeholder-+--->ScaleShift - # +-----^ - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1', 'placeholder_2_data'), - ('placeholder_1_data', 'mul_1'), - ('placeholder_2_data', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'op_output') - ]) - - res = get_next_operation(Node(graph, 'placeholder_1')) - self.assertTrue(len(res) == 1 and res[0].id == 'mul_1', 'get_nex_operation returned wrong op') diff --git a/tools/mo/unit_tests/mo/middle/passes/fusing/mark_unfused_nodes_test.py b/tools/mo/unit_tests/mo/middle/passes/fusing/mark_unfused_nodes_test.py deleted file mode 100644 index 67823244f2144a..00000000000000 --- a/tools/mo/unit_tests/mo/middle/passes/fusing/mark_unfused_nodes_test.py +++ /dev/null @@ -1,301 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.passes.fusing.mark_unfused_nodes import mark_unfused_nodes -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # ScaleShift layer - 'scaleshift_1': {'type': 'ScaleShift', 'kind': 
'op', 'op': 'ScaleShift'}, - 'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Mul and Add operations - 'mul_1': {'type': 'Mul', 'kind': 'op', 'op': 'Mul'}, - 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_1': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - 'add_1_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Mul2 and Add2 operations - 'mul_2': {'type': 'Mul', 'kind': 'op', 'op': 'Mul'}, - 'mul_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'mul_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_2': {'type': 'Add', 'kind': 'op', 'op': 'Add'}, - 'add_2_w': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'add_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Concat1 operation - 'concat_1': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'}, - 'concat_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Convolutions - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'}, - 'conv_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NHWC'}, - 'conv_2_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - # FullyConnected - 'fc_1': {'type': 'MatMul', 'kind': 'op', 'layout': 'NHWC'}, - 'fc_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'fc_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'fc_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Placeholders - 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_3': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_3_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'op_output': { 'kind': 'op', 'op': 'Result'} -} - - -# Unit tests for forward and backward bfs (forward_bfs, backward_bfs) -class MarkFusedNodes(unittest.TestCase): - def test_mark_unfused_nodes_1(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([1]), 'value': 6}, - 'add_1_w': {'shape': np.array([1]), 
'value': 6}, - 'mul_2_w': {'shape': np.array([1]), 'value': 6}, - }) - - graph.graph['layout'] = 'NHWC' - - mark_unfused_nodes(graph, '.*mul.*') - - self.assertFalse(graph.node['mul_1']['can_be_fused'], "can_be_fused should be False") - self.assertFalse(graph.node['mul_2']['can_be_fused'], "can_be_fused should be False") - self.assertTrue(graph.node['add_1']['can_be_fused'], "can_be_fused should be True") - - def test_mark_unfused_nodes_2(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([1]), 'value': 6}, - 'add_1_w': {'shape': np.array([1]), 'value': 6}, - 'mul_2_w': {'shape': np.array([1]), 'value': 6}, - }) - graph.graph['layout'] = 'NHWC' - - mark_unfused_nodes(graph, '.*') - - self.assertFalse(graph.node['mul_1']['can_be_fused'], "can_be_fused should be False") - self.assertFalse(graph.node['mul_2']['can_be_fused'], "can_be_fused should be False") - self.assertFalse(graph.node['add_1']['can_be_fused'], "can_be_fused should be False") - self.assertFalse(graph.node['placeholder_1']['can_be_fused'], "can_be_fused should be False") - self.assertFalse(graph.node['concat_1']['can_be_fused'], "can_be_fused should be False") - - def test_mark_unfused_nodes_3(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([1]), 'value': 6}, - 'add_1_w': {'shape': np.array([1]), 'value': 6}, - 'mul_2_w': {'shape': np.array([1]), 'value': 6}, - }) - graph.graph['layout'] = 'NHWC' - - mark_unfused_nodes(graph, 'mul_1,add_1') - - self.assertFalse(graph.node['mul_1']['can_be_fused'], "can_be_fused should be False") - self.assertFalse(graph.node['add_1']['can_be_fused'], "can_be_fused should be False") - self.assertTrue(graph.node['mul_2']['can_be_fused'], "can_be_fused should be True") - - def test_mark_unfused_nodes_4(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - 
('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'add_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - }) - graph.graph['layout'] = 'NHWC' - - mark_unfused_nodes(graph, '') - - self.assertTrue(graph.node['mul_1']['can_be_fused'], "can_be_fused should be True") - self.assertTrue(graph.node['add_1']['can_be_fused'], "can_be_fused should be True") - self.assertTrue(graph.node['mul_2']['can_be_fused'], "can_be_fused should be True") - - def test_mark_unfused_nodes_5(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'add_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - }) - graph.graph['layout'] = 'NCHW' - - mark_unfused_nodes(graph, '') - - self.assertTrue(graph.node['mul_1']['can_be_fused'], "can_be_fused should be True") - self.assertTrue(graph.node['add_1']['can_be_fused'], "can_be_fused should be True") - self.assertTrue(graph.node['mul_2']['can_be_fused'], "can_be_fused should be True") - - def test_mark_unfused_nodes_5(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'add_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - }) - graph.graph['layout'] = 'NCHW' - - mark_unfused_nodes(graph, '') - - self.assertFalse(graph.node['mul_1']['can_be_fused'], "can_be_fused should be False") - self.assertFalse(graph.node['add_1']['can_be_fused'], "can_be_fused should be False") - self.assertFalse(graph.node['mul_2']['can_be_fused'], "can_be_fused should be False") - - def 
test_mark_unfused_nodes_6(self): - # Placeholder->ScaleShift->Mul->Add - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'mul_1'), - ('mul_1_w', 'mul_1'), - ('mul_1', 'mul_1_data'), - ('mul_1_data', 'add_1'), - ('add_1_w', 'add_1'), - ('add_1', 'add_1_data'), - ('add_1_data', 'mul_2'), - ('mul_2_w', 'mul_2'), - ('mul_2', 'mul_2_data'), - ('mul_2_data', 'concat_1'), - ('concat_1', 'concat_1_data'), - ('placeholder_1_data', 'concat_1'), - ('concat_1_data', 'op_output') - ], - {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'add_1_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_2_data': {'shape': np.array([1, 227, 227, 3])}, - 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'add_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])}, - }) - graph.graph['layout'] = 'NHWC' - - mark_unfused_nodes(graph, '') - - self.assertTrue(graph.node['mul_1']['can_be_fused'], "can_be_fused should be True") - self.assertTrue(graph.node['add_1']['can_be_fused'], "can_be_fused should be True") - self.assertTrue(graph.node['mul_2']['can_be_fused'], "can_be_fused should be True") diff --git a/tools/mo/unit_tests/mo/middle/passes/fusing/resnet_optimization_test.py b/tools/mo/unit_tests/mo/middle/passes/fusing/resnet_optimization_test.py deleted file mode 100644 index d30f4fd96a8a73..00000000000000 --- a/tools/mo/unit_tests/mo/middle/passes/fusing/resnet_optimization_test.py +++ /dev/null @@ -1,628 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer -from openvino.tools.mo.middle.passes.fusing.resnet_optimization import stride_optimization -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.ops.pooling import Pooling -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -max_elt_lambda = lambda node: eltwise_infer(node, lambda a, b: np.maximum(a, b)) - -nodes_attributes = { - # Placeholders - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Concat1 operation - 'eltwise_1': {'type': 'Maximum', 'kind': 'op', 'op': 'Maximum', 'infer': max_elt_lambda}, - 'eltwise_1_data': {'name': 'eltwise_1_data', 'value': None, 'shape': None, 'kind': 'data'}, - # Convolutions - 'conv_1': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW', - 'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1, - 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), - 'batch_dims': np.array([0]), 'infer': Convolution.infer, - 'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1, - 'output_feature_channel': 0, }, - 'conv_1_w': {'value': None, 'shape': None, 'kind': 'data', - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 
'conv_2': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW', - 'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1, - 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), - 'batch_dims': np.array([0]), 'infer': Convolution.infer, - 'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1, - 'output_feature_channel': 0, }, - 'conv_2_w': {'value': None, 'shape': None, 'kind': 'data', - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_2_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_2_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'conv_3': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW', - 'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1, - 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), - 'batch_dims': np.array([0]), 'infer': Convolution.infer, - 'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1, - 'output_feature_channel': 0, }, - 'conv_3_w': {'value': None, 'shape': None, 'kind': 'data', - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_3_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_3_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'conv_4': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW', - 'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1, - 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), - 'batch_dims': np.array([0]), 'infer': Convolution.infer, - 'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1, - 'output_feature_channel': 0, }, - 'conv_4_w': {'value': None, 'shape': None, 'kind': 'data', - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_4_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_4_data': {'value': None, 'shape': None, 'kind': 'data'}, - - 'conv_5': {'type': 'Convolution', 'kind': 'op', 'op': 'Conv2D', 'layout': 'NCHW', - 'output_spatial_shape': None, 'output_shape': None, 'bias_term': True, 'group': 1, - 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), - 'batch_dims': np.array([0]), 'infer': Convolution.infer, - 'kernel_spatial_idx': np.array([2, 3], dtype=np.int64), 'input_feature_channel': 1, - 'output_feature_channel': 0, }, - 'conv_5_w': {'value': None, 'shape': None, 'kind': 'data', - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_5_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'conv_5_data': {'value': None, 'shape': None, 'kind': 'data'}, - # ReLU - 'relu_1': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer}, - 'relu_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'relu_2': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer}, - 'relu_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'relu_3': {'shape': None, 'type': 'ReLU', 'kind': 'op', 'op': 'ReLU', 'infer': copy_shape_infer}, - 'relu_3_data': 
{'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # Pooling - 'pool_1': {'type': 'Pooling', 'kind': 'op', 'op': 'Pooling', - 'spatial_dims': np.array([2, 3]), - 'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'infer': Pooling.infer}, - 'pool_1_data': {'value': None, 'shape': None, 'kind': 'data'}, -} - - -# In description of unit tests below will be used next syntax: Operation(NxM,XxY), where NxM - kernel size, XxY - stride -class ResnetOptimizationTests(unittest.TestCase): - # Pl->Conv(1x1,1x1)->Conv(1x1,2x2) => Pl->Conv(1x1,2x2)->Conv(1x1,1x1) - def test_resnet_optimization_1(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'conv_2'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_1': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'output': np.array([3]), }, - 'conv_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_2': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_2_data': {'shape': np.array([1, 3, 112, 112])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'conv_2'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_1': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_1_data': {'shape': np.array([1, 3, 112, 112])}, - - 'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_2': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'output': np.array([3]), }, - 'conv_2_data': {'shape': np.array([1, 3, 112, 112])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph_ref.graph['layout'] = 'NCHW' - - stride_optimization(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - # Pl->Conv(3x3,2x2)->Conv(1x1,2x2) => Pl->Conv(3x3,4x4)->Conv(1x1,1x1) - def test_resnet_optimization_2(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'conv_2'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_1': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_1_data': {'shape': np.array([1, 3, 112, 112])}, - - 'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_2': 
{'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_2_data': {'shape': np.array([1, 3, 56, 56])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'conv_2'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_1': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 4, 4]), - 'output': np.array([3]), }, - 'conv_1_data': {'shape': np.array([1, 3, 56, 56])}, - - 'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_2': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'output': np.array([3]), }, - 'conv_2_data': {'shape': np.array([1, 3, 56, 56])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph_ref.graph['layout'] = 'NCHW' - - stride_optimization(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - # Pl->Conv(3x3,2x2)->Conv(3x3,2x2) => Same - def test_resnet_optimization_3(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'conv_2'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])}, - 'conv_1': {'kernel_spatial': np.array([3, 3]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_1_data': {'shape': np.array([1, 3, 112, 112])}, - - 'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])}, - 'conv_2': {'kernel_spatial': np.array([3, 3]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_2_data': {'shape': np.array([1, 3, 56, 56])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'conv_2'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])}, - 'conv_1': {'kernel_spatial': np.array([3, 3]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_1_data': {'shape': np.array([1, 3, 112, 112])}, - - 'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])}, - 'conv_2': {'kernel_spatial': np.array([3, 3]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_2_data': {'shape': np.array([1, 3, 56, 56])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph_ref.graph['layout'] = 'NCHW' - - stride_optimization(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'conv_2_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - # 
Pl--->Conv(3x3,2x2)->ReLU--->Eltwise-->Conv(1x1,2x2) => Pl--->Conv(3x3,4x4)->ReLU--->Eltwise-->Conv(1x1,1x1) - # `-->Conv(3x3,2x2)->ReLU---` `-->Conv(3x3,4x4)->ReLU---` - def test_resnet_optimization_4(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'relu_1'), - ('relu_1', 'relu_1_data'), - - ('placeholder_1_data', 'conv_2'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ('conv_2_data', 'relu_2'), - ('relu_2', 'relu_2_data'), - - ('relu_1_data', 'eltwise_1'), - ('relu_2_data', 'eltwise_1'), - - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_1_data', 'conv_3'), - ('conv_3_w', 'conv_3'), - ('conv_3_b', 'conv_3'), - ('conv_3', 'conv_3_data'), - - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])}, - 'conv_1': {'kernel_spatial': np.array([3, 3]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_1_data': {'shape': np.array([1, 3, 112, 112])}, - 'relu_1_data': {'shape': np.array([1, 3, 112, 112])}, - - 'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])}, - 'conv_2': {'kernel_spatial': np.array([3, 3]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_2_data': {'shape': np.array([1, 3, 112, 112])}, - 'relu_2_data': {'shape': np.array([1, 3, 112, 112])}, - - 'eltwise_1_data': {'shape': np.array([1, 3, 112, 112])}, - - 'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_3': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_3_data': {'shape': np.array([1, 3, 56, 56])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'relu_1'), - ('relu_1', 'relu_1_data'), - - ('placeholder_1_data', 'conv_2'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - ('conv_2_data', 'relu_2'), - ('relu_2', 'relu_2_data'), - - ('relu_1_data', 'eltwise_1'), - ('relu_2_data', 'eltwise_1'), - - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_1_data', 'conv_3'), - ('conv_3_w', 'conv_3'), - ('conv_3_b', 'conv_3'), - ('conv_3', 'conv_3_data'), - - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])}, - 'conv_1': {'kernel_spatial': np.array([3, 3]), - 'stride': np.array([1, 1, 4, 4]), - 'output': np.array([3])}, - 'conv_1_data': {'shape': np.array([1, 3, 56, 56])}, - 'relu_1_data': {'shape': np.array([1, 3, 56, 56])}, - - 'conv_2_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])}, - 'conv_2': {'kernel_spatial': np.array([3, 3]), - 'stride': np.array([1, 1, 4, 4]), - 'output': np.array([3])}, - 'conv_2_data': {'shape': np.array([1, 3, 56, 56])}, - 'relu_2_data': {'shape': np.array([1, 3, 56, 56])}, - - 'eltwise_1_data': {'shape': np.array([1, 3, 56, 56])}, - - 'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_3': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'output': np.array([3])}, - 'conv_3_data': {'shape': np.array([1, 3, 56, 56])}, - 
}, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph_ref.graph['layout'] = 'NCHW' - - stride_optimization(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'conv_3_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - # Pl--->Conv(1x1,1x1)->ReLU--->Eltwise-->Conv(1x1,2x2) => Pl--->Conv(1x1,2x2)->ReLU--->Eltwise-->Conv(1x1,1x1) - # `----------------->ReLU---` `-->Pool(1x1,2x2)->ReLU---` - def test_resnet_optimization_5(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'relu_1'), - ('relu_1', 'relu_1_data'), - - ('placeholder_1_data', 'relu_2'), - ('relu_2', 'relu_2_data'), - - ('relu_1_data', 'eltwise_1'), - ('relu_2_data', 'eltwise_1'), - - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_1_data', 'conv_3'), - ('conv_3_w', 'conv_3'), - ('conv_3_b', 'conv_3'), - ('conv_3', 'conv_3_data'), - - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_1': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'output': np.array([3]), }, - 'conv_1_data': {'shape': np.array([1, 3, 224, 224])}, - 'relu_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'relu_2_data': {'shape': np.array([1, 3, 224, 224])}, - - 'eltwise_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_3': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_3_data': {'shape': np.array([1, 3, 112, 112])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - ('conv_1_data', 'relu_1'), - ('relu_1', 'relu_1_data'), - - ('placeholder_1_data', 'pool_1'), - ('pool_1', 'pool_1_data'), - ('pool_1_data', 'relu_2'), - ('relu_2', 'relu_2_data'), - - ('relu_1_data', 'eltwise_1'), - ('relu_2_data', 'eltwise_1'), - - ('eltwise_1', 'eltwise_1_data'), - ('eltwise_1_data', 'conv_3'), - ('conv_3_w', 'conv_3'), - ('conv_3_b', 'conv_3'), - ('conv_3', 'conv_3_data'), - - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_1': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3])}, - 'conv_1_data': {'shape': np.array([1, 3, 112, 112])}, - 'relu_1_data': {'shape': np.array([1, 3, 112, 112])}, - - 'pool_1': {'stride': np.array([1, 1, 2, 2])}, - 'pool_1_data': {'shape': np.array([1, 3, 112, 112])}, - 'relu_2_data': {'shape': np.array([1, 3, 112, 112])}, - - 'eltwise_1_data': {'shape': np.array([1, 3, 112, 112])}, - - 'conv_3_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_3': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'output': np.array([3])}, - 'conv_3_data': {'shape': np.array([1, 3, 112, 112])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - graph_ref.graph['layout'] = 'NCHW' - - stride_optimization(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'conv_3_data', check_op_attrs=True) - self.assertTrue(flag, resp) - - # 
Pl->Conv(1x1,1x1)->Conv(1x1,2x2)->Conv(3x3,1x1)->Conv(1x1,2x2) - # => - # Pl->Conv(1x1,2x2)->Conv(1x1,1x1)->Conv(3x3,2x2)->Conv(1x1,1x1) - def test_resnet_optimization_6(self): - graph = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - - ('conv_1_data', 'conv_2'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - - ('conv_2_data', 'conv_3'), - ('conv_3_w', 'conv_3'), - ('conv_3_b', 'conv_3'), - ('conv_3', 'conv_3_data'), - - ('conv_3_data', 'conv_4'), - ('conv_4_w', 'conv_4'), - ('conv_4_b', 'conv_4'), - ('conv_4', 'conv_4_data'), - - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_1': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'output': np.array([3]), }, - 'conv_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_2': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_2_data': {'shape': np.array([1, 3, 112, 112])}, - - 'conv_3_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])}, - 'conv_3': {'kernel_spatial': np.array([3, 3]), - 'stride': np.array([1, 1, 1, 1]), - 'output': np.array([3]), }, - 'conv_3_data': {'shape': np.array([1, 3, 110, 110])}, - - 'conv_4_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_4': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3]), }, - 'conv_4_data': {'shape': np.array([1, 3, 55, 55])}, - }, - nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_attributes, - [('placeholder_1', 'placeholder_1_data'), - ('placeholder_1_data', 'conv_1'), - ('conv_1_w', 'conv_1'), - ('conv_1_b', 'conv_1'), - ('conv_1', 'conv_1_data'), - - ('conv_1_data', 'conv_2'), - ('conv_2_w', 'conv_2'), - ('conv_2_b', 'conv_2'), - ('conv_2', 'conv_2_data'), - - ('conv_2_data', 'conv_3'), - ('conv_3_w', 'conv_3'), - ('conv_3_b', 'conv_3'), - ('conv_3', 'conv_3_data'), - - ('conv_3_data', 'conv_4'), - ('conv_4_w', 'conv_4'), - ('conv_4_b', 'conv_4'), - ('conv_4', 'conv_4_data'), - - ], - {'placeholder_1_data': {'shape': np.array([1, 3, 224, 224])}, - - 'conv_1_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_1': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3])}, - 'conv_1_data': {'shape': np.array([1, 3, 112, 112])}, - - 'conv_2_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_2': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'output': np.array([3])}, - 'conv_2_data': {'shape': np.array([1, 3, 112, 112])}, - - 'conv_3_w': {'value': np.zeros([3, 3, 3, 3]), 'shape': np.array([3, 3, 3, 3])}, - 'conv_3': {'kernel_spatial': np.array([3, 3]), - 'stride': np.array([1, 1, 2, 2]), - 'output': np.array([3])}, - 'conv_3_data': {'shape': np.array([1, 3, 55, 55])}, - - 'conv_4_w': {'value': np.zeros([3, 3, 1, 1]), 'shape': np.array([3, 3, 1, 1])}, - 'conv_4': {'kernel_spatial': np.array([1, 1]), - 'stride': np.array([1, 1, 1, 1]), - 'output': np.array([3])}, - 'conv_4_data': {'shape': np.array([1, 3, 55, 55])}, - }, - nodes_with_edges_only=True) - - graph.graph['layout'] = 'NCHW' - 
graph_ref.graph['layout'] = 'NCHW' - - stride_optimization(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'conv_4_data', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/passes/infer_test.py b/tools/mo/unit_tests/mo/middle/passes/infer_test.py deleted file mode 100644 index 5d3920ee5a74d4..00000000000000 --- a/tools/mo/unit_tests/mo/middle/passes/infer_test.py +++ /dev/null @@ -1,281 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.concat import concat_infer -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.infer import override_placeholder_shapes, partial_infer -from openvino.tools.mo.utils.error import Error -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'node_1_data': {'value': None, 'kind': 'data', 'data_type': None}, - 'node_2': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'concat': {'type': 'Concat', 'value': None, 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'value': None, 'kind': 'op'}, - 'node_3_data': {'value': None, 'kind': 'data', 'data_type': None}, - # Placeholders - 'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - 'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'pl_1': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'pl_1_data': {'value': None, 'kind': 'data', 'data_type': None}, - 'pl_2': {'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'}, - 'pl_2_data': {'value': None, 'kind': 'data', 'data_type': None}, - 'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None}, - # ScaleShift layer - 'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift'}, - 'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'}, - 'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - # Mul op - 'mul_1': {'type': None, 'kind': 'op', 'op': 'Mul'}, - 'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'}, - 'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'}, - 'op_output': { 'kind': 'op', 'op': 'Result', 'infer': lambda x: None} - } - - -class TestInferPass(UnitTestWithMockedTelemetry): - def test_override_placeholder_shapes(self): - """ - Test for overriding shape in placeholder by shape from user_shapes. - """ - graph = build_graph(nodes_attributes, - [('node_1', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Parameter'} - }, - nodes_with_edges_only=True) - - ph_shape = np.array([1, 3, 224, 224]) - user_dict = {'node_1': [{'shape': ph_shape}]} - override_placeholder_shapes(graph, user_dict) - res_shape = graph.node['node_1']['shape'] - self.assertTrue(np.array_equal(ph_shape, res_shape)) - - def test_override_placeholder_no_shape(self): - """ - Test for case when user_shapes is not defined. 
- """ - graph = build_graph(nodes_attributes, - [('node_1', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None, 'op': 'Parameter'}, - 'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Parameter'} - }, - nodes_with_edges_only=True) - out = override_placeholder_shapes(graph, None) - res_shape = graph.node['node_1']['shape'] - placeholder_shape = np.array([1, 3, 227, 227]) - self.assertIsNone(out) - self.assertTrue(np.array_equal(placeholder_shape, res_shape)) - - def test_override_placeholder_shapes(self): - """ - Test for case when user_shapes is not None, but it shouldn't rewrite shapes. - """ - graph = build_graph(nodes_attributes, - [('node_1', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Parameter'} - }, - nodes_with_edges_only=True) - - node_1_shape = np.array([1, 3, 227, 227]) - user_dict = {'some_node': [{'shape': np.zeros((3))}]} - override_placeholder_shapes(graph, user_dict) - res_shape = graph.node['node_1']['shape'] - self.assertTrue(np.array_equal(node_1_shape, res_shape)) - - def test_override_placeholder_shapes_dict(self): - graph = build_graph(nodes_attributes, - [('node_1', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None, 'op': 'Parameter'}, - 'node_1': {'shape': np.array([1, 3, 227, 227]), 'op': 'Parameter'} - }, - nodes_with_edges_only=True) - - placeholder_shape = np.array([1, 3, 224, 224]) - user_shapes = { - 'node_1': [{'shape': placeholder_shape}], - 'node_2': [{'shape': placeholder_shape}], - } - override_placeholder_shapes(graph, user_shapes) - res_shape = graph.node['node_1']['shape'] - res_shape2 = graph.node['node_2']['shape'] - self.assertTrue(np.array_equal(placeholder_shape, res_shape)) - self.assertTrue(np.array_equal(placeholder_shape, res_shape2)) - - nodes = { - 'placeholder_1': {'name': 'placeholder_1', 'shape': [1, 2, 3, 4], 'type': 'Parameter', 'value': None, - 'kind': 'op', 'op': 'Parameter'}, - 'placeholder_2': {'name': 'placeholder_2', 'shape': [5, 6, 7, 8], 'type': 'Parameter', 'value': None, - 'kind': 'op', 'op': 'Parameter'}, - '1': {'name': 'node_1', 'type': 'Identity', 'value': None, 'kind': 'op'}, - '2': {'name': 'node_2', 'type': 'Identity', 'value': None, 'kind': 'op'}, - '3': {'name': 'concat', 'type': 'Identity', 'value': None, 'kind': 'op'}, - '4': {'name': 'output', 'type': 'SoftMax', 'value': None, 'kind': 'op'} - } - edges = [ - ('placeholder_1', '1'), - ('1', '3'), - ('placeholder_2', '2'), - ('2', '3'), - ('3', '4') - ] - - def test_override_placeholder_shapes_batch_is_not_set(self): - """ - Test case when batch is not set. (shapes shouldn't change) - """ - graph = build_graph(self.nodes, self.edges) - shapes = {} - batch = None - override_placeholder_shapes(graph, shapes, batch) - res_shape_1 = graph.node['placeholder_1']['shape'] - res_shape_2 = graph.node['placeholder_2']['shape'] - self.assertTrue(np.array_equal(self.nodes['placeholder_1']['shape'], res_shape_1)) - self.assertTrue(np.array_equal(self.nodes['placeholder_2']['shape'], res_shape_2)) - - def test_override_placeholder_shapes_real_inputs_and_batch(self): - """ - Test case when batch is set and shapes should overwrite by user shapes. 
- """ - graph = build_graph(self.nodes, self.edges) - shapes = {'placeholder_1': [{'shape': np.array([1, 2, 3, 4])}], - 'placeholder_2': [{'shape': np.array([1, 5, 6, 7])}]} - batch = 4 - override_placeholder_shapes(graph, shapes, batch) - res_shape_1 = graph.node['placeholder_1']['shape'] - res_shape_2 = graph.node['placeholder_2']['shape'] - self.assertTrue(np.array_equal(res_shape_1, np.array([4, 2, 3, 4]))) - self.assertTrue(np.array_equal(res_shape_2, np.array([4, 5, 6, 7]))) - - def test_override_placeholder_shapes_real_inputs_and_batch_2(self): - """ - Test case when batch is set, but shapes in user_shapes is None. - """ - graph = build_graph(self.nodes, self.edges) - shapes = {'placeholder_1': [{'shape': None}], 'placeholder_2': [{'shape': None}]} - batch = 4 - graph.node['placeholder_2']['shape'] = np.array([1, 2, 3, 4]) - graph.node['placeholder_2']['shape'] = np.array([1, 5, 6, 7]) - override_placeholder_shapes(graph, shapes, batch) - np.testing.assert_array_equal(graph.node['placeholder_1']['shape'], np.array([4, 2, 3, 4])) - np.testing.assert_array_equal(graph.node['placeholder_2']['shape'], np.array([4, 5, 6, 7])) - - def test_partial_infer(self): - graph = build_graph(nodes_attributes, - [('node_1', 'concat'), - ('node_2', 'concat'), - ('concat', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'kind': 'data', 'shape': None, 'infer': None}, - 'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None}, - 'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None}, - 'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer} - }, - nodes_with_edges_only=True) - - start_node = 'concat' - partial_infer(graph, start_node) - node = Node(graph, start_node) - self.assertTrue(node.is_partial_inferred) - self.assertTrue(node.out_node().is_partial_inferred) - - # check if previous nodes are not inferred - node = Node(graph, start_node) - while True: - # collect nodes in a list - if isinstance(node.in_nodes(), list): - in_nodes = node.in_nodes() - else: - in_nodes = [y for x, y in node.in_nodes().items()] - - # check parents and find next parent - for n in in_nodes: - if 'embedded_input_' not in n.id: - node = n - self.assertFalse(n.has('is_partial_inferred')) - - if not len(in_nodes): - break - - def test_partial_infer_no_shape(self): - graph = build_graph(nodes_attributes, - [('node_1', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None, 'infer': None}, - 'node_1': {'shape': None, 'infer': None} - }, - nodes_with_edges_only=True) - self.assertRaises(Error, partial_infer, graph, 'node_1') - - def test_partial_infer_cycle(self): - graph = build_graph(nodes_attributes, - [('node_1', 'concat'), - ('node_2', 'concat'), - ('concat', 'node_3'), - ('node_3', 'concat'), - ('node_3', 'op_output') - ], - {'node_3': {'kind': 'data', 'shape': None, 'infer': None}, - 'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None}, - 'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None}, - 'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer} - }, - nodes_with_edges_only=True) - - start_node = 'concat' - self.assertRaises(Error, partial_infer, graph, start_node) - - -class CycleTest(UnitTestWithMockedTelemetry): - def test_is_not_fully_inferred_param(self): - # Node that have is_not_fully_inferred=True - graph = build_graph(nodes_attributes, - [('node_1', 'concat'), - ('node_2', 'concat'), - ('concat', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'kind': 'data', 'shape': None, 
'infer': None}, - 'node_1': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None}, - 'node_2': {'kind': 'data', 'shape': np.array([1, 3, 227, 227]), 'infer': None}, - 'concat': {'kind': 'op', 'axis': 2, 'infer': concat_infer, 'is_not_fully_inferred': True} - }, - nodes_with_edges_only=True) - - start_node = 'concat' - try: - partial_infer(graph, start_node) - except Error: - self.fail("Unexpected Error raised") - node = Node(graph, start_node) - self.assertTrue(node.is_partial_inferred) - self.assertTrue(node.out_node().is_partial_inferred) - - def test_for_is_cyclic1(self): - # Test for case of cyclic graph without is_cyclic attrs - graph = build_graph(nodes_attributes, - [('node_1', 'node_1_data'), - ('node_1_data', 'node_3'), - ('node_3', 'node_3_data'), - ('node_3_data', 'node_1')], - nodes_with_edges_only=True) - with self.assertRaisesRegex(Error, 'Graph contains a cycle. Can not proceed.*'): - partial_infer(graph) diff --git a/tools/mo/unit_tests/mo/middle/quantize_dequantize_linear_resolver_test.py b/tools/mo/unit_tests/mo/middle/quantize_dequantize_linear_resolver_test.py deleted file mode 100644 index 55ffb57c43af17..00000000000000 --- a/tools/mo/unit_tests/mo/middle/quantize_dequantize_linear_resolver_test.py +++ /dev/null @@ -1,139 +0,0 @@ -# Copyright (C) 2018-2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import unittest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array, int8_array -from openvino.tools.mo.middle.quantize_dequantize_linear_resolver import QuantizeDequantizeLinearResolver -from openvino.tools.mo.middle.quantize_linear_resolver import QuantizeLinearResolver -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, result, connect, connect_data, \ - valued_const_with_data, regular_op_with_empty_data - -nodes_attributes = { - **valued_const_with_data('y_scale_1', float32_array(1.0 / 255.0)), - **valued_const_with_data('y_scale_2', float32_array(1.0 / 255.0)), - **valued_const_with_data('y_zeropoint_1', int8_array(0)), - **valued_const_with_data('y_zeropoint_2', int8_array(0)), - **valued_const_with_data('x_scale_1', float32_array(1.0 / 255.0)), - **valued_const_with_data('x_scale_2', float32_array(1.0 / 255.0)), - **valued_const_with_data('x_zeropoint_1', int8_array(0)), - **valued_const_with_data('x_zeropoint_2', int8_array(0)), - **valued_const_with_data('const_input', float32_array([[0.3, 0.6], [-0.7, -0.9]])), - **valued_const_with_data('in_low', float32_array(-128.0)), - **valued_const_with_data('in_high', float32_array(127.0)), - **valued_const_with_data('out_low', float32_array(-128.0)), - **valued_const_with_data('out_high', float32_array(127.0)), - **valued_const_with_data('non_const_in_low', float32_array(-128.0)), - **valued_const_with_data('non_const_in_high', float32_array(127.0)), - **valued_const_with_data('non_const_out_low', float32_array(-128.0)), - **valued_const_with_data('non_const_out_high', float32_array(127.0)), - **regular_op_with_shaped_data('input', [1, 2, 2], {'op': 'Parameter', 'type': 'Parameter'}), - **regular_op_with_empty_data('const_quantize', {'op': 'QuantizeLinear'}), - **regular_op_with_empty_data('non_const_quantize', {'op': 'QuantizeLinear'}), - **regular_op_with_empty_data('const_dequantize', {'op': 'DequantizeLinear'}), - **regular_op_with_empty_data('non_const_dequantize', {'op': 'DequantizeLinear'}), - 
**regular_op_with_empty_data('add', {'op': 'Add'}), - **regular_op_with_empty_data('mul_low', {'op': 'Mul'}), - **regular_op_with_empty_data('mul_high', {'op': 'Mul'}), - **regular_op_with_empty_data('const_fq', {'op': 'FakeQuantize'}), - **regular_op_with_empty_data('const_cast', {'op': 'Cast', 'type': 'Convert', 'dst_type': np.uint8}), - **regular_op_with_empty_data('non_const_mul_low', {'op': 'Mul'}), - **regular_op_with_empty_data('non_const_mul_high', {'op': 'Mul'}), - **regular_op_with_empty_data('non_const_fq', {'op': 'FakeQuantize'}), - **regular_op_with_empty_data('non_const_cast', {'op': 'Cast', 'type': 'Convert', 'dst_type': np.uint8}), - **result('result'), - -} - - -class QuantizeDequantizeLinearResolverTest(unittest.TestCase): - def test_quantize_dequantize_linear_resolver(self): - graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', '0:non_const_quantize'), - *connect('y_scale_2', '1:non_const_quantize'), - *connect('y_zeropoint_2', '2:non_const_quantize'), - *connect('non_const_quantize', '0:non_const_dequantize'), - *connect('x_scale_2', '1:non_const_dequantize'), - *connect('x_zeropoint_2', '2:non_const_dequantize'), - - *connect('const_input', '0:const_quantize'), - *connect('y_scale_1', '1:const_quantize'), - *connect('y_zeropoint_1', '2:const_quantize'), - *connect('const_quantize', '0:const_dequantize'), - *connect('x_scale_1', '1:const_dequantize'), - *connect('x_zeropoint_1', '2:const_dequantize'), - *connect('const_dequantize', '0:add'), - *connect('non_const_dequantize', '1:add'), - *connect('add', 'result') - ], nodes_with_edges_only=True) - - const_ref_graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', '0:non_const_quantize'), - *connect('y_scale_2', '1:non_const_quantize'), - *connect('y_zeropoint_2', '2:non_const_quantize'), - *connect('non_const_quantize', '0:non_const_dequantize'), - *connect('x_scale_2', '1:non_const_dequantize'), - *connect('x_zeropoint_2', '2:non_const_dequantize'), - - *connect('const_input', '0:const_fq'), - *connect('y_scale_1:0', '0:mul_low'), - *connect('in_low', '1:mul_low'), - ('y_scale_1_d', 'mul_high', {'out': 1, 'in': 0}), - *connect('in_high', '1:mul_high'), - *connect('mul_low', '1:const_fq'), - *connect('mul_high', '2:const_fq'), - *connect('out_low', '3:const_fq'), - *connect('out_high', '4:const_fq'), - *connect('const_fq', 'const_cast'), - *connect('const_cast', '0:const_dequantize'), - *connect('x_scale_1', '1:const_dequantize'), - *connect('x_zeropoint_1', '2:const_dequantize'), - *connect('const_dequantize', '0:add'), - *connect('non_const_dequantize', '1:add'), - *connect('add', 'result') - ],nodes_with_edges_only=True) - QuantizeDequantizeLinearResolver().find_and_replace_pattern(graph) - graph.graph['layout'] = 'NCHW' - (flag, resp) = compare_graphs(graph, const_ref_graph, 'result') - self.assertTrue(flag, resp) - - ref_graph = build_graph(nodes_attrs=nodes_attributes, - edges=[ - *connect('input', '0:non_const_fq'), - *connect('y_scale_2:0', '0:non_const_mul_low'), - *connect('non_const_in_low', '1:non_const_mul_low'), - ('y_scale_2_d', 'non_const_mul_high', {'out': 1, 'in': 0}), - *connect('non_const_in_high', '1:non_const_mul_high'), - *connect('non_const_mul_low', '1:non_const_fq'), - *connect('non_const_mul_high', '2:non_const_fq'), - *connect('non_const_out_low', '3:non_const_fq'), - *connect('non_const_out_high', '4:non_const_fq'), - *connect('non_const_fq', 'non_const_cast'), - *connect('non_const_cast', '0:non_const_dequantize'), - *connect('x_scale_2', 
'1:non_const_dequantize'), - *connect('x_zeropoint_2', '2:non_const_dequantize'), - - *connect('const_input', '0:const_fq'), - *connect('y_scale_1:0', '0:mul_low'), - *connect('in_low', '1:mul_low'), - ('y_scale_1_d', 'mul_high', {'out': 1, 'in': 0}), - *connect('in_high', '1:mul_high'), - *connect('mul_low', '1:const_fq'), - *connect('mul_high', '2:const_fq'), - *connect('out_low', '3:const_fq'), - *connect('out_high', '4:const_fq'), - *connect('const_fq', 'const_cast'), - *connect('const_cast', '0:const_dequantize'), - *connect('x_scale_1', '1:const_dequantize'), - *connect('x_zeropoint_1', '2:const_dequantize'), - *connect('const_dequantize', '0:add'), - *connect('non_const_dequantize', '1:add'), - *connect('add', 'result') - ], nodes_with_edges_only=True) - QuantizeLinearResolver().find_and_replace_pattern(graph) - (flag, resp) = compare_graphs(graph, ref_graph, 'result') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/quantize_fuses_test.py b/tools/mo/unit_tests/mo/middle/quantize_fuses_test.py deleted file mode 100644 index c715bc2dd1466c..00000000000000 --- a/tools/mo/unit_tests/mo/middle/quantize_fuses_test.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.quantize_fuses import FakeQuantizeFuse -from openvino.tools.mo.front.common.partial_infer.eltwise import eltwise_infer -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - -nodes = { - 'placeholder': {'kind': 'op', 'op': 'Placeholder'}, - 'placeholder_d': {'kind': 'data', 'shape': np.array([1, 3, 224, 224]), 'value': None}, - - 'mi_i': {'kind': 'op', 'op': 'Const'}, - 'mi_i_d': {'kind': 'data', 'shape': np.array([1, 3, 224, 224]), 'value': None}, - 'ma_i': {'kind': 'op', 'op': 'Const'}, - 'ma_i_d': {'kind': 'data', 'shape': np.array([1, 3, 224, 224]), 'value': None}, - 'mi_o': {'kind': 'op', 'op': 'Const'}, - 'mi_o_d': {'kind': 'data', 'shape': np.array([1, 3, 224, 224]), 'value': None}, - 'ma_o': {'kind': 'op', 'op': 'Const'}, - 'ma_o_d': {'kind': 'data', 'shape': np.array([1, 3, 224, 224]), 'value': None}, - - 'quantize': {'kind': 'op', 'op': 'FakeQuantize'}, - 'quantize_d': {'kind': 'data', 'shape': None}, - - 'mul_val': {'kind': 'op', 'op': 'Const'}, - 'mul_val_d': {'kind': 'data', 'shape': np.array([1]), 'value': np.array([5])}, - - 'mul': {'kind': 'op', 'op': 'Mul', 'infer': lambda node: eltwise_infer(node, lambda a, b: a * b)}, - 'mul_d': {'kind': 'data', 'shape': np.array([1, 3, 224, 224]), 'value': None}, - 'mul_1': {'kind': 'op', 'op': 'Mul', 'infer': lambda node: eltwise_infer(node, lambda a, b: a * b)}, - 'mul_1_d': {'kind': 'data', 'shape': np.array([1, 3, 224, 224]), 'value': None}, - 'mul_2': {'kind': 'op', 'op': 'Mul', 'infer': lambda node: eltwise_infer(node, lambda a, b: a * b)}, - 'mul_2_d': {'kind': 'data', 'shape': np.array([1, 3, 224, 224]), 'value': None}, - 'mul_3': {'kind': 'op', 'op': 'Mul', 'infer': lambda node: eltwise_infer(node, lambda a, b: a * b)}, - 'mul_3_d': {'kind': 'data', 'shape': np.array([1, 3, 224, 224]), 'value': None}, - 'mul_4': {'kind': 'op', 'op': 'Mul', 'infer': lambda node: eltwise_infer(node, lambda a, b: a * b)}, - 'mul_4_d': {'kind': 'data', 'shape': np.array([1, 3, 224, 224]), 'value': None}, - - 'output': {'kind': 'op', 'op': 'Result'}, -} - -edges = [ - ('placeholder', 'placeholder_d'), - ('mi_i', 'mi_i_d'), - ('ma_i', 'ma_i_d'), - 
('mi_o', 'mi_o_d'), - ('ma_o', 'ma_o_d'), - ('quantize', 'quantize_d'), - ('mul', 'mul_d'), - ('mul_val', 'mul_val_d'), - - ('placeholder_d', 'quantize', {'in': 0}), - ('mi_i_d', 'quantize', {'in': 1}), - ('ma_i_d', 'quantize', {'in': 2}), - ('mi_o_d', 'quantize', {'in': 3}), - ('ma_o_d', 'quantize', {'in': 4}), - - ('quantize_d', 'mul', {'in': 0}), - ('mul_val_d', 'mul', {'in': 1}), - - ('mul_d', 'output'), -] - -edges_ref_1 = [ - ('placeholder', 'placeholder_d'), - ('mi_i', 'mi_i_d'), - ('ma_i', 'ma_i_d'), - ('mi_o', 'mi_o_d'), - ('ma_o', 'ma_o_d'), - ('quantize', 'quantize_d'), - ('mul', 'mul_d'), - ('mul_val', 'mul_val_d'), - - ('placeholder_d', 'mul', {'in': 0}), - ('mul_val_d', 'mul', {'in': 1}), - - ('mul_d', 'quantize', {'in': 0}), - ('mi_i_d', 'quantize', {'in': 1}), - ('ma_i_d', 'quantize', {'in': 2}), - ('mi_o_d', 'quantize', {'in': 3}), - ('ma_o_d', 'quantize', {'in': 4}), - - ('quantize_d', 'output'), -] - -edges_ref_5 = [ - ('placeholder', 'placeholder_d'), - ('mi_i', 'mi_i_d'), - ('ma_i', 'ma_i_d'), - ('mi_o', 'mi_o_d'), - ('ma_o', 'ma_o_d'), - ('quantize', 'quantize_d'), - ('mul', 'mul_d'), - ('mul_1', 'mul_1_d'), - ('mul_2', 'mul_2_d'), - ('mul_3', 'mul_3_d'), - ('mul_4', 'mul_4_d'), - ('mul_val', 'mul_val_d'), - - - ('placeholder_d', 'mul', {'in': 0}), - ('mi_i_d', 'mul_1'), - ('ma_i_d', 'mul_2'), - ('mi_o_d', 'mul_3'), - ('ma_o_d', 'mul_4'), - - ('mul_val_d', 'mul', {'in': 1, 'out': 0}), - ('mul_d', 'quantize', {'in': 0}), - - ('mul_val_d', 'mul_1', {'in': 1, 'out': 0}), - ('mul_1_d', 'quantize', {'in': 1}), - - ('mul_val_d', 'mul_2', {'in': 1, 'out': 0}), - ('mul_2_d', 'quantize', {'in': 2}), - - ('mul_val_d', 'mul_3', {'in': 1, 'out': 0}), - ('mul_3_d', 'quantize', {'in': 3}), - - ('mul_val_d', 'mul_4', {'in': 1, 'out': 0}), - ('mul_4_d', 'quantize', {'in': 4}), - - ('quantize_d', 'output'), -] - - -class TestQuantizeFuses(unittest.TestCase): - def test_pool_1_port_through_quantize(self): - graph = build_graph(nodes, edges, {'mul': {'fuse_up_to_quantize_ports': [0]}}, nodes_with_edges_only=True) - graph.stage = 'middle' - FakeQuantizeFuse().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, edges_ref_1, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_pool_5_ports_through_quantize(self): - graph = build_graph(nodes, edges, {'mul': {'fuse_up_to_quantize_ports': [0, 1, 2, 3, 4]}}, - nodes_with_edges_only=True) - graph.stage = 'middle' - FakeQuantizeFuse().find_and_replace_pattern(graph) - - graph_ref = build_graph(nodes, edges_ref_5, nodes_with_edges_only=True) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/quantize_linear_resolver_test.py b/tools/mo/unit_tests/mo/middle/quantize_linear_resolver_test.py deleted file mode 100644 index 3e18f1f5319ac6..00000000000000 --- a/tools/mo/unit_tests/mo/middle/quantize_linear_resolver_test.py +++ /dev/null @@ -1,369 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.quantize_linear_resolver import QuantizeLinearResolver -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph -import pytest - -nodes1_attributes = { - 'input': {'kind': 'op', 'op': 
'AnyOp'}, - 'input_data': {'kind': 'data', 'shape': None}, - 'quantize': {'kind': 'op', 'op': 'QuantizeLinear', 'axis': 1}, - 'quantize_data': {'kind': 'data', 'shape': None}, - 'scale_param_q': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'scale_param_q_data': {'kind': 'data', 'shape': None}, - 'zerop_param_q': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'zerop_param_q_data': {'kind': 'data', 'shape': None}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, - 'out_data': {'kind': 'data', 'shape': None}, - 'result': {'kind': 'op', 'op': 'Result'}, -} - -nodes_ref_attributes = { - 'input': {'kind': 'op', 'op': 'AnyOp'}, - 'input_data': {'kind': 'data', 'shape': None}, - 'cast': {'kind': 'op', 'op': 'Cast', 'type': 'Convert'}, - 'cast_data': {'kind': 'data', 'shape': None}, - 'f_quantize': {'kind': 'op', 'op': 'FakeQuantize', 'type': 'FakeQuantize'}, - 'f_quantize_data': {'kind': 'data', 'shape': None}, - 'mul1': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'}, - 'mul1_data': {'kind': 'data', 'shape': None}, - 'mul2': {'kind': 'op', 'op': 'Mul', 'type': 'Multiply'}, - 'mul2_data': {'kind': 'data', 'shape': None}, - 'scale_param_q': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'scale_param_q_data': {'kind': 'data', 'shape': None}, - 'in_low': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'in_low_data': {'kind': 'data', 'shape': None}, - 'in_high': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'in_high_data': {'kind': 'data', 'shape': None}, - 'out_low': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'out_low_data': {'kind': 'data', 'shape': None}, - 'out_high': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'out_high_data': {'kind': 'data', 'shape': None}, - 'out': {'kind': 'op', 'op': 'AnyOp'}, - 'out_data': {'kind': 'data', 'shape': None}, - 'result': {'kind': 'op', 'op': 'Result'}, - - 'high_reshape_const': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'high_reshape_const_data': {'kind': 'data', 'shape': None}, - 'high_reshape': {'kind': 'op', 'type': 'Reshape', 'op': 'Reshape'}, - 'high_reshape_data': {'kind': 'data', 'shape': None}, - - 'low_reshape_const': {'kind': 'op', 'type': 'Const', 'op': 'Const'}, - 'low_reshape_const_data': {'kind': 'data', 'shape': None}, - 'low_reshape': {'kind': 'op', 'type': 'Reshape', 'op': 'Reshape'}, - 'low_reshape_data': {'kind': 'data', 'shape': None}, -} - - -class TestQuantizeLinearResolver(unittest.TestCase): - - def test_quantize_uint8(self): - graph = build_graph(nodes1_attributes, - [('input', 'input_data'), - ('input_data', 'quantize'), - ('scale_param_q', 'scale_param_q_data'), - ('scale_param_q_data', 'quantize'), - ('zerop_param_q', 'zerop_param_q_data'), - ('zerop_param_q_data', 'quantize'), - ('quantize', 'quantize_data'), - ('quantize_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - { - 'quantize': {'axis': 2}, - 'scale_param_q': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'scale_param_q_data': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'zerop_param_q': {'shape': np.array([]), 'value': np.uint8(128)}, - 'zerop_param_q_data': {'shape': np.array([]), 'value': np.uint8(128)}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'input_data'), - ('input_data', 'f_quantize'), - ('scale_param_q', 'scale_param_q_data'), - ('scale_param_q_data', 'mul1', {'out': 0}), - ('in_low', 'in_low_data'), - ('in_low_data', 'mul1'), - ('mul1', 'mul1_data'), - ('mul1_data', 'f_quantize'), - ('f_quantize', 'f_quantize_data'), - 
('scale_param_q_data', 'mul2', {'out': 0}), - ('in_high', 'in_high_data'), - ('in_high_data', 'mul2'), - ('mul2', 'mul2_data'), - ('mul2_data', 'f_quantize'), - ('out_low', 'out_low_data'), - ('out_low_data', 'f_quantize'), - ('out_high', 'out_high_data'), - ('out_high_data', 'f_quantize'), - ('f_quantize_data', 'cast'), - ('cast', 'cast_data'), - ('cast_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'in_low': {'shape': np.array([]), 'value': -128}, - 'in_low_data': {'shape': np.array([]), 'value': -128}, - 'in_high': {'shape': np.array([]), 'value': 127}, - 'in_high_data': {'shape': np.array([]), 'value': 127}, - 'out_low': {'shape': np.array([]), 'value': 0}, - 'out_low_data': {'shape': np.array([]), 'value': 0}, - 'out_high': {'shape': np.array([]), 'value': 255}, - 'out_high_data': {'shape': np.array([]), 'value': 255}, - 'cast': {'dst_type': np.uint8} - }, nodes_with_edges_only=True) - - graph.stage = 'middle' - QuantizeLinearResolver().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_quantize_int8(self): - graph = build_graph(nodes1_attributes, - [('input', 'input_data'), - ('input_data', 'quantize'), - ('scale_param_q', 'scale_param_q_data'), - ('scale_param_q_data', 'quantize'), - ('zerop_param_q', 'zerop_param_q_data'), - ('zerop_param_q_data', 'quantize'), - ('quantize', 'quantize_data'), - ('quantize_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'scale_param_q': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'scale_param_q_data': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'zerop_param_q': {'shape': np.array([]), 'value': np.int8(0)}, - 'zerop_param_q_data': {'shape': np.array([]), 'value': np.int8(0)}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'input_data'), - ('input_data', 'f_quantize'), - ('scale_param_q', 'scale_param_q_data'), - ('scale_param_q_data', 'mul1', {'out': 0}), - ('in_low', 'in_low_data'), - ('in_low_data', 'mul1'), - ('mul1', 'mul1_data'), - ('mul1_data', 'f_quantize'), - ('f_quantize', 'f_quantize_data'), - ('scale_param_q_data', 'mul2', {'out': 0}), - ('in_high', 'in_high_data'), - ('in_high_data', 'mul2'), - ('mul2', 'mul2_data'), - ('mul2_data', 'f_quantize'), - ('out_low', 'out_low_data'), - ('out_low_data', 'f_quantize'), - ('out_high', 'out_high_data'), - ('out_high_data', 'f_quantize'), - ('f_quantize_data', 'cast'), - ('cast', 'cast_data'), - ('cast_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'in_low': {'shape': np.array([]), 'value': -128}, - 'in_low_data': {'shape': np.array([]), 'value': -128}, - 'in_high': {'shape': np.array([]), 'value': 127}, - 'in_high_data': {'shape': np.array([]), 'value': 127}, - 'out_low': {'shape': np.array([]), 'value': -128}, - 'out_low_data': {'shape': np.array([]), 'value': -128}, - 'out_high': {'shape': np.array([]), 'value': 127}, - 'out_high_data': {'shape': np.array([]), 'value': 127}, - 'cast': {'dst_type': np.int8} - }, nodes_with_edges_only=True) - - graph.stage = 'middle' - QuantizeLinearResolver().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_quantize_no_zerop(self): - graph = build_graph(nodes1_attributes, - [('input', 'input_data'), - ('input_data', 'quantize'), - ('quantize', 'quantize_data'), - ('scale_param_q', 
'scale_param_q_data'), - ('scale_param_q_data', 'quantize'), - ('quantize', 'quantize_data'), - ('quantize_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'scale_param_q': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - 'scale_param_q_data': {'shape': np.array([]), 'value': np.float32(1.0 / 255)}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'input_data'), - ('input_data', 'f_quantize'), - ('scale_param_q', 'scale_param_q_data'), - ('scale_param_q_data', 'mul1', {'out': 0}), - ('in_low', 'in_low_data'), - ('in_low_data', 'mul1'), - ('mul1', 'mul1_data'), - ('mul1_data', 'f_quantize'), - ('f_quantize', 'f_quantize_data'), - ('scale_param_q_data', 'mul2', {'out': 0}), - ('in_high', 'in_high_data'), - ('in_high_data', 'mul2'), - ('mul2', 'mul2_data'), - ('mul2_data', 'f_quantize'), - ('out_low', 'out_low_data'), - ('out_low_data', 'f_quantize'), - ('out_high', 'out_high_data'), - ('out_high_data', 'f_quantize'), - ('f_quantize_data', 'cast'), - ('cast', 'cast_data'), - ('cast_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'in_low': {'shape': np.array([]), 'value': 0}, - 'in_low_data': {'shape': np.array([]), 'value': 0}, - 'in_high': {'shape': np.array([]), 'value': 255}, - 'in_high_data': {'shape': np.array([]), 'value': 255}, - 'out_low': {'shape': np.array([]), 'value': 0}, - 'out_low_data': {'shape': np.array([]), 'value': 0}, - 'out_high': {'shape': np.array([]), 'value': 255}, - 'out_high_data': {'shape': np.array([]), 'value': 255}, - 'cast': {'dst_type': np.uint8} - }, nodes_with_edges_only=True) - - graph.stage = 'middle' - QuantizeLinearResolver().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - self.assertTrue(flag, resp) - - -class TestQuantizeWithAxis(): - @pytest.mark.parametrize("input_shape, scale_param_value, zero_param_value,target_shape, in_low, in_high, out_low, out_high, axis", - [(int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 4, 1]), - np.array([-2., -3., -4., -5.]), np.array([253., 252., 251., 250.]), - 0, 255, 2), - (int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 3, 1, 1]), - np.array([-2., -3., -4., -5.]), np.array([253., 252., 251., 250.]), - 0, 255, 1), - (int64_array([2, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([2, 1, 1, 1]), - np.array([-2., -3., -4., -5.]), np.array([253., 252., 251., 250.]), - 0, 255, 0), - (int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 1, 4]), - np.array([-2., -3., -4., -5.]), np.array([253., 252., 251., 250.]), - 0, 255, -1), - (int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.uint8), int64_array([1, 1, 4, 1]), - np.array([-2., -3., -4., -5.]), np.array([253., 252., 251., 250.]), - 0, 255, -2), - (int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.int8), int64_array([1, 1, 4, 1]), - np.array([-130., -131., -132., -133.]), np.array([125., 124., 123., 122.]), - -128.0, 127.0, 2), - (int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.int8), int64_array([1, 3, 1, 1]), - np.array([-130., -131., -132., -133.]), 
np.array([125., 124., 123., 122.]), - -128.0, 127.0, 1), - (int64_array([2, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.int8), int64_array([2, 1, 1, 1]), - np.array([-130., -131., -132., -133.]), np.array([125., 124., 123., 122.]), - -128.0, 127.0, 0), - (int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.int8), int64_array([1, 1, 1, 4]), - np.array([-130., -131., -132., -133.]), np.array([125., 124., 123., 122.]), - -128.0, 127.0, -1), - (int64_array([1, 3, 4, 4]), np.array([2, 3, 4, 5], dtype=np.float32), - np.array([2, 3, 4, 5], dtype=np.int8), int64_array([1, 1, 4, 1]), - np.array([-130., -131., -132., -133.]), np.array([125., 124., 123., 122.]), - -128.0, 127.0, -2), - ]) - def test_quantize_with_axis(self, input_shape, scale_param_value, zero_param_value, - target_shape, in_low, in_high, out_low, out_high, axis): - graph = build_graph(nodes1_attributes, - [('input', 'input_data'), - ('input_data', 'quantize'), - ('scale_param_q', 'scale_param_q_data'), - ('scale_param_q_data', 'quantize'), - ('zerop_param_q', 'zerop_param_q_data'), - ('zerop_param_q_data', 'quantize'), - ('quantize', 'quantize_data'), - ('quantize_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - { - 'quantize': {'axis': axis}, - 'input': {'shape': input_shape}, - 'input_data': {'shape': input_shape}, - 'scale_param_q': {'shape': scale_param_value.shape, 'value': scale_param_value}, - 'scale_param_q_data': {'shape': scale_param_value.shape, 'value': scale_param_value}, - 'zerop_param_q': {'shape': zero_param_value.shape, 'value': zero_param_value}, - 'zerop_param_q_data': {'shape': zero_param_value.shape, 'value': zero_param_value}, - }, nodes_with_edges_only=True) - - graph_ref = build_graph(nodes_ref_attributes, - [('input', 'input_data'), - ('input_data', 'f_quantize'), - ('scale_param_q', 'scale_param_q_data'), - ('scale_param_q_data', 'mul1', {'out': 0}), - ('in_low', 'in_low_data'), - ('in_low_data', 'mul1'), - ('mul1', 'mul1_data'), - ('mul1_data', 'high_reshape'), - ('high_reshape_const', 'high_reshape_const_data'), - ('high_reshape_const_data', 'high_reshape'), - ('high_reshape', 'high_reshape_data'), - ('high_reshape_data', 'f_quantize'), - - ('f_quantize', 'f_quantize_data'), - ('scale_param_q_data', 'mul2', {'out': 0}), - ('in_high', 'in_high_data'), - ('in_high_data', 'mul2'), - ('mul2', 'mul2_data'), - ('mul2_data', 'low_reshape'), - ('low_reshape', 'low_reshape_data'), - ('low_reshape_data', 'f_quantize'), - ('low_reshape_const', 'low_reshape_const_data'), - ('low_reshape_const_data', 'low_reshape'), - ('out_low', 'out_low_data'), - ('out_low_data', 'f_quantize'), - - ('out_high', 'out_high_data'), - ('out_high_data', 'f_quantize'), - ('f_quantize_data', 'cast'), - ('cast', 'cast_data'), - ('cast_data', 'out'), - ('out', 'out_data'), - ('out_data', 'result'), - ], - {'in_low': {'shape': in_low.shape, 'value': in_low}, - 'in_low_data': {'shape': in_low.shape, 'value': in_low}, - 'in_high': {'shape': in_high.shape, 'value': in_high}, - 'in_high_data': {'shape': in_high.shape, 'value': in_high}, - 'out_low': {'shape': np.array([]), 'value': out_low}, - 'out_low_data': {'shape': np.array([]), 'value': out_low}, - 'out_high': {'shape': np.array([]), 'value': out_high}, - 'out_high_data': {'shape': np.array([]), 'value': out_high}, - 'cast': {'dst_type': zero_param_value.dtype}, - 'low_reshape_const_data': {'shape': target_shape.shape, 'value': target_shape}, - 'high_reshape_const_data': 
{'shape': target_shape.shape, 'value': target_shape}, - }, nodes_with_edges_only=True) - - graph.stage = 'middle' - QuantizeLinearResolver().find_and_replace_pattern(graph) - - (flag, resp) = compare_graphs(graph, graph_ref, 'result', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/middle/reverse_tensor_iterator_test.py b/tools/mo/unit_tests/mo/middle/reverse_tensor_iterator_test.py deleted file mode 100644 index c462d0e9a6dd36..00000000000000 --- a/tools/mo/unit_tests/mo/middle/reverse_tensor_iterator_test.py +++ /dev/null @@ -1,117 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.middle.reverse_tensor_iterator import ReverseTensorIteratorLSTM -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, connect, \ - valued_const_with_data, regular_op_with_empty_data, result - -nodes = { - **regular_op_with_shaped_data('parameter', [1, 3, 227, 227], - {'type': 'Parameter', 'op': 'Parameter', 'shape': [1, 3, 227, 227]}), - **valued_const_with_data('seq_len', np.array([227])), - **regular_op_with_empty_data('shapeof', {'type': 'ShapeOf', 'op': 'ShapeOf'}), - **valued_const_with_data('gather_axis', np.array([0])), - **valued_const_with_data('gather_batch_ind', np.array([0])), - **valued_const_with_data('gather_seq_ind', np.array([2])), - **regular_op_with_empty_data('gather_batch', {'type': 'Gather', 'op': 'Gather'}), - **regular_op_with_empty_data('gather_seq', {'type': 'Gather', 'op': 'Gather'}), - **regular_op_with_empty_data('broadcast', {'type': 'Broadcast', 'op': 'Broadcast'}), - **regular_op_with_shaped_data('direct_reverse', [1, 3, 227, 227], {'type': 'ReverseSequence', - 'op': 'ReverseSequence', - 'seq_axis': 2, 'batch_axis': 0}), - **regular_op_with_empty_data('init_hidden', {'type': 'Init', 'op': 'Init'}), - - **regular_op_with_shaped_data('ti', [1, 2, 34, 56], {'type': 'TensorIterator', 'op': 'TensorIterator', - 'output_port_map': [{'axis': 2, 'start': 0, 'end': -1, - 'stride': 1, 'external_port_id': 0}], - 'input_port_map': [{'axis': 2, 'start': -1, 'end': 0, - 'stride': -1, 'external_port_id': 0}]}), - **valued_const_with_data('inverse_seq_len', np.array([34])), - **regular_op_with_empty_data('inverse_shapeof', {'type': 'ShapeOf', 'op': 'ShapeOf'}), - **regular_op_with_empty_data('inverse_gather_batch', {'type': 'Gather', 'op': 'Gather'}), - **regular_op_with_empty_data('inverse_gather_seq', {'type': 'Gather', 'op': 'Gather'}), - **regular_op_with_empty_data('inverse_broadcast', {'type': 'Broadcast', 'op': 'Broadcast'}), - **regular_op_with_shaped_data('inverse_reverse', [1, 2, 34, 56], {'type': 'ReverseSequence', - 'op': 'ReverseSequence', - 'seq_axis': 2, 'batch_axis': 0}), - **regular_op_with_empty_data('some_op', {'op': 'SomeOp'}), - **result() -} - -ref_nodes = { - **regular_op_with_shaped_data('parameter', [1, 3, 227, 227], - {'type': 'Parameter', 'op': 'Parameter', 'shape': [1, 3, 227, 227]}), - **regular_op_with_empty_data('init_hidden', {'type': 'Init', 'op': 'Init'}), - **regular_op_with_empty_data('ti', {'type': 'TensorIterator', 'op': 'TensorIterator', - 'output_port_map': [{'axis': 2, 'start': -1, 'end': 0, 'stride': -1, - 'external_port_id': 0}], - 'input_port_map': [{'axis': 2, 'start': 0, 'end': -1, 'stride': 1, - 'external_port_id': 0}]}), - **regular_op_with_empty_data('some_op', {'op': 'SomeOp'}), - **result() -} - - -class 
ReverseTensorIteratorTest(unittest.TestCase): - def test_ti_reverse(self): - graph = build_graph(nodes, [*connect('parameter:0', '0:direct_reverse'), - *connect('parameter:0', 'shapeof', skip_data=True), - *connect('shapeof:0', '0:gather_batch'), - *connect('gather_batch_ind', '1:gather_batch'), - *connect('gather_axis', '2:gather_batch'), - *connect('shapeof:0', '0:gather_seq', skip_data=True), - *connect('gather_seq_ind', '1:gather_seq'), - *connect('gather_axis', '2:gather_seq'), - *connect('gather_seq', '0:broadcast'), - *connect('gather_batch', '1:broadcast'), - *connect('broadcast', '1:direct_reverse'), - *connect('direct_reverse', '0:ti'), - *connect('init_hidden', '1:ti'), - *connect('ti', 'inverse_shapeof'), - *connect('inverse_shapeof:0', '0:inverse_gather_batch'), - *connect('gather_batch_ind', '1:inverse_gather_batch'), - *connect('gather_axis', '2:inverse_gather_batch'), - *connect('inverse_shapeof:0', '0:inverse_gather_seq', skip_data=True), - *connect('gather_seq_ind', '1:inverse_gather_seq'), - *connect('gather_axis', '2:inverse_gather_seq'), - *connect('inverse_gather_seq', '0:inverse_broadcast'), - *connect('inverse_gather_batch', '1:inverse_broadcast'), - *connect('ti', '0:inverse_reverse', skip_data=True), - *connect('inverse_broadcast', '1:inverse_reverse'), - *connect('inverse_reverse', 'some_op'), - *connect('some_op', 'output')], nodes_with_edges_only=True) - - ReverseTensorIteratorLSTM().find_and_replace_pattern(graph) - graph.clean_up() - - ref_graph = build_graph(ref_nodes, [*connect('parameter', '0:ti'), - *connect('init_hidden', '1:ti'), - *connect('ti', 'some_op'), - *connect('some_op', 'output')]) - flag, resp = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_ti_reverse_const(self): - graph = build_graph(nodes, [*connect('parameter:0', '0:direct_reverse'), - *connect('seq_len', '1:direct_reverse'), - *connect('direct_reverse', '0:ti'), - *connect('init_hidden', '1:ti'), - *connect('ti', '0:inverse_reverse'), - *connect('inverse_seq_len', '1:inverse_reverse'), - *connect('inverse_reverse', 'some_op'), - *connect('some_op', 'output')], nodes_with_edges_only=True) - - ReverseTensorIteratorLSTM().find_and_replace_pattern(graph) - graph.clean_up() - - ref_graph = build_graph(ref_nodes, [*connect('parameter', '0:ti'), - *connect('init_hidden', '1:ti'), - *connect('ti', 'some_op'), - *connect('some_op', 'output')]) - flag, resp = compare_graphs(graph, ref_graph, 'output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/middle/sparse_reshape_test.py b/tools/mo/unit_tests/mo/middle/sparse_reshape_test.py deleted file mode 100644 index 6269e0e430c0e4..00000000000000 --- a/tools/mo/unit_tests/mo/middle/sparse_reshape_test.py +++ /dev/null @@ -1,97 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.middle.sparse_reshape import SparseReshapeMiddleReplacer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class SparseReshapeMiddleReplacerTests(unittest.TestCase): - def test1(self): - graph = build_graph({ - 'const_dense_shape': {'type': 'Const', 'kind': 'op', 'op': 'Const', - 'value': int64_array([4, 5]), 'shape': int64_array([2])}, - 'const_dense_shape_data': {'kind': 'data', - 'value': int64_array([4, 5]), 'shape': int64_array([2])}, - 
'const_new_dense_shape': {'type': 'Const', 'kind': 'op', 'op': 'Const', - 'value': int64_array([4, -1]), 'shape': int64_array([2])}, - 'const_new_dense_shape_data': {'kind': 'data', - 'value': int64_array([4, -1]), 'shape': int64_array([2])}, - 'const_default_value': {'type': 'Const', 'kind': 'op', 'op': 'Const', - 'value': 2, 'shape': int64_array([])}, - 'const_default_value_data': {'kind': 'data','value': 2, 'shape': int64_array([])}, - - 'input_indices': {'value': None, 'shape': int64_array([10, 2]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_indices_data': {'shape': int64_array([10, 2]), 'value': None, 'kind': 'data'}, - 'input_values': {'value': None, 'shape': int64_array([10]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_values_data': {'shape': int64_array([10]), 'value': None, 'kind': 'data'}, - 'input_params_table': {'value': None, 'shape': int64_array([100, 4, 3]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_params_table_data': {'shape': int64_array([10, 4, 3]), 'value': None, 'kind': 'data'}, - - 'sparse_reshape': {'kind': 'op', 'op': 'SparseReshape'}, - - 'output_indices_data': {'shape': int64_array([10, 2]), 'value': None, 'kind': 'data'}, - 'output_new_dense_shape_data': {'kind': 'data', 'value': int64_array([4, 5]), 'shape': int64_array([2])}, - - 'sparse_weighted_sum': {'kind': 'op', 'op': 'SparseWeightedSum'}, - }, - [ - ('input_indices', 'input_indices_data'), - ('input_indices_data', 'sparse_reshape', {'in': 0}), - ('const_dense_shape', 'const_dense_shape_data'), - ('const_dense_shape_data', 'sparse_reshape', {'in': 1}), - ('const_new_dense_shape', 'const_new_dense_shape_data'), - ('const_new_dense_shape_data', 'sparse_reshape', {'in': 2}), - ('sparse_reshape', 'output_indices_data', {'out': 0, 'in': 0}), - ('sparse_reshape', 'output_new_dense_shape_data', {'out': 1, 'in': 0}), - ('output_indices_data', 'sparse_weighted_sum', {'in': 0}), - ('input_values', 'input_values_data'), - ('input_values_data', 'sparse_weighted_sum', {'in': 1}), - ('output_new_dense_shape_data', 'sparse_weighted_sum', {'in': 2}), - ('input_params_table', 'input_params_table_data'), - ('input_params_table_data', 'sparse_weighted_sum', {'in': 3}), - ('const_default_value', 'const_default_value_data'), - ('const_default_value_data', 'sparse_weighted_sum', {'in': 4}) - ]) - SparseReshapeMiddleReplacer().find_and_replace_pattern(graph) - #graph_clean_up(graph) - ref_graph = build_graph({ - 'const_dense_shape': {'type': 'Const', 'kind': 'op', 'op': 'Const', - 'value': int64_array([4, 5]), 'shape': int64_array([2])}, - 'output_new_dense_shape_data': {'kind': 'data', - 'value': int64_array([4, 5]), 'shape': int64_array([2])}, - 'const_default_value': {'type': 'Const', 'kind': 'op', 'op': 'Const', - 'value': 2, 'shape': int64_array([])}, - 'const_default_value_data': {'kind': 'data','value': 2, 'shape': int64_array([])}, - 'input_indices': {'value': None, 'shape': int64_array([10, 2]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'output_indices_data': {'shape': int64_array([10, 2]), 'value': None, 'kind': 'data'}, - 'input_values': {'value': None, 'shape': int64_array([10]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_values_data': {'shape': int64_array([10]), 'value': None, 'kind': 'data'}, - 'input_params_table': {'value': None, 'shape': int64_array([100, 4, 3]), 'type': 'Parameter', 'kind': 'op', - 'op': 'Parameter'}, - 'input_params_table_data': {'shape': int64_array([10, 4, 3]), 'value': None, 
'kind': 'data'}, - 'sparse_weighted_sum': {'kind': 'op', 'op': 'SparseWeightedSum'}, - }, - [ - ('input_indices', 'output_indices_data'), - ('output_indices_data', 'sparse_weighted_sum', {'in': 0}), - ('input_values', 'input_values_data'), - ('input_values_data', 'sparse_weighted_sum', {'in': 1}), - ('const_dense_shape', 'output_new_dense_shape_data'), - ('output_new_dense_shape_data', 'sparse_weighted_sum', {'in': 2}), - ('input_params_table', 'input_params_table_data'), - ('input_params_table_data', 'sparse_weighted_sum', {'in': 3}), - ('const_default_value', 'const_default_value_data'), - ('const_default_value_data', 'sparse_weighted_sum', {'in': 4}) - ]) - - (flag, resp) = compare_graphs(graph, ref_graph, 'sparse_weighted_sum') - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/moc_frontend/moc_extractor_test_actual.py b/tools/mo/unit_tests/mo/moc_frontend/moc_extractor_test_actual.py deleted file mode 100644 index cac56f82bf897b..00000000000000 --- a/tools/mo/unit_tests/mo/moc_frontend/moc_extractor_test_actual.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.moc_frontend.extractor import decode_name_with_port -from openvino.tools.mo.utils.error import Error - -import pytest - - -mock_available = True - -try: - # pylint: disable=no-name-in-module,import-error - from mock_mo_python_api import get_model_statistic, get_place_statistic, \ - clear_frontend_statistic, clear_model_statistic, clear_place_statistic, \ - clear_setup, set_equal_data, set_max_port_counts - - # pylint: disable=no-name-in-module,import-error - from openvino.frontend import FrontEndManager - -except Exception: - print("No mock frontend API available, " - "ensure to use -DENABLE_TESTS=ON option when running these tests") - mock_available = False - -# FrontEndManager shall be initialized and destroyed after all tests finished -# This is because destroy of FrontEndManager will unload all plugins, -# no objects shall exist after this -if mock_available: - fem = FrontEndManager() - -mock_needed = pytest.mark.skipif(not mock_available, - reason="mock MO fe is not available") - - -class TestMainFrontend(unittest.TestCase): - def setUp(self): - clear_frontend_statistic() - clear_model_statistic() - clear_place_statistic() - clear_setup() - set_max_port_counts(10, 10) - self.fe = fem.load_by_framework('openvino_mock_mo_frontend') - self.model = self.fe.load('abc.bin') - - # Mock model has 'tensor' tensor place - @mock_needed - def test_decode_name_with_port_tensor(self): - node = decode_name_with_port(self.model, "tensor") - model_stat = get_model_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - assert node - - # pylint: disable=wrong-spelling-in-comment - # Mock model doesn't have 'mocknoname' place - @mock_needed - def test_decode_name_with_port_noname(self): - with self.assertRaisesRegex(Error, 'No\\ node\\ with\\ name.*mocknoname*'): - decode_name_with_port(self.model, 'mocknoname') - model_stat = get_model_statistic() - assert model_stat.get_place_by_tensor_name == 1 - - # Mock model has both tensor and tensor:0 places with non equal data - # Collision is expected - @mock_needed - def test_decode_name_with_port_collision(self): - with self.assertRaisesRegex(Error, 'Name\\ collision.*tensorAndOp*'): - decode_name_with_port(self.model, 'tensorAndOp:0') - model_stat = get_model_statistic() - place_stat = get_place_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - 
assert model_stat.get_place_by_operation_name == 1 - assert place_stat.is_equal_data > 0 - - - # Mock model has 'operation' and output port up to 10 - @mock_needed - def test_decode_name_with_port_delim_op_out(self): - node = decode_name_with_port(self.model, 'operation:7') - model_stat = get_model_statistic() - place_stat = get_place_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - assert model_stat.get_place_by_operation_name == 1 - assert place_stat.get_output_port == 1 - assert place_stat.lastArgInt == 7 - assert node - - # Mock model has 'operation' and input port up to 10 - @mock_needed - def test_decode_name_with_port_delim_op_in(self): - node = decode_name_with_port(self.model, '7:operation') - model_stat = get_model_statistic() - place_stat = get_place_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - assert model_stat.get_place_by_operation_name == 1 - assert place_stat.get_input_port == 1 - assert place_stat.lastArgInt == 7 - assert node - - # Mock model has 'tensor' and 'tensor:0' tensor places, no collision is expected - @mock_needed - def test_decode_name_with_port_delim_tensor_no_collision_out(self): - node = decode_name_with_port(self.model, 'tensor:0') - model_stat = get_model_statistic() - place_stat = get_place_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - assert model_stat.get_place_by_operation_name == 1 - assert place_stat.get_output_port == 0 - assert node - - # Mock model has 'tensor' and '0:tensor' tensor places, no collision is expected - @mock_needed - def test_decode_name_with_port_delim_tensor_no_collision_in(self): - node = decode_name_with_port(self.model, '0:tensor') - model_stat = get_model_statistic() - place_stat = get_place_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - assert model_stat.get_place_by_operation_name == 1 - assert place_stat.get_input_port == 0 - assert node - - # Mock model doesn't have such '1234:operation' or output port=1234 for 'operation' - @mock_needed - def test_decode_name_with_port_delim_no_port_out(self): - with self.assertRaisesRegex(Error, 'No\\ node\\ with\\ name.*operation\\:1234*'): - decode_name_with_port(self.model, 'operation:1234') - model_stat = get_model_statistic() - place_stat = get_place_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - assert model_stat.get_place_by_operation_name == 1 - assert place_stat.get_output_port == 1 - assert place_stat.lastArgInt == 1234 - - # Mock model doesn't have such '1234:operation' or input port=1234 for 'operation' - @mock_needed - def test_decode_name_with_port_delim_no_port_in(self): - with self.assertRaisesRegex(Error, 'No\\ node\\ with\\ name.*1234\\:operation*'): - decode_name_with_port(self.model, '1234:operation') - model_stat = get_model_statistic() - place_stat = get_place_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - assert model_stat.get_place_by_operation_name == 1 - assert place_stat.get_input_port == 1 - assert place_stat.lastArgInt == 1234 - - # Mock model has tensor with name 'conv2d:0' and operation 'conv2d' with output port = 1 - # It is setup to return 'is_equal_data=True' for these tensor and port - # So no collision is expected - @mock_needed - def test_decode_name_with_port_delim_equal_data_out(self): - set_equal_data('conv2d', 'conv2d') - node = decode_name_with_port(self.model, 'conv2d:0') - model_stat = get_model_statistic() - place_stat = get_place_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - assert 
model_stat.get_place_by_operation_name == 1 - assert place_stat.get_output_port == 1 - assert place_stat.is_equal_data > 0 - assert node - - # Mock model has tensor with name '0:conv2d' and operation 'conv2d' with input port = 1 - # It is setup to return 'is_equal_data=True' for these tensor and port - # So no collision is expected - @mock_needed - def test_decode_name_with_port_delim_equal_data_in(self): - set_equal_data('conv2d', 'conv2d') - node = decode_name_with_port(self.model, '0:conv2d') - model_stat = get_model_statistic() - place_stat = get_place_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - assert model_stat.get_place_by_operation_name == 1 - assert place_stat.get_input_port == 1 - assert place_stat.is_equal_data > 0 - assert node - - # Stress case: Mock model has: - # Tensor '8:9' - # Operation '8:9' - # Operation '8' with output port = 9 - # Operation '9' with input port = 8 - # All places point to same data - no collision is expected - @mock_needed - def test_decode_name_with_port_delim_all_same_data(self): - set_equal_data('8', '9') - node = decode_name_with_port(self.model, '8:9') - model_stat = get_model_statistic() - place_stat = get_place_statistic() - - assert model_stat.get_place_by_tensor_name == 1 - assert model_stat.get_place_by_operation_name == 2 - assert place_stat.get_input_port == 1 - assert place_stat.get_output_port == 1 - # At least 3 comparisons of places are expected - assert place_stat.is_equal_data > 2 - assert node diff --git a/tools/mo/unit_tests/mo/ops/Complex_test.py b/tools/mo/unit_tests/mo/ops/Complex_test.py deleted file mode 100644 index b14bc67c193c10..00000000000000 --- a/tools/mo/unit_tests/mo/ops/Complex_test.py +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.Complex import Complex -from unit_tests.utils.graph import build_graph - -graph_node_attrs_sizes = { - 'input_real': {'type': 'Parameter', 'kind': 'op'}, - 'input_imag': {'type': 'Parameter', 'kind': 'op'}, - 'input_real_data': {'kind': 'data', 'shape': None, 'value': None}, - 'input_imag_data': {'kind': 'data', 'shape': None, 'value': None}, - 'complex': {'op': 'Complex', 'kind': 'op'}, - 'complex_data': {'kind': 'data', 'shape': None, 'value': None}, - 'op_output': {'kind': 'op', 'op': 'Result'}, -} - -graph_edges_sizes = [ - ('input_real', 'input_real_data'), - ('input_imag', 'input_imag_data'), - ('input_real_data', 'complex', {'in': 0}), - ('input_imag_data', 'complex', {'in': 1}), - ('complex', 'complex_data'), - ('complex_data', 'op_output'), -] - - -class TestComplexOp(): - @pytest.mark.parametrize("input_shape, output_shape",[ - ([1, 260, 100, 150], [1, 260, 100, 150, 2]), - ([1, 260, 100], [1, 260, 100, 2]), - ([5, 14, 300, 40], [5, 14, 300, 40, 2]), - ([1, 3, 260, 100, 150], [1, 3, 260, 100, 150, 2]), - ([5, 14, 1000, 300, 40], [5, 14, 1000, 300, 40, 2]) - ]) - def test_complex_op_shape_inference(self, input_shape, output_shape): - graph = build_graph(nodes_attrs=graph_node_attrs_sizes, - edges=graph_edges_sizes, - update_attributes={ - 'input_real_data': {'shape': int64_array(input_shape)}, - 'input_imag_data': {'shape': int64_array(input_shape)}, - }) - node = Node(graph, 'complex') - Complex.infer(node) - - msg = "Complex operation infer failed for case: expected_shape={}, actual_shape={}" - - 
assert np.array_equal(graph.node['complex_data']['shape'], int64_array(output_shape)),\ - msg.format(output_shape, graph.node['complex_data']['shape']) diff --git a/tools/mo/unit_tests/mo/ops/ExtractImagePatches_test.py b/tools/mo/unit_tests/mo/ops/ExtractImagePatches_test.py deleted file mode 100644 index 7a51f02203b9e1..00000000000000 --- a/tools/mo/unit_tests/mo/ops/ExtractImagePatches_test.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import pytest - -import numpy as np - -from openvino.tools.mo.ops.ExtractImagePatches import ExtractImagePatches -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes = { - 'input': {'op': 'Parameter', 'kind': 'op', 'shape': None}, - 'input_data': {'value': None, 'kind': 'data', 'shape': None}, - 'EIP': {'op': 'ExtractImagePatches', 'kind': 'op', 'sizes': None, 'strides': None, 'rates': None, 'auto_pad': None}, - 'EIP_data': {'value': None, 'kind': 'data', 'shape': None}, - 'output': {'op': 'Result', 'kind': 'op', 'shape': None}, -} - -edges = [ - ('input', 'input_data'), - ('input_data', 'EIP'), - ('EIP', 'EIP_data'), - ('EIP_data', 'output'), -] - -class TestExtractImagePatchesPartialInfer(): - @pytest.mark.parametrize("input_shape, sizes, strides, rates, auto_pad, layout, output_shape",[ - ([1, 10, 10, 3], [1, 3, 3, 1], [1, 5, 5, 1], [1, 1, 1, 1], 'valid', 'NHWC', [1, 2, 2, 27]), - ([1, 10, 10, 3], [1, 3, 3, 1], [1, 5, 5, 1], [1, 2, 2, 1], 'valid', 'NHWC', [1, 2, 2, 27]), - ([1, 10, 10, 3], [1, 4, 4, 1], [1, 8, 8, 1], [1, 1, 1, 1], 'valid', 'NHWC', [1, 1, 1, 48]), - ([1, 10, 10, 3], [1, 4, 4, 1], [1, 8, 8, 1], [1, 1, 1, 1], 'same_upper', 'NHWC', [1, 2, 2, 48]), - ([1, 10, 10, 3], [1, 4, 4, 1], [1, 9, 9, 1], [1, 1, 1, 1], 'same_upper', 'NHWC', [1, 2, 2, 48]), - ([1, 10, 10, 3], [1, 4, 4, 1], [1, 9, 9, 1], [1, 1, 1, 1], 'same_lower', 'NHWC', [1, 2, 2, 48]), - ([1, 64, 64, 3], [1, 3, 3, 1], [1, 1, 1, 1], [1, 1, 1, 1], 'valid', 'NHWC', [1, 62, 62, 27]), - ([1, 64, 64, 3], [1, 3, 3, 1], [1, 1, 1, 1], [1, 1, 1, 1], 'same_upper', 'NHWC', [1, 64, 64, 27]), - - ([1, 3, 10, 10], [1, 1, 3, 3], [1, 1, 5, 5], [1, 1, 1, 1], 'valid', 'NCHW', [1, 27, 2, 2]), - ([1, 3, 10, 10], [1, 1, 4, 4], [1, 1, 8, 8], [1, 1, 1, 1], 'valid', 'NCHW', [1, 48, 1, 1]), - - ([1, 3, 10, 10], [1, 1, 4, 4], [1, 1, 9, 9], [1, 1, 1, 1], 'same_upper', 'NCHW', [1, 48, 2, 2]), - ([1, 3, 10, 10], [1, 1, 4, 4], [1, 1, 9, 9], [1, 1, 1, 1], 'same_lower', 'NCHW', [1, 48, 2, 2]), - - ]) - - - def test_eip_infer(self, input_shape, sizes, strides, rates, auto_pad, layout, output_shape): - graph = build_graph( - nodes_attrs=nodes, - edges=edges, - update_attributes={ - 'input': {'shape': int64_array(input_shape)}, - 'input_data': {'shape': int64_array(input_shape)}, - 'EIP': {'spatial_dims': int64_array([1, 2]) if layout == 'NHWC' else int64_array([2, 3]), - 'sizes': int64_array(sizes), 'strides': int64_array(strides), 'rates': int64_array(rates), - 'auto_pad': auto_pad}, - } - ) - - graph.graph['layout'] = layout - - eip_node = Node(graph, 'EIP') - ExtractImagePatches.infer(eip_node) - - assert np.array_equal(eip_node.out_port(0).data.get_shape(), output_shape) diff --git a/tools/mo/unit_tests/mo/ops/If_test.py b/tools/mo/unit_tests/mo/ops/If_test.py deleted file mode 100644 index 73828ac6d8ad41..00000000000000 --- a/tools/mo/unit_tests/mo/ops/If_test.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (C) 
2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np -import numpy.testing as npt - -from openvino.tools.mo.ops.If import If -from openvino.tools.mo.ops.elementwise import Add, Mul -from openvino.tools.mo.ops.identity import Identity -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, strict_compare_tensors, \ - dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.ops.eltwise import eltwise_infer -from openvino.tools.mo.ops.shape import Shape -from unit_tests.utils.graph import build_graph_with_edge_attrs, build_graph -from unit_tests.utils.graph import regular_op_with_empty_data, connect, result, valued_const_with_data, regular_op, \ - empty_data - - -class TestIf(): - @pytest.mark.parametrize("cond, output_port_0_shape, output_port_1_shape",[ - (np.array([True], dtype=bool), shape_array([3]), shape_array([3])), - (np.array([False], dtype=bool), shape_array([3]), shape_array([2])), - (shape_array(dynamic_dimension_value), shape_array([3]), shape_array([dynamic_dimension_value])), - ]) - def test_simple_shape_inf(self, cond, output_port_0_shape, output_port_1_shape): - then_graph_nodes = {**regular_op_with_empty_data('param_1', {'type': 'Parameter', 'kind': 'op', 'input_id': 1, - 'shape': None, 'infer': Parameter.infer}), - **regular_op_with_empty_data('param_2', {'type': 'Parameter', 'kind': 'op', 'input_id': 2, - 'shape': None, 'infer': Parameter.infer}), - **regular_op_with_empty_data('add', {'type': 'Add', 'kind': 'op', 'op': 'Add', - 'infer': lambda node: eltwise_infer(node, - Add.operation)}), - **regular_op_with_empty_data('mul', {'type': 'Mul', 'kind': 'op', 'op': 'Mul', - 'infer': lambda node: eltwise_infer(node, - Mul.operation)}), - **regular_op_with_empty_data('res1', {'kind': 'op', 'type': 'Result', 'op': 'Result', - 'infer': lambda x: 0, 'output_id': 0}), - **regular_op_with_empty_data('res2', {'kind': 'op', 'type': 'Result', 'op': 'Result', - 'infer': lambda x: 0, 'output_id': 1})} - then_graph_edges = [*connect('param_1', '0:add'), - *connect('param_2', '1:add'), - *connect('param_1', '1:mul'), - *connect('param_2', '0:mul'), - *connect('add', 'res1'), - *connect('mul', 'res2'), - ] - - else_graph_nodes = {**regular_op_with_empty_data('param_1', {'type': 'Parameter', 'kind': 'op', 'input_id': 1, - 'shape': None, 'infer': Parameter.infer}), - **regular_op_with_empty_data('param_2', {'type': 'Parameter', 'kind': 'op', 'input_id': 3, - 'shape': None, 'infer': Parameter.infer}), - **regular_op_with_empty_data('identity', - {'kind': 'op', 'op': 'Identity', 'infer': Identity.infer}), - **regular_op_with_empty_data('identity_1', - {'kind': 'op', 'op': 'Identity', 'infer': Identity.infer}), - **regular_op_with_empty_data('res1', {'kind': 'op', 'type': 'Result', 'op': 'Result', - 'infer': lambda x: 0, 'output_id': 0}), - **regular_op_with_empty_data('res2', {'kind': 'op', 'type': 'Result', 'op': 'Result', - 'infer': lambda x: 0, 'output_id': 1})} - else_graph_edges = [*connect('param_1', 'identity'), - *connect('param_2', 'identity_1'), - *connect('identity_1', 'res2'), - *connect('identity', 'res1'), ] - then_graph = build_graph_with_edge_attrs(then_graph_nodes, then_graph_edges) - else_graph = build_graph_with_edge_attrs(else_graph_nodes, else_graph_edges) - external_graph_nodes = { - **valued_const_with_data('cond', cond), - 
**valued_const_with_data('input_2', int64_array([3, 2, 1])), - **valued_const_with_data('input_1', int64_array([1, 2, 3])), - **valued_const_with_data('input_3', int64_array([8, 4])), - **regular_op('if', {'kind': 'op', 'op': 'If', 'then_graph': then_graph, - 'else_graph': else_graph, 'infer': If.infer}), - **empty_data('if_d_1'), - **empty_data('if_d_2'), - **result('res_1'), - **result('res_2')} - external_graph_edges = [*connect('cond', '0:if'), - *connect('input_1', '1:if'), - *connect('input_2', '2:if'), - *connect('input_3', '3:if'), - ('if', 'if_d_1', {'out': 0}), - ('if', 'if_d_2', {'out': 1}), - ('if_d_1', 'res_1'), - ('if_d_2', 'res_2')] - - graph = build_graph(external_graph_nodes, external_graph_edges) - graph.stage = 'middle' - partial_infer(graph) - if_node = Node(graph, 'if') - assert strict_compare_tensors(if_node.out_port(0).data.get_shape(), output_port_0_shape) - # shape of the "then" branch is [3] and shape of the "else" branch is [2], so the output shape is "[dynamic]" - assert strict_compare_tensors(if_node.out_port(1).data.get_shape(), output_port_1_shape) - - def test_fake_results(self): - then_graph_nodes = {**valued_const_with_data('fake_const', int64_array(0)), - **regular_op_with_empty_data('shapeof', - {'kind': 'op', 'type': 'ShapeOf', 'op': 'ShapeOf', 'infer': Shape.infer, - 'output_type': np.int64}), - **regular_op_with_empty_data('res_1', {'kind': 'op', 'type': 'Result', 'op': 'Result', - 'infer': lambda x: 0, 'output_id': 0})} - then_graph_edges = [*connect('fake_const', 'shapeof'), - *connect('shapeof', 'res_1'), - ] - - else_graph_nodes = {**regular_op_with_empty_data('param_1', {'type': 'Parameter', 'kind': 'op', 'input_id': 1, - 'shape': None, 'infer': Parameter.infer}), - **regular_op_with_empty_data('res_1', {'kind': 'op', 'type': 'Result', 'op': 'Result', - 'infer': lambda x: 0, 'output_id': 0})} - else_graph_edges = [*connect('param_1', 'res_1')] - then_graph = build_graph_with_edge_attrs(then_graph_nodes, then_graph_edges) - else_graph = build_graph_with_edge_attrs(else_graph_nodes, else_graph_edges) - external_graph_nodes = { - **valued_const_with_data('cond', shape_array([dynamic_dimension_value])), - **valued_const_with_data('input_1', int64_array([1, 2, 3, 3, 2, 3]).reshape((2, 3))), - **regular_op_with_empty_data('if', {'kind': 'op', 'op': 'If', 'then_graph': then_graph, - 'else_graph': else_graph, 'infer': If.infer}), - **result('res_1')} - external_graph_edges = [*connect('cond', '0:if'), - *connect('input_1', '1:if'), - *connect('if', 'res_1')] - - graph = build_graph(external_graph_nodes, external_graph_edges) - graph.stage = 'middle' - partial_infer(graph) - npt.assert_array_equal(Node(graph, 'if').out_port(0).data.get_shape(), int64_array([2, 3])) diff --git a/tools/mo/unit_tests/mo/ops/LookupTableInsert_test.py b/tools/mo/unit_tests/mo/ops/LookupTableInsert_test.py deleted file mode 100644 index 2d8682d4ce8dc2..00000000000000 --- a/tools/mo/unit_tests/mo/ops/LookupTableInsert_test.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.LookupTableInsert import LookupTableInsert -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'table': {'kind': 'op'}, - 'table_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'keys': {'kind': 'op'}, - 'keys_data': {'shape': 
None, 'value': None, 'kind': 'data'}, - 'values': {'kind': 'op'}, - 'values_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'lookuptableinsert_node': {'op': 'LookupTableInsert', 'kind': 'op'}, - 'output': {'shape': None, 'value': None, 'kind': 'data'}} - -# graph 1 -edges1 = [('table', 'table_data'), - ('keys', 'keys_data'), - ('values', 'values_data'), - ('table_data', 'lookuptableinsert_node', {'in': 0}), - ('keys_data', 'lookuptableinsert_node', {'in': 1}), - ('values_data', 'lookuptableinsert_node', {'in': 2}), - ('lookuptableinsert_node', 'output')] - -# valid test case -inputs1 = {'table_data': {}, - 'keys_data': {'shape': int64_array([4])}, - 'values_data': {'shape': int64_array([4])}} - -# invalid test case -inputs2 = {'table_data': {}, - 'keys_data': {'shape': int64_array([5, 2])}, - 'values_data': {'shape': int64_array([4])}} - -class TestLookupTableInsert(unittest.TestCase): - def test_infer1(self): - graph = build_graph(nodes_attributes, edges1, inputs1) - lookuptableinsert_node = Node(graph, 'lookuptableinsert_node') - LookupTableInsert.infer(lookuptableinsert_node) - - # prepare reference results - ref_output_shape = int64_array([]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_infer_invalid1(self): - graph = build_graph(nodes_attributes, edges1, inputs2) - lookuptableinsert_node = Node(graph, 'lookuptableinsert_node') - self.assertRaises(AssertionError, LookupTableInsert.infer, lookuptableinsert_node) diff --git a/tools/mo/unit_tests/mo/ops/MatMul_test.py b/tools/mo/unit_tests/mo/ops/MatMul_test.py deleted file mode 100644 index 46e3bc91f20f16..00000000000000 --- a/tools/mo/unit_tests/mo/ops/MatMul_test.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.MatMul import MatMul -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph_with_attrs - - -class TestMatMul(): - nodes = [ - ('A', {'type': 'Parameter', 'kind': 'op'}), - ('A_d', {'kind': 'data'}), - ('B', {'type': 'Parameter', 'kind': 'op'}), - ('B_d', {'kind': 'data', 'dim_attrs': []}), - ('mat_mul', {'type': 'MatMul', 'kind': 'op'}), - ('mat_mul_d', {'kind': 'data', 'value': None, 'shape': None}), - ('op_output', {'kind': 'op', 'op': 'Result'}), - ] - edges = [ - ('A', 'A_d'), - ('B', 'B_d'), - ('A_d', 'mat_mul', {'in': 0}), - ('B_d', 'mat_mul', {'in': 1}), - ('mat_mul', 'mat_mul_d'), - ('mat_mul_d', 'op_output'), - ] - - @pytest.mark.parametrize("A_shape, B_shape, C_shape, transpose_a, transpose_b",[ - ([1024], [1024, 1000], [1000], False, False), - ([dynamic_dimension_value], [1024, 1000], [1000], False, False), - ([1024], [dynamic_dimension_value, 1000], [1000], False, False), - ([1024], [1024, 1000], [1000], True, False), - ([1024], [1000, 1024], [1000], True, True), - ([dynamic_dimension_value], [dynamic_dimension_value, dynamic_dimension_value], [dynamic_dimension_value], True, - True), - ([1, 1024], [1024, 1000], [1, 1000], False, False), - ([1, 1024], [1000, 1024], [1, 1000], False, True), - ([1024, 1000], [1000], [1024], False, False), - ([1024, 1000], [1000], [1024], False, True), - ([1000, 1024], [1000], [1024], 
True, True), - ([1000, dynamic_dimension_value], [1000], [dynamic_dimension_value], True, True), - ([10, 1024], [1024, 1000], [10, 1000], False, False), - ([5, 10, 1024], [1024, 1000], [5, 10, 1000], False, False), - ([5, 10, 1024], [5, 1024, 1000], [5, 10, 1000], False, False), - ([5, 10, 1024], [1, 1024, 1000], [5, 10, 1000], False, False), - ([5, 10, 1024], [1, 1000, 1024], [5, 10, 1000], False, True), - ]) - def test_positive_matmul_infer(self, A_shape, B_shape, C_shape, transpose_a, transpose_b): - graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges, - update_nodes_attributes=[ - ('A_d', {'shape': shape_array(A_shape)}), - ('B_d', {'shape': shape_array(B_shape)}), - ('mat_mul', {'transpose_a': transpose_a, 'transpose_b': transpose_b}), - ]) - node = Node(graph, 'mat_mul') - MatMul.infer(node) - - msg = "MatMul infer failed for case: A_shape={}, B_shape={}, transpose_a={}, transpose_b={} " \ - "expected_shape={}, actual_shape={}" - - assert np.array_equal(graph.node['mat_mul_d']['shape'], shape_array(C_shape)),\ - msg.format(A_shape, B_shape, transpose_a, transpose_b, C_shape, - graph.node['mat_mul_d']['shape']) - - @pytest.mark.parametrize("A_shape, B_shape",[ - (None, [1024, 1000]), - (1, [1024, 1000]), - ([], [1024, 1000]), - ([1024, 1000], [1024, 1000]), - ([5, 10, 1024], [3, 1024, 1000]), - ]) - def test_negative_matmul_infer(self, A_shape, B_shape): - graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges, - update_nodes_attributes=[ - ('A_d', {'shape': np.array(A_shape)}), - ('B_d', {'shape': int64_array(B_shape)}), - ]) - - node = Node(graph, 'mat_mul') - with pytest.raises(AssertionError): - MatMul.infer(node) diff --git a/tools/mo/unit_tests/mo/ops/MatMul_value_propagation_test.py b/tools/mo/unit_tests/mo/ops/MatMul_value_propagation_test.py deleted file mode 100644 index bc3fd1813a91ed..00000000000000 --- a/tools/mo/unit_tests/mo/ops/MatMul_value_propagation_test.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.MatMul import MatMul, transpose -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -graph_nodes_attrs = { - 'A': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'A_data': {'kind': 'data', 'shape': None, 'value': None}, - 'B': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'B_data': {'kind': 'data', 'shape': None, 'value': None, 'dim_attrs': []}, - 'matmul': {'type': 'MatMul', 'op': 'MatMul', 'kind': 'op', 'transpose_a': False, 'transpose_b': False}, - 'matmul_data': {'kind': 'data', 'value': None, 'shape': None}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - - -graph_edges=[ - ('A', 'A_data'), - ('B', 'B_data'), - ('A_data', 'matmul', {'in': 0}), - ('B_data', 'matmul', {'in': 1}), - ('matmul', 'matmul_data'), - ('matmul_data', 'output'), -] - - -class TestMatMulValuePropagation(): - @pytest.mark.parametrize("a_shape, a_value, b_shape, b_value, transpose_a, transpose_b",[ - ([16, 3], np.arange(-5, -5 + 16 * 3).reshape((16, 3)), - [3, 5], np.arange(0, 3 * 5).reshape((3, 5)), - False, False), - ([3, 16], np.arange(-5, -5 + 16 * 3).reshape((3, 16)), - [3, 5], np.arange(0, 3 * 5).reshape((3, 5)), - True, False), - ([5, 8], np.arange(-1, -1 + 5 * 8).reshape((5, 8)), - [4, 8], 
np.arange(-2, -2 + 4 * 8).reshape((4, 8)), - False, True), - ([8, 8], np.arange(1, 1 + 8 * 8).reshape((8, 8)), - [4, 8], np.arange(-2, -2 + 4 * 8).reshape((4, 8)), - True, True), - - ([7, 16, 3], np.arange(0, 0 + 16 * 3 * 7).reshape((7, 16, 3)), - [3, 5], np.arange(0, 3 * 5).reshape((3, 5)), - False, False), - ([1, 3, 16], np.arange(-5, -5 + 16 * 3).reshape((1, 3, 16)), - [3, 5], np.arange(0, 3 * 5).reshape((3, 5)), - True, False), - ([11, 5, 8], np.arange(-1, -1 + 5 * 8 * 11).reshape((11, 5, 8)), - [11, 4, 8], np.arange(-2, -2 + 4 * 8 * 11).reshape((11, 4, 8)), - False, True), - ([1, 3, 5, 8, 8], np.arange(1, 1 + 8 * 8 * 3 * 5).reshape((1, 3, 5, 8, 8)), - [4, 8], np.arange(-2, -2 + 4 * 8).reshape((4, 8)), - True, True), - - ([2], np.zeros((2)), [2], np.zeros((2)), False, False), - ([2], np.zeros((2)), [1, 2, 3], np.zeros((1, 2, 3)), False, False), - ([1, 2, 3], np.zeros((1, 2, 3)), [3], np.zeros((3)), False, False), - ]) - def test_value_propagation(self, a_shape, a_value, b_shape, b_value, transpose_a, transpose_b): - graph = build_graph( - nodes_attrs=graph_nodes_attrs, - edges=graph_edges, - update_attributes={ - 'A': {'shape': int64_array(a_shape), 'value': a_value}, - 'A_data': {'shape': int64_array(a_shape), 'value': a_value}, - 'B': {'shape': int64_array(b_shape), 'value': b_value}, - 'B_data': {'shape': int64_array(b_shape), 'value': b_value}, - 'matmul': {'transpose_a': transpose_a, 'transpose_b': transpose_b}, - 'matmul_data': {'value': None, 'shape': None}, - } - ) - node = Node(graph, 'matmul') - MatMul.infer(node) - node_data = node.out_port(0).get_destination().data.get_value() - a = a_value - b = b_value - if transpose_a: - a = transpose(a) - if transpose_b: - b = transpose(b) - ref_data = np.matmul(a, b) - node_data_shape = node_data.shape - ref_data_shape = ref_data.shape - msg = "Value propagation for 'matmul' node is not correct." 
- assert node_data_shape == ref_data_shape and np.all(node_data == ref_data), msg diff --git a/tools/mo/unit_tests/mo/ops/ONNXResize11_test.py b/tools/mo/unit_tests/mo/ops/ONNXResize11_test.py deleted file mode 100644 index ed17446c02af12..00000000000000 --- a/tools/mo/unit_tests/mo/ops/ONNXResize11_test.py +++ /dev/null @@ -1,192 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.ONNXResize11 import ONNXResize11Op -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - - -graph_node_attrs_sizes = { - 'input': {'type': 'Parameter', 'kind': 'op'}, - 'input_data': {'kind': 'data', 'shape': None, 'value': None}, - 'roi': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'roi_data': {'kind': 'data', 'shape': None, 'value': None}, - 'scales': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'scales_data': {'kind': 'data', 'shape': None, 'value': None}, - 'sizes': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'sizes_data': {'kind': 'data', 'shape': None, 'value': None}, - 'onnx_resize11': { - 'op': 'ONNXResize11', 'kind': 'op', 'mode': 'nearest', 'nearest_mode': 'round_prefer_floor', - 'coordinate_transformation_mode': 'half_pixel', 'cube_coeff': -0.75 - }, - 'onnx_resize11_data': {'kind': 'data', 'value': None, 'shape': None}, - 'op_output': {'kind': 'op', 'op': 'Result'}, -} - -graph_edges_sizes = [ - ('input', 'input_data'), - ('roi', 'roi_data'), - ('sizes', 'sizes_data'), - ('input_data', 'onnx_resize11', {'in': 0}), - ('roi_data', 'onnx_resize11', {'in': 1}), - ('sizes_data', 'onnx_resize11', {'in': 3}), - ('onnx_resize11', 'onnx_resize11_data'), - ('onnx_resize11_data', 'op_output'), -] - - -graph_node_attrs_scales = { - 'input': {'type': 'Parameter', 'kind': 'op'}, - 'input_data': {'kind': 'data', 'shape': None, 'value': None}, - 'roi': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'roi_data': {'kind': 'data', 'shape': None, 'value': None}, - 'scales': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'scales_data': {'kind': 'data', 'shape': None, 'value': None}, - 'onnx_resize11': { - 'op': 'ONNXResize11', 'kind': 'op', 'mode': 'nearest', 'nearest_mode': 'round_prefer_floor', - 'coordinate_transformation_mode': 'half_pixel', 'cube_coeff': -0.75 - }, - 'onnx_resize11_data': {'kind': 'data', 'value': None, 'shape': None}, - 'op_output': {'kind': 'op', 'op': 'Result'}, -} - -graph_edges_scales = [ - ('input', 'input_data'), - ('roi', 'roi_data'), - ('scales', 'scales_data'), - ('input_data', 'onnx_resize11', {'in': 0}), - ('roi_data', 'onnx_resize11', {'in': 1}), - ('scales_data', 'onnx_resize11', {'in': 2}), - ('onnx_resize11', 'onnx_resize11_data'), - ('onnx_resize11_data', 'op_output'), -] - - -class TestONNXResize11Op(): - @pytest.mark.parametrize("input_shape, output_shape, sizes, scales",[([1, 260, 100, 150], [1, 260, 200, 350], [1, 260, 200, 350], [1.0, 1.0, 1.0, 1.0]), - ([1, 260, 100, 150], [1, 260, 200, 350], [1, 1, 200, 350], [1.0, 1.0, 1.0, 1.0]), - ([5, 14, 300, 40], [5, 14, 140, 280], [1, 1, 140, 280], [1.0, 1.0, 1.0, 1.0]), - ([5, 14, 300, 40], [5, 14, 140, 280], [5, 14, 140, 280], [1.0, 1.0, 1.0, 1.0]), - ([1, 3, 260, 100, 150], [1, 3, 780, 200, 350], [1, 3, 780, 200, 350], [1.0, 1.0, 1.0, 1.0, 1.0]), - ([1, 3, 450, 100, 150], [1, 3, 260, 200, 350], [1, 3, 260, 200, 
350], [1.0, 1.0, 1.0, 1.0, 1.0]), - ([5, 14, 1000, 300, 40], [5, 14, 500, 140, 280], [1, 1, 500, 140, 280], [1.0, 1.0, 1.0, 1.0, 1.0]), - ([5, 14, 1000, 300, 40], [5, 14, 500, 140, 280], [5, 14, 500, 140, 280], [1.0, 1.0, 1.0, 1.0, 1.0])]) - def test_onnx_resize11_using_sizes(self, input_shape, output_shape, sizes, scales): - np_scales = np.array(scales) - np_sizes = int64_array(sizes) - graph = build_graph(nodes_attrs=graph_node_attrs_sizes, - edges=graph_edges_sizes, - update_attributes={ - 'input_data': {'shape': int64_array(input_shape)}, - 'scales': {'shape': int64_array(np_scales.shape), 'value': np_scales}, - 'scales_data': {'shape': int64_array(np_scales.shape), 'value': np_scales}, - 'sizes': {'shape': int64_array(np_sizes.shape), 'value': np_sizes}, - 'sizes_data': {'shape': int64_array(np_sizes.shape), 'value': np_sizes}, - }) - node = Node(graph, 'onnx_resize11') - ONNXResize11Op.onnx_resize_infer(node) - - msg = "ONNXResize11 infer failed for case: sizes={}, scales={}, expected_shape={}, actual_shape={}" - - assert np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)),\ - msg.format(sizes, scales, output_shape, graph.node['onnx_resize11_data']['shape']) - - @pytest.mark.parametrize("input_shape, output_shape, scales", - [([1, 260, 100, 150], [1, 260, 200, 350], [1.0, 1.0, 2.0, 350 / 150]), - ([1, 3, 100, 200], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]), - ([5, 14, 300, 40], [5, 14, 140, 280], [1.0, 1.0, 140 / 300, 7.0]), - ([5, 14, 300, 40], [5, 14, 140, 560], [1.0, 1.0, 140 / 300, 14.0]), - ([1, 3, 260, 100, 150], [1, 3, 780, 200, 350], [1.0, 1.0, 3.0, 2.0, 350 / 150]), - ([1, 3, 450, 100, 150], [1, 3, 260, 200, 350], [1.0, 1.0, 260 / 450, 2.0, 350 / 150]), - ([5, 14, 1000, 300, 40], [5, 14, 500, 140, 280], [1.0, 1.0, 0.5, 140 / 300, 7.0]), - ([4, 3, 180, 1340], [4, 3, 60, 804], [1.0, 1.0, 0.33333334, 0.6]), - ([4, 3, 500, 180, 1340], [4, 3, 750, 60, 804], [1.0, 1.0, 1.5, 0.33333334, 0.6])]) - def test_onnx_resize_using_scales(self, input_shape, output_shape, scales): - np_scales = np.array(scales) - graph = build_graph(nodes_attrs=graph_node_attrs_scales, - edges=graph_edges_scales, - update_attributes={ - 'input_data': {'shape': int64_array(input_shape)}, - 'scales': {'shape': int64_array(np_scales.shape), 'value': np_scales}, - 'scales_data': {'shape': int64_array(np_scales.shape), 'value': np_scales}, - }) - node = Node(graph, 'onnx_resize11') - ONNXResize11Op.onnx_resize_infer(node) - - msg = "ONNXResize11 infer failed for case: scales={}, expected_shape={}, actual_shape={}" - - assert np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)),\ - msg.format(scales, output_shape, graph.node['onnx_resize11_data']['shape']) - - @pytest.mark.parametrize("input_shape, output_shape, sizes, scales", - [([1, 260, 100, 150], [1, 260, 200, 350], [1, 260, 200, 350], [1.0, 1.0, 1.0, 1.0]), - ([1, 260, 100, 150], [1, 260, 200, 350], [1, 1, 200, 350], [1.0, 1.0, 1.0, 1.0]), - ([5, 14, 300, 40], [5, 14, 140, 280], [1, 1, 140, 280], [1.0, 1.0, 1.0, 1.0]), - ([5, 14, 300, 40], [5, 14, 140, 280], [5, 14, 140, 280], [1.0, 1.0, 1.0, 1.0]), - ([1, 3, 260, 100, 150], [1, 3, 780, 200, 350], [1, 3, 780, 200, 350], [1.0, 1.0, 1.0, 1.0, 1.0]), - ([1, 3, 450, 100, 150], [1, 3, 260, 200, 350], [1, 3, 260, 200, 350], [1.0, 1.0, 1.0, 1.0, 1.0]), - ([5, 14, 1000, 300, 40], [5, 14, 500, 140, 280], [1, 1, 500, 140, 280], [1.0, 1.0, 1.0, 1.0, 1.0]), - ([5, 14, 1000, 300, 40], [5, 14, 500, 140, 280], [5, 14, 500, 140, 280], [1.0, 1.0, 1.0, 1.0, 
1.0])]) - def test_onnx_resize11_using_sizes_without_roi_input(self, input_shape, output_shape, sizes, scales): - np_scales = np.array(scales) - np_sizes = int64_array(sizes) - graph = build_graph(nodes_attrs=graph_node_attrs_sizes, - edges=[('input', 'input_data'), - ('sizes', 'sizes_data'), - ('input_data', 'onnx_resize11', {'in': 0}), - ('sizes_data', 'onnx_resize11', {'in': 3}), - ('onnx_resize11', 'onnx_resize11_data'), - ('onnx_resize11_data', 'op_output'), - ], - update_attributes={ - 'input_data': {'shape': int64_array(input_shape)}, - 'scales': {'shape': int64_array(np_scales.shape), 'value': np_scales}, - 'scales_data': {'shape': int64_array(np_scales.shape), 'value': np_scales}, - 'sizes': {'shape': int64_array(np_sizes.shape), 'value': np_sizes}, - 'sizes_data': {'shape': int64_array(np_sizes.shape), 'value': np_sizes}, - }) - node = Node(graph, 'onnx_resize11') - ONNXResize11Op.onnx_resize_infer(node) - - msg = "ONNXResize11 infer failed for case: sizes={}, scales={}, expected_shape={}, actual_shape={}" - - assert np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)),\ - msg.format(sizes, scales, output_shape, graph.node['onnx_resize11_data']['shape']) - - @pytest.mark.parametrize("input_shape, output_shape, scales", - [([1, 260, 100, 150], [1, 260, 200, 350], [1.0, 1.0, 2.0, 350 / 150]), - ([1, 3, 100, 200], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]), - ([5, 14, 300, 40], [5, 14, 140, 280], [1.0, 1.0, 140 / 300, 7.0]), - ([5, 14, 300, 40], [5, 14, 140, 560], [1.0, 1.0, 140 / 300, 14.0]), - ([1, 3, 260, 100, 150], [1, 3, 780, 200, 350], [1.0, 1.0, 3.0, 2.0, 350 / 150]), - ([1, 3, 450, 100, 150], [1, 3, 260, 200, 350], [1.0, 1.0, 260 / 450, 2.0, 350 / 150]), - ([5, 14, 1000, 300, 40], [5, 14, 500, 140, 280], [1.0, 1.0, 0.5, 140 / 300, 7.0]), - ([4, 3, 180, 1340], [4, 3, 60, 804], [1.0, 1.0, 0.33333334, 0.6]), - ([4, 3, 500, 180, 1340], [4, 3, 750, 60, 804], [1.0, 1.0, 1.5, 0.33333334, 0.6])]) - def test_onnx_resize_using_scales_without_roi(self, input_shape, output_shape, scales): - np_scales = np.array(scales) - graph = build_graph(nodes_attrs=graph_node_attrs_scales, - edges=[('input', 'input_data'), - ('scales', 'scales_data'), - ('input_data', 'onnx_resize11', {'in': 0}), - ('scales_data', 'onnx_resize11', {'in': 2}), - ('onnx_resize11', 'onnx_resize11_data'), - ('onnx_resize11_data', 'op_output'), - ], - update_attributes={ - 'input_data': {'shape': int64_array(input_shape)}, - 'scales': {'shape': int64_array(np_scales.shape), 'value': np_scales}, - 'scales_data': {'shape': int64_array(np_scales.shape), 'value': np_scales}, - }) - node = Node(graph, 'onnx_resize11') - ONNXResize11Op.onnx_resize_infer(node) - - msg = "ONNXResize11 infer failed for case: scales={}, expected_shape={}, actual_shape={}" - - assert np.array_equal(graph.node['onnx_resize11_data']['shape'], int64_array(output_shape)),\ - msg.format(scales, output_shape, graph.node['onnx_resize11_data']['shape']) diff --git a/tools/mo/unit_tests/mo/ops/ReduceOps_test.py b/tools/mo/unit_tests/mo/ops/ReduceOps_test.py deleted file mode 100644 index 947fc95c38ab9f..00000000000000 --- a/tools/mo/unit_tests/mo/ops/ReduceOps_test.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest -import unittest - -import numpy as np - -from openvino.tools.mo.ops.ReduceOps import reduce_infer -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, strict_compare_tensors, is_fully_defined -from 
openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, result, connect, valued_const_with_data - -nodes_attributes = { - **regular_op_with_shaped_data('data', [1, 3, 224, 224], {'type': 'Parameter', 'value': None, - '_out_port_data_type': {0: np.float32}}), - **valued_const_with_data('axis', int64_array(0)), - **regular_op_with_shaped_data('reduce_lp', None, {'op': 'ReduceLp', 'type': None, 'name': 'my_reduce_lp'}), - **regular_op_with_shaped_data('identity', None, {'op': 'Identity', 'name': 'identity'}), - **result('output'), -} - - -class TestReduceLpTest(): - @unittest.skip("Skipped due to function array_equal failure") - @pytest.mark.parametrize("shape, axes, keepdims, p",[ - ([3, 2, 2], [0], True, 1), - ([3, 2, 2], [0], True, 2), - ([3, 2, 2], [1], True, 2), - ([3, 2, 2], [2], True, 2), - ([3, 2, 2], [0], False, 1), - ([3, 2, 2], [0], False, 2), - ([3, 2, 2], [1], False, 2), - ([3, 2, 2], [2], False, 2), - ]) - def test_reduce_lp(self, shape, axes, keepdims, p): - data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape) - reduced = np.power(np.sum(a=np.abs(np.power(data, p)), axis=tuple(axes), keepdims=keepdims), 1 / p) - axis = int64_array(axes) - p = int64_array(p) - graph = build_graph(nodes_attributes, - [*connect('data', '0:reduce_lp'), - *connect('axis', '1:reduce_lp'), - *connect('reduce_lp', '0:identity'), - ('identity', 'identity_d', {'out': 0}), - ('identity_d', 'output') - ], - {'data_d': {'value': data, 'shape': data.shape}, - 'axis_d': {'value': axis, 'shape': axis.shape}, - 'reduce_lp': {'keep_dims': keepdims}}, - nodes_with_edges_only=True) - - reduce_node = Node(graph, 'reduce_lp') - reduce_node.op = reduce_node.type = 'ReduceL' + str(p) - reduce_infer(reduce_node) - assert np.array_equal(reduce_node.out_port(0).data.get_value(), reduced) - - @pytest.mark.parametrize("shape, axes, keepdims, p",[ - ([3, 2, 2], [0], True, 1), - ([3, 2, 2], [2], False, 2), - ([3, 2, 2], [0, 2], False, 2), - ]) - def test_reduce_dynamic(self, shape, axes, keepdims, p): - false_mask = np.zeros(shape) - false_mask[0][1][1] = True - data = np.ma.masked_array(np.ones(shape), mask=false_mask) - assert not is_fully_defined(data) - reduced_tensor = np.sum(data, axis=tuple(axes), keepdims=keepdims) - # create an array of all masked elements which is the expected result of the reduce of the tensor with dynamic - # values - fully_undefined = np.ma.masked_array(reduced_tensor, mask=np.ones(reduced_tensor.shape)) - axis = int64_array(axes) - p = int64_array(p) - graph = build_graph(nodes_attributes, - [*connect('data', '0:reduce_lp'), - *connect('axis', '1:reduce_lp'), - *connect('reduce_lp', '0:identity'), - ('identity', 'identity_d', {'out': 0}), - ('identity_d', 'output') - ], - {'data_d': {'value': data, 'shape': data.shape}, - 'axis_d': {'value': axis, 'shape': axis.shape}, - 'reduce_lp': {'keep_dims': keepdims}}, - nodes_with_edges_only=True) - - reduce_node = Node(graph, 'reduce_lp') - reduce_node.op = reduce_node.type = 'ReduceL' + str(p) - reduce_infer(reduce_node) - assert strict_compare_tensors(reduce_node.out_port(0).data.get_value(), fully_undefined) diff --git a/tools/mo/unit_tests/mo/ops/Reverse_test.py b/tools/mo/unit_tests/mo/ops/Reverse_test.py deleted file mode 100644 index fe16c245bf995a..00000000000000 --- a/tools/mo/unit_tests/mo/ops/Reverse_test.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy 
as np - -from openvino.tools.mo.ops.Reverse import Reverse -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'}, - 'node_1_data': {'type': 'Identity', 'kind': 'data', 'value': np.array([[1, 3, 227, 227]])}, - 'node_2': {'type': 'Identity', 'kind': 'op'}, - 'node_2_data': {'kind': 'data', 'value': np.array([1])}, - 'reverse': {'type': 'Reverse', 'kind': 'op', }, - 'node_3': {'type': 'Identity', 'kind': 'op'}, - 'op_output': { 'kind': 'op', 'op': 'Result'} - } - -class TestReverse(unittest.TestCase): - def test_reverse_infer(self): - graph = build_graph(nodes_attributes, - [ - ('node_1', 'node_1_data'), - ('node_1_data', 'reverse'), - ('node_2', 'node_2_data'), - ('node_2_data', 'reverse'), - ('reverse', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1_data': {'shape': np.array([1, 4])}, - 'reverse': {'stride': 2, - **layout_attrs()} - }) - - reverse_node = Node(graph, 'reverse') - Reverse.infer(reverse_node) - exp_shape = np.array([1, 4]) - exp_value = np.array([[227, 227, 3, 1]]) - res_shape = graph.node['node_3']['shape'] - res_value = graph.node['node_3']['value'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - for i in range(0, len(exp_value[0])): - self.assertEqual(exp_value[0][i], res_value[0][i]) diff --git a/tools/mo/unit_tests/mo/ops/__init__.py b/tools/mo/unit_tests/mo/ops/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/ops/activation_test.py b/tools/mo/unit_tests/mo/ops/activation_test.py deleted file mode 100644 index 9e0441431b362b..00000000000000 --- a/tools/mo/unit_tests/mo/ops/activation_test.py +++ /dev/null @@ -1,176 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.activation_ops import Elu, SoftPlus, Mish, Swish, SoftSign -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - - -class TestActivationOp(unittest.TestCase): - nodes_attributes = { - 'node_1': { - 'shape': np.array([4]), - 'value': None - }, - 'activation_node': { - 'op': 'Activation', - 'kind': 'op', - 'operation': None - }, - 'node_3': { - 'shape': None - } - } - - def test_activation_elu_infer(self): - graph = build_graph(self.nodes_attributes, - [ - ('node_1', 'activation_node'), - ('activation_node', 'node_3') - ], - { - 'node_1': { - 'value': np.array([6, -4, -2, -1]) - }, - 'activation_node': { - 'operation': 'elu', - 'alpha': 1.0, - }, - 'node_3': { - 'value': None - } - }) - graph.graph['layout'] = 'NCHW' - activation_node = Node(graph, 'activation_node') - Elu.infer(activation_node) - exp_shape = np.array([4]) - res_shape = graph.node['node_3']['shape'] - res_value = graph.node['node_3']['value'] - exp_value = np.array([6., -0.98168436, -0.86466472, -0.63212056]) - for i, value in enumerate(exp_shape): - self.assertEqual(res_shape[i], value) - for i, value in enumerate(exp_value): - self.assertAlmostEqual(res_value[i], value) - - def test_activation_softplus_infer(self): - graph = build_graph(self.nodes_attributes, - [ - ('node_1', 'activation_node'), - ('activation_node', 'node_3') - ], - { - 'node_1': { - 'value': np.array([-1.0, 0.0, 1.0, 20.0]) - }, - 'activation_node': { - 'op': 'SoftPlus', - 'operation': 
SoftPlus.operation, - }, - 'node_3': { - 'value': None - } - }) - graph.graph['layout'] = 'NCHW' - activation_node = Node(graph, 'activation_node') - SoftPlus.infer(activation_node) - exp_shape = np.array([4]) - res_shape = graph.node['node_3']['shape'] - res_value = graph.node['node_3']['value'] - exp_value = np.array([0.3132617, 0.6931472, 1.3132617, 20.0]) - for i, value in enumerate(exp_shape): - self.assertEqual(res_shape[i], value) - for i, value in enumerate(exp_value): - self.assertAlmostEqual(res_value[i], value) - - def test_activation_mish_infer(self): - graph = build_graph(self.nodes_attributes, - [ - ('node_1', 'activation_node'), - ('activation_node', 'node_3') - ], - { - 'node_1': { - 'value': np.array([-1.0, 0.0, 1.0, 20.0]) - }, - 'activation_node': { - 'op': 'Mish', - 'operation': Mish.operation, - }, - 'node_3': { - 'value': None - } - }) - graph.graph['layout'] = 'NCHW' - activation_node = Node(graph, 'activation_node') - Mish.infer(activation_node) - exp_shape = np.array([4]) - res_shape = graph.node['node_3']['shape'] - res_value = graph.node['node_3']['value'] - exp_value = np.array([-0.30340146, 0.0, 0.8650984, 20.0]) - for i, value in enumerate(exp_shape): - self.assertEqual(res_shape[i], value) - for i, value in enumerate(exp_value): - self.assertAlmostEqual(res_value[i], value) - - def test_activation_swish_infer(self): - graph = build_graph(self.nodes_attributes, - [ - ('node_1', 'activation_node'), - ('activation_node', 'node_3') - ], - { - 'node_1': { - 'value': np.array([-1.0, 0.0, 1.0, 20.0]) - }, - 'activation_node': { - 'op': 'Swish', - }, - 'node_3': { - 'value': None - } - }) - graph.graph['layout'] = 'NCHW' - activation_node = Node(graph, 'activation_node') - Swish.infer(activation_node) - exp_shape = np.array([4]) - res_shape = graph.node['node_3']['shape'] - res_value = graph.node['node_3']['value'] - exp_value = np.array([-0.26894142, 0.0, 0.73105858, 19.99999996]) - for i, value in enumerate(exp_shape): - self.assertEqual(res_shape[i], value) - for i, value in enumerate(exp_value): - self.assertAlmostEqual(res_value[i], value) - - def test_activation_softsign_infer(self): - graph = build_graph(self.nodes_attributes, - edges=[ - ('node_1', 'activation_node'), - ('activation_node', 'node_3') - ], - update_attributes={ - 'node_1': { - 'value': np.array([1.0, -1.0, 3.5, -5.8]) - }, - 'activation_node': { - 'op': 'SoftSign', - 'operation': SoftSign.operation - }, - 'node_3': { - 'value': None - } - }) - graph.graph['layout'] = 'NCHW' - activation_node = Node(graph, 'activation_node') - SoftSign.infer(activation_node) - exp_shape = np.array([4]) - res_shape = graph.nodes['node_3']['shape'] - res_value = graph.nodes['node_3']['value'] - exp_value = np.array([0.5, -0.5, 0.7777777777777, -0.85294117647]) - for i, value in enumerate(exp_shape): - self.assertEqual(res_shape[i], value) - for i, value in enumerate(exp_value): - self.assertAlmostEqual(res_value[i], value) diff --git a/tools/mo/unit_tests/mo/ops/argmax_test.py b/tools/mo/unit_tests/mo/ops/argmax_test.py deleted file mode 100644 index 5a55528495f7f4..00000000000000 --- a/tools/mo/unit_tests/mo/ops/argmax_test.py +++ /dev/null @@ -1,138 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.argmax import arg_ops_infer -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'op_input': {'kind': 'op', 'op': 'Parameter'}, - 
'node_1': {'kind': 'data'}, - 'argmax': {'op': 'ArgMax', 'kind': 'op'}, - 'node_3': {'kind': 'data', 'value': None}, - 'op_output': {'kind': 'op', 'op': 'Result'} - } - - -class TestArgMaxOp(unittest.TestCase): - def test_caffe_argmax_axis(self): - graph = build_graph(nodes_attributes, - [ - ('op_input', 'node_1'), - ('node_1', 'argmax'), - ('argmax', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 1025, 2049])}, - 'argmax': { - 'out_max_val': True, - 'top_k': 100, - 'axis': 2 - } - }) - - argmax_node = Node(graph, 'argmax') - arg_ops_infer(argmax_node) - exp_shape = np.array([1, 3, 100, 2049]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_caffe_argmax_axis_negative(self): - graph = build_graph(nodes_attributes, - [ - ('op_input', 'node_1'), - ('node_1', 'argmax'), - ('argmax', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 1025, 2049])}, - 'argmax': { - 'out_max_val': True, - 'top_k': 100, - 'axis': -1 - } - }) - - argmax_node = Node(graph, 'argmax') - arg_ops_infer(argmax_node) - exp_shape = np.array([1, 3, 1025, 100]) - res_shape = graph.node['node_3']['shape'] - self.assertEqual(argmax_node.axis, 3) - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_caffe_argmax_no_axis(self): - graph = build_graph(nodes_attributes, - [ - ('op_input', 'node_1'), - ('node_1', 'argmax'), - ('argmax', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 1025, 2049])}, - 'argmax': { - 'out_max_val': True, - 'top_k': 100 - } - }) - - argmax_node = Node(graph, 'argmax') - arg_ops_infer(argmax_node) - exp_shape = np.array([1, 2, 100, 1]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_caffe_argmax_extend_shape(self): - graph = build_graph(nodes_attributes, - [ - ('op_input', 'node_1'), - ('node_1', 'argmax'), - ('argmax', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3])}, - 'argmax': { - 'out_max_val': True, - 'top_k': 100 - } - }) - - argmax_node = Node(graph, 'argmax') - arg_ops_infer(argmax_node) - exp_shape = np.array([1, 2, 100]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_caffe_argmax_out_max_val_false(self): - graph = build_graph(nodes_attributes, - [ - ('op_input', 'node_1'), - ('node_1', 'argmax'), - ('argmax', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3])}, - 'argmax': { - 'out_max_val': False, - 'top_k': 100 - } - }) - - argmax_node = Node(graph, 'argmax') - arg_ops_infer(argmax_node) - exp_shape = np.array([1, 1, 100]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) diff --git a/tools/mo/unit_tests/mo/ops/assert_test.py b/tools/mo/unit_tests/mo/ops/assert_test.py deleted file mode 100644 index 96a12be4d88c35..00000000000000 --- a/tools/mo/unit_tests/mo/ops/assert_test.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import Mock - -from openvino.tools.mo.ops.assert_op import Assert 
-from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph_with_edge_attrs - - -class TestAssert(unittest.TestCase): - def test_assert_cf_true(self): - me_mock = Mock() - nodes = { - 'input_data': {'kind': 'data', 'executable': True}, - 'assert': {'type': 'Assert', 'value': None, 'kind': 'op', 'op': 'Assert'}, - 'assert_data': {'value': True, 'kind': 'data', 'executable': True}} - edges = [ - ('input_data', 'assert', {'in': 0}), - ('assert', 'assert_data', {'out': 0, 'control_flow_edge': False})] - graph = build_graph_with_edge_attrs(nodes, edges) - tested_class = Assert(graph=graph, attrs={}) - node = Node(graph, 'assert') - tested_class.assert_control_flow_infer(node=node, is_executable=True, mark_executability=me_mock) - me_mock.assert_called_once_with('assert_data', True) - - def test_assert_cf_false(self): - me_mock = Mock() - nodes = { - 'input_data': {'name': 'input', 'kind': 'data', 'executable': True}, - 'assert': {'name': 'assert', 'type': 'Assert', 'value': None, 'kind': 'op', 'op': 'Assert'}, - 'assert_data': {'name': 'output', 'value': False, 'kind': 'data', 'executable': True}} - edges = [ - ('input_data', 'assert', {'in': 0}), - ('assert', 'assert_data', {'out': 0, 'control_flow_edge': False})] - graph = build_graph_with_edge_attrs(nodes, edges) - tested_class = Assert(graph=graph, attrs={}) - node = Node(graph, 'assert') - tested_class.assert_control_flow_infer(node=node, is_executable=True, mark_executability=me_mock) - me_mock.assert_called_once_with('assert_data', False) diff --git a/tools/mo/unit_tests/mo/ops/block_lstm_test.py b/tools/mo/unit_tests/mo/ops/block_lstm_test.py deleted file mode 100644 index 2b5271d3a668e8..00000000000000 --- a/tools/mo/unit_tests/mo/ops/block_lstm_test.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.front.common.partial_infer.utils import strict_compare_tensors, shape_array, dynamic_dimension -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.BlockLSTM import BlockLSTM -from unit_tests.utils.graph import build_graph, result, connect, regular_op_with_shaped_data, regular_op_with_empty_data - - -class TestBlockLSTM(unittest.TestCase): - - def run_test(self, time_len, batch_size, num_inputs, hidden_size, ref_hidden_states_shape, ref_cell_states_shape): - nodes = { - **regular_op_with_shaped_data('x', [time_len, batch_size, num_inputs], {'type': 'Parameter'}), - **regular_op_with_shaped_data('weights', [num_inputs, hidden_size * 4], {'type': 'Parameter'}), - **regular_op_with_shaped_data('bias', [hidden_size * 4], {'type': 'Parameter'}), - **regular_op_with_shaped_data('init_hidden_state', [hidden_size * 4], {'type': 'Parameter'}), - **regular_op_with_shaped_data('init_cell_state', [hidden_size * 4], {'type': 'Parameter'}), - **regular_op_with_empty_data('block_lstm', {'op': 'BlockLSTM'}), - **result('hidden_states'), - **result('cell_states'), - } - - edges = [ - *connect('x', '0:block_lstm'), - *connect('weights', '1:block_lstm'), - *connect('bias', '2:block_lstm'), - *connect('init_hidden_state', '3:block_lstm'), - *connect('init_cell_state', '4:block_lstm'), - *connect('block_lstm:0', 'hidden_states'), - *connect('block_lstm:1', 'cell_states') - ] - - graph = build_graph(nodes, edges) - node = Node(graph, 'block_lstm') - BlockLSTM.infer(node) - hidden_states_shape = node.out_port(0).data.get_shape() - cell_states_shape = node.out_port(1).data.get_shape() - 
self.assertTrue(strict_compare_tensors(hidden_states_shape, ref_hidden_states_shape)) - self.assertTrue(strict_compare_tensors(cell_states_shape, ref_cell_states_shape)) - - def test_block_lstm_basic_infer(self): - self.run_test(time_len=4, batch_size=2, num_inputs=40, hidden_size=60, - ref_hidden_states_shape=shape_array([4, 2, 60]), ref_cell_states_shape=shape_array([4, 2, 60])) - - def test_block_lstm_dynamic_infer(self): - self.run_test(time_len=dynamic_dimension, batch_size=dynamic_dimension, num_inputs=40, hidden_size=60, - ref_hidden_states_shape=shape_array([dynamic_dimension, dynamic_dimension, 60]), - ref_cell_states_shape=shape_array([dynamic_dimension, dynamic_dimension, 60])) - - def test_failed_three_outputs(self): - nodes = { - **regular_op_with_shaped_data('x', [30, 3, 70], {'type': 'Parameter'}), - **regular_op_with_shaped_data('weights', [70, 120], {'type': 'Parameter'}), - **regular_op_with_shaped_data('bias', [120], {'type': 'Parameter'}), - **regular_op_with_shaped_data('init_hidden_state', [120], {'type': 'Parameter'}), - **regular_op_with_shaped_data('init_cell_state', [120], {'type': 'Parameter'}), - **regular_op_with_empty_data('block_lstm', {'op': 'BlockLSTM'}), - **result('hidden_states'), - **result('cell_states'), - **result('f'), - } - - edges = [ - *connect('x', '0:block_lstm'), - *connect('weights', '1:block_lstm'), - *connect('bias', '2:block_lstm'), - *connect('init_hidden_state', '3:block_lstm'), - *connect('init_cell_state', '4:block_lstm'), - *connect('block_lstm:0', 'hidden_states'), - *connect('block_lstm:1', 'cell_states'), - *connect('block_lstm:2', 'f') - ] - - graph = build_graph(nodes, edges) - node = Node(graph, 'block_lstm') - with self.assertRaisesRegex(AssertionError, 'Internal Model Optimizer Error or unsupported BlockLSTM*'): - BlockLSTM.infer(node) diff --git a/tools/mo/unit_tests/mo/ops/broadcast_test.py b/tools/mo/unit_tests/mo/ops/broadcast_test.py deleted file mode 100644 index bded97d9cb9ff9..00000000000000 --- a/tools/mo/unit_tests/mo/ops/broadcast_test.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, undefined_shape_of_rank -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.broadcast import Broadcast -from unit_tests.utils.graph import build_graph, valued_const_with_data, regular_op_with_empty_data, \ - shaped_data - - -class TestBroadcastTest(): - @pytest.mark.parametrize("data, target_shape, axes_mapping, mode, ref_out, test_raising",[ - ([1], [3, 3], None, 'numpy', [[1, 1, 1], [1, 1, 1], [1, 1, 1]], False), - ([1], [3, 3], None, 'numpy', None, False), - - # shape broadcasting - ([1], [1, 2], [0], 'explicit', None, False), - ([1], [1, 2], [-2], 'explicit', None, False), - ([1, 7], [5, 1, 7, 3], [1, 2], 'explicit', None, False), - ([2, 1, 3], [2, 1, 3, 3], [0, 1, 2], 'explicit', None, False), - ([2, 1, 3], [5, 2, 1, 3], [1, 2, 3], 'explicit', None, False), - - # value broadcasting - ([1], [1, 2], [0], 'explicit', [[1, 1]], False), - - ([[3, 1]], [2, 1, 2], [1, 2], 'explicit', [[[3, 1]], [[3, 1]]], False), # ref_shape (2, 1, 2) - - ([[3, 1]], [2, 1, 2], [-2, -1], 'explicit', [[[3, 1]], [[3, 1]]], False), # ref_shape (2, 1, 2) - - ([[[9, 5, 7]], [[9, 5, 7]]], [2, 2, 1, 3], [1, 2, 3], 'explicit', # in_shape (2, 1, 3) - [[[[9, 5, 7]], [[9, 5, 7]]], [[[9, 5, 7]], [[9, 5, 7]]]], False), # ref_out_shape (2, 2, 1, 3) - - ([[[9, 5, 
7]], [[3, 4, 8]]], [2, 1, 3, 3], [0, 1, 2], 'explicit', # in_shape (2, 1, 3) - [[[[9, 9, 9], [5, 5, 5], [7, 7, 7]]], [[[3, 3, 3], [4, 4, 4], [8, 8, 8]]]], False), # ref_out_shape (2, 1, 3, 3) - - # negative tests - ([1], [2, 2], [0], 'explicit', None, True), - ([1, 7], [5, 2, 7, 3], [1, 2], 'explicit', None, True), - ([1, 7], [5, 2, 7, 3], [2, 1], 'explicit', None, True), - ([1, 7], [5, 2, 7, 3], [-3, -2], 'explicit', None, True), - ]) - def test_broadcast(self, data, target_shape, axes_mapping, mode, ref_out, test_raising): - if ref_out is not None: - input = valued_const_with_data('data', int64_array(data)) - else: - input = shaped_data('data', int64_array(data)) - - nodes = { - **input, - **valued_const_with_data('target_shape', int64_array(target_shape)), - **regular_op_with_empty_data('broadcast', {'op': 'Broadcast', 'mode': mode}), - } - - edges = [('data', 'broadcast'), - ('target_shape', 'broadcast'), - ('broadcast', 'broadcast_d')] - - if axes_mapping is not None: - nodes.update(**valued_const_with_data('axes_mapping', int64_array(axes_mapping))) - edges.append(('axes_mapping', 'broadcast')) - graph = build_graph(nodes, edges) - - broadcast_node = Node(graph, 'broadcast') - if test_raising: - with pytest.raises(AssertionError): - Broadcast.infer(broadcast_node) - return - - Broadcast.infer(broadcast_node) - if ref_out is not None: - assert np.array_equal(broadcast_node.out_node().value, np.array(ref_out)) - else: - assert np.array_equal(broadcast_node.out_node().shape, np.array(target_shape)) - - @pytest.mark.parametrize("data, target_shape_shape, axes_mapping, mode, ref_out_shape, test_raising",[ - ([1], [3], [0], 'explicit', undefined_shape_of_rank(3), False), - ([1], [3], None, 'numpy', undefined_shape_of_rank(3), False), - ([1], [3], None, 'bidirectional', undefined_shape_of_rank(3),False), - ([1, 7], [4], [1, 2], 'explicit', undefined_shape_of_rank(4), False), - ([1, 2], [3], None, 'numpy', undefined_shape_of_rank(3),False), - ([1, 1], [2], None, 'bidirectional', undefined_shape_of_rank(2), False), - ([1, 1], [2, 1], None, 'numpy', None, True), - ]) - def test_broadcast_dynamic(self, data, target_shape_shape, axes_mapping, mode, ref_out_shape, test_raising): - nodes = { - **shaped_data('data', int64_array(data)), - **shaped_data('target_shape', int64_array(target_shape_shape)), - **regular_op_with_empty_data('broadcast', {'op': 'Broadcast', 'mode': mode}), - } - - edges = [('data', 'broadcast'), - ('target_shape', 'broadcast'), - ('broadcast', 'broadcast_d')] - - if axes_mapping is not None: - nodes.update(**valued_const_with_data('axes_mapping', int64_array(axes_mapping))) - edges.append(('axes_mapping', 'axes_mapping_d')) - edges.append(('axes_mapping_d', 'broadcast')) - graph = build_graph(nodes, edges) - - broadcast_node = Node(graph, 'broadcast') - if test_raising: - with pytest.raises(AssertionError): - Broadcast.infer(broadcast_node) - return - - Broadcast.infer(broadcast_node) - assert np.array_equal(broadcast_node.out_node().shape, ref_out_shape) diff --git a/tools/mo/unit_tests/mo/ops/bucketize_test.py b/tools/mo/unit_tests/mo/ops/bucketize_test.py deleted file mode 100644 index b9b3589bef47df..00000000000000 --- a/tools/mo/unit_tests/mo/ops/bucketize_test.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.bucketize import Bucketize -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from 
openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'input_tensor': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_buckets': {'shape': None, 'value': None, 'kind': 'data'}, - 'bucketize_node': {'op': 'Bucketize', 'kind': 'op', 'with_right_bound': False, 'output_type': np.int32}, - 'output': {'shape': None, 'value': None, 'kind': 'data'}} - -# graph 1 -edges1 = [('input_tensor', 'bucketize_node', {'in': 0}), - ('input_buckets', 'bucketize_node', {'in': 1}), - ('bucketize_node', 'output', {'out': 0})] - -inputs1 = {'input_tensor': {'shape': int64_array([4]), 'value': np.array([0.2, 6.4, 3.0, 1.6])}, - 'input_buckets': {'shape': int64_array([5]), 'value': np.array([0.0, 1.0, 2.5, 4.0, 10.0])}} - -inputs2 = {'input_tensor': {'shape': int64_array([4]), 'value': np.array([0.2, 6.4, 3.0, 1.6])}, - 'input_buckets': {'shape': int64_array([5]), 'value': np.array([])}} - -inputs3 = {'input_tensor': {'shape': int64_array([10, 40]), 'value': None}, - 'input_buckets': {'shape': int64_array([5]), 'value': None}} - -class TestBucketize(unittest.TestCase): - def test_infer1(self): - graph = build_graph(nodes_attributes, edges1, inputs1) - bucketize_node = Node(graph, 'bucketize_node') - Bucketize.infer(bucketize_node) - - # prepare reference results - ref_output_value = np.array([1, 4, 3, 2], dtype=np.int32) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(ref_output_value, res_output_value), - 'values do not match expected: {} and given: {}'.format(ref_output_value, res_output_value)) - - def test_infer2(self): - graph = build_graph(nodes_attributes, edges1, inputs2) - bucketize_node = Node(graph, 'bucketize_node') - Bucketize.infer(bucketize_node) - - # prepare reference results - ref_output_value = np.array([0, 0, 0, 0], dtype=np.int32) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(ref_output_value, res_output_value), - 'values do not match expected: {} and given: {}'.format(ref_output_value, res_output_value)) - - def test_partial_infer1(self): - graph = build_graph(nodes_attributes, edges1, inputs3) - bucketize_node = Node(graph, 'bucketize_node') - Bucketize.infer(bucketize_node) - - # prepare reference results - ref_output_shape = np.array([10, 40], dtype=np.int32) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) diff --git a/tools/mo/unit_tests/mo/ops/cast_test.py b/tools/mo/unit_tests/mo/ops/cast_test.py deleted file mode 100644 index db48340190c96e..00000000000000 --- a/tools/mo/unit_tests/mo/ops/cast_test.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.Cast import Cast -from openvino.tools.mo.middle.passes.convert_data_type import packed_U4, packed_I4 -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import valued_const_with_data, regular_op_with_empty_data, result, build_graph, connect - -nodes = lambda value, dst_type: { - **valued_const_with_data('value', np.array(value)), - **regular_op_with_empty_data('convert', {'dst_type': dst_type, 'infer': Cast.infer}), - 
**result(), -} - - -class TestCastTest(): - """ - Example of checking: - 7 == 0111, padded to 00000111, results in 7 - 7 == 0111, 8 == 1000 packed to 10000111, results in 7+16 - - -8 == 1000, padded to 00001000, results in 8 - """ - - @pytest.mark.parametrize("value, expected, custom_dtype", - [([i], [i], packed_U4) for i in range(16)] + - [([i, 15-i], [i + (15-i)*16], packed_U4) for i in range(16)] + - [([-i], [16-i], packed_I4) for i in range(1, 8+1)] + - [([i], [i], packed_I4) for i in range(8)] + - [([-i-1, i], [16-i-1 + 16*i], packed_I4) for i in range(8)] + - [([i, -i-1], [i + 16*(16-i-1)], packed_I4) for i in range(8)] - ) - def test_custom_value_propagation(self, value, expected, custom_dtype): - graph = build_graph(nodes(value, custom_dtype), [ - *connect('value', 'convert'), *connect('convert', 'output'), - ]) - partial_infer(graph) - - graph_ref = build_graph(nodes(value, custom_dtype), [ - *connect('value', 'convert'), *connect('convert', 'output')], - {'convert_d': {'force_type': custom_dtype, 'force_shape': np.array(value).shape, - 'value': expected}}) - - (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True) - assert flag, resp diff --git a/tools/mo/unit_tests/mo/ops/concat_test.py b/tools/mo/unit_tests/mo/ops/concat_test.py deleted file mode 100644 index 29cea71208e6d0..00000000000000 --- a/tools/mo/unit_tests/mo/ops/concat_test.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.concat import concat_infer -from openvino.tools.mo.ops.concat import Concat -from unit_tests.utils.graph import build_graph - - -class TestConcatOp(unittest.TestCase): - nodes_attributes = { - 'node_1': { - 'shape': np.array([227, 227, 227, 227]) - }, - 'concat_node': { - }, - 'node_3': { - 'kind': 'data' - } - } - - def test_concat_op(self): - graph = build_graph(self.nodes_attributes, - [ - ('node_1', 'concat_node'), - ('concat_node', 'node_3') - ]) - concat_node = Concat(graph, self.nodes_attributes['concat_node']).add_node() - self.assertEqual(concat_node.type, 'Concat') - self.assertEqual(concat_node.op, 'Concat') - self.assertEqual(concat_node.infer, concat_infer) diff --git a/tools/mo/unit_tests/mo/ops/convolution_test.py b/tools/mo/unit_tests/mo/ops/convolution_test.py deleted file mode 100644 index f5e9a547e2702e..00000000000000 --- a/tools/mo/unit_tests/mo/ops/convolution_test.py +++ /dev/null @@ -1,460 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.convolution import Convolution -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.extractors import FakeValue -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'conv_input': {'value': None, 'kind': 'data'}, - 'conv_node': {'type': 'Convolution', 'kind': 'op'}, - 'conv_weights': {'value': FakeValue(None), 'kind': 'data'}, - 'conv_output': {'value': None, 'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'} - } - - -class TestConvolutionPartialInfer(unittest.TestCase): - def test_caffe_conv2d_infer(self): - graph = build_graph(nodes_attributes, - [('conv_input', 'conv_node'), - ('conv_weights', 'conv_node'), - 
('conv_node', 'conv_output'), - ('conv_output', 'op_output') - ], - {'conv_output': {'shape': None}, - 'conv_input': {'shape': np.array([1, 3, 227, 227])}, - 'conv_weights': {'shape': np.array([64, 3, 3, 3]), - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False, - 'output_spatial_shape': None, 'output_shape': None, - 'stride': np.array([1, 1, 1, 1]), 'group': 1, - 'kernel_spatial_idx': np.array([2, 3]), - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'output': 64, 'kernel_spatial': np.array([3, 3]), - 'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]), - 'batch_dims': np.array([0])} - }) - - conv_node = Node(graph, 'conv_node') - Convolution.infer(conv_node) - exp_shape = np.array([1, 64, 225, 225]) - res_shape = graph.node['conv_output']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_caffe_conv2d_dynamic_input_infer(self): - graph = build_graph(nodes_attributes, - [('conv_input', 'conv_node'), - ('conv_weights', 'conv_node'), - ('conv_node', 'conv_output'), - ('conv_output', 'op_output') - ], - {'conv_output': {'shape': None}, - 'conv_input': {'shape': shape_array([1, 3, dynamic_dimension_value, 227])}, - 'conv_weights': {'shape': np.array([64, 3, 3, 3]), - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False, - 'output_spatial_shape': None, 'output_shape': None, - 'stride': np.array([1, 1, 1, 1]), 'group': 1, - 'kernel_spatial_idx': np.array([2, 3]), - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'output': 64, 'kernel_spatial': np.array([3, 3]), - 'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]), - 'batch_dims': np.array([0])} - }) - - conv_node = Node(graph, 'conv_node') - Convolution.infer(conv_node) - exp_shape = shape_array([1, 64, dynamic_dimension_value, 225]) - res_shape = graph.node['conv_output']['shape'] - self.assertTrue(strict_compare_tensors(exp_shape, res_shape)) - - def test_caffe_conv2d_infer_no_shape(self): - graph = build_graph(nodes_attributes, - [('conv_input', 'conv_node'), - ('conv_weights', 'conv_node'), - ('conv_node', 'conv_output'), - ('conv_output', 'op_output') - ], - {'conv_output': {'shape': None}, - 'conv_input': {'shape': None}, - 'conv_weights': {'shape': None, - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False, - 'output_spatial_shape': None, 'output_shape': None, - 'stride': np.array([1, 1, 1, 1]), 'group': 1, - 'output': 64, 'kernel_spatial': np.array([3, 3]), - 'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]), - 'batch_dims': np.array([0])} - }) - - conv_node = Node(graph, 'conv_node') - with self.assertRaisesRegex(Error, "Input data shape is None for node.*"): - Convolution.infer(conv_node) - - def test_deconv_infer_ideal(self): - graph = build_graph(nodes_attributes, - [('conv_input', 'conv_node'), - ('conv_weights', 'conv_node'), - ('conv_node', 'conv_output'), - 
('conv_output', 'op_output') - ], - {'conv_output': {'shape': None}, - 'conv_input': {'shape': np.array([1, 21, 16, 16])}, - 'conv_weights': {'shape': np.array([1, 21, 4, 4]), - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_node': {#'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]), - 'channel_dims': np.array([1]), 'bias_addable': True, 'bias_term': False, - 'batch_dims': np.array([0]), - 'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'kernel_spatial': np.array([4, 4]), 'output_spatial_shape': None, - 'kernel_spatial_idx': np.array([2, 3]), - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'output_padding': np.array([0, 0, 1, 1]), - 'type': 'Deconvolution', 'output': 21, 'dilation': np.array([1, 1, 1, 1]), - 'group': 1, 'stride': np.array([1, 1, 2, 2]), 'output_shape': None} - }) - - deconv_node = Node(graph, 'conv_node') - - Convolution.infer(deconv_node) - res_shape = deconv_node['output_shape'] - exp_shape = np.array([1, 21, 35, 35]) - - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - # Check that after double infer shape and pad attrs do not changes - Convolution.infer(deconv_node) - - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_deconv_dynamic_infer_ideal(self): - graph = build_graph(nodes_attributes, - [('conv_input', 'conv_node'), - ('conv_weights', 'conv_node'), - ('conv_node', 'conv_output'), - ('conv_output', 'op_output') - ], - {'conv_output': {'shape': None}, - 'conv_input': {'shape': shape_array([1, 21, dynamic_dimension_value, 16])}, - 'conv_weights': {'shape': np.array([1, 21, 4, 4]), - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_node': {#'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]), - 'channel_dims': np.array([1]), 'bias_addable': True, 'bias_term': False, - 'batch_dims': np.array([0]), - 'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'kernel_spatial': np.array([4, 4]), 'output_spatial_shape': None, - 'kernel_spatial_idx': np.array([2, 3]), - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'output_padding': np.array([0, 0, 1, 1]), - 'type': 'Deconvolution', 'output': 21, 'dilation': np.array([1, 1, 1, 1]), - 'group': 1, 'stride': np.array([1, 1, 2, 2]), 'output_shape': None} - }) - - deconv_node = Node(graph, 'conv_node') - - Convolution.infer(deconv_node) - res_shape = deconv_node['output_shape'] - exp_shape = shape_array([1, 21, dynamic_dimension_value, 35]) - - self.assertTrue(strict_compare_tensors(exp_shape, res_shape)) - - # Check that after double infer shape and pad attrs do not changes - Convolution.infer(deconv_node) - - self.assertTrue(strict_compare_tensors(exp_shape, res_shape)) - - def test_deconv_infer_no_shape(self): - graph = build_graph(nodes_attributes, - [('conv_input', 'conv_node'), - ('conv_weights', 'conv_node'), - ('conv_node', 'conv_output'), - ('conv_output', 'op_output') - ], - {'conv_output': {'shape': None}, - 'conv_input': {'shape': None}, - 'conv_weights': {'shape': np.array([1, 21, 16, 16]), - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_node': {'spatial_dims': np.array([2, 3]), 'batch_dims': np.array([0]), - 'channel_dims': np.array([1]), - 'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'kernel_spatial': np.array([4, 4]), 'output_spatial_shape': None, - 'kernel_spatial_idx': np.array([2, 3]), - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'type': 'Deconvolution', 'output': 21, 
'dilation': np.array([1, 1, 1, 1]), - 'group': 1, 'stride': np.array([1, 1, 2, 2]), 'output_shape': None} - }) - - deconv_node = Node(graph, 'conv_node') - with self.assertRaisesRegex(Error, "Input data shape is None for node.*"): - Convolution.infer(deconv_node) - - def test_conv_infer_set_default_attrs_nchw(self): - graph = build_graph(nodes_attributes, - [ - ('conv_input', 'conv_node'), - ('conv_weights', 'conv_node'), - ('conv_node', 'conv_output'), - ('conv_output', 'op_output') - ], - { - 'conv_output': { - 'shape': None - }, - 'conv_input': { - 'shape': int64_array([1, 3, 224, 224]) - }, - 'conv_weights': { - 'shape': int64_array([3, 64, 7, 7]), - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis'] - }, - 'conv_node': { - 'type': 'Convolution', - 'bias_term': None, - 'stride': None, - 'dilation': None, - - 'batch_dims': int64_array([0]), - 'channel_dims': int64_array([1]), - - 'output_spatial_shape': None, - - 'input_feature_channel': 0, - 'output_feature_channel': 1, - - 'group': 1, - 'output_shape': None, - 'layout': 'NCHW' - } - }) - - conv_node = Node(graph, 'conv_node') - conv_output = Node(graph, 'conv_output') - - Convolution.infer(conv_node) - - # Check bias_term attribute - self.assertTrue(conv_node.has_valid('bias_term')) - self.assertTrue(not conv_node.bias_term) - # Check kernel_spatial_idx attr detection - self.assertTrue(conv_node.has_valid('kernel_spatial_idx')) - self.assertTrue(np.array_equal(int64_array([2, 3]), conv_node.kernel_spatial_idx)) - # Check spatial_dims attr detection - self.assertTrue(conv_node.has_valid('spatial_dims')) - self.assertTrue(np.array_equal(int64_array([2, 3]), conv_node.spatial_dims)) - # Check kernel_spatial attr detection - self.assertTrue(conv_node.has_valid('kernel_spatial')) - self.assertTrue(np.array_equal(int64_array([7, 7]), conv_node.kernel_spatial)) - # Check output attribute - self.assertTrue(conv_node.has_valid('output')) - self.assertEqual(64, conv_node.output) - # Check dilation value. Should be set to default - self.assertTrue(conv_node.has_valid('dilation')) - self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.dilation)) - # Check stride value. Should be set to default - self.assertTrue(conv_node.has_valid('stride')) - self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.stride)) - # Check pad value. 
Should be set to default - self.assertTrue(conv_node.has_valid('pad')) - self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]), conv_node.pad)) - # Check pad_spatial_shape - self.assertTrue(conv_node.has_valid('pad_spatial_shape')) - self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0]]), conv_node.pad_spatial_shape)) - # Check resulting output shape - self.assertTrue(np.array_equal(int64_array([1, 64, 218, 218]), conv_output.shape)) - - def test_conv_infer_set_default_attrs_nhwc(self): - graph = build_graph(nodes_attributes, - [ - ('conv_input', 'conv_node'), - ('conv_weights', 'conv_node'), - ('conv_node', 'conv_output'), - ('conv_output', 'op_output') - ], - { - 'conv_output': { - 'shape': None - }, - 'conv_input': { - 'shape': int64_array([1, 224, 224, 3]) - }, - 'conv_weights': { - 'shape': int64_array([3, 64, 7, 7]), - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis'] - }, - 'conv_node': { - 'type': 'Convolution', - 'bias_term': None, - 'stride': None, - 'dilation': None, - - 'batch_dims': int64_array([0]), - 'channel_dims': int64_array([3]), - - 'output_spatial_shape': None, - - 'input_feature_channel': 0, - 'output_feature_channel': 1, - - 'group': 1, - 'output_shape': None, - 'layout': 'NHWC' - } - }) - - conv_node = Node(graph, 'conv_node') - conv_output = Node(graph, 'conv_output') - - Convolution.infer(conv_node) - - # Check bias_term attribute - self.assertTrue(conv_node.has_valid('bias_term')) - self.assertTrue(not conv_node.bias_term) - # Check kernel_spatial_idx attr detection - self.assertTrue(conv_node.has_valid('kernel_spatial_idx')) - self.assertTrue(np.array_equal(int64_array([2, 3]), conv_node.kernel_spatial_idx)) - # Check spatial_dims attr detection - self.assertTrue(conv_node.has_valid('spatial_dims')) - self.assertTrue(np.array_equal(int64_array([1, 2]), conv_node.spatial_dims)) - # Check kernel_spatial attr detection - self.assertTrue(conv_node.has_valid('kernel_spatial')) - self.assertTrue(np.array_equal(int64_array([7, 7]), conv_node.kernel_spatial)) - # Check output attribute - self.assertTrue(conv_node.has_valid('output')) - self.assertEqual(64, conv_node.output) - # Check dilation value. Should be set to default - self.assertTrue(conv_node.has_valid('dilation')) - self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.dilation)) - # Check stride value. Should be set to default - self.assertTrue(conv_node.has_valid('stride')) - self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1]), conv_node.stride)) - # Check pad value. 
Should be set to default - self.assertTrue(conv_node.has_valid('pad')) - self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]), conv_node.pad)) - # Check pad_spatial_shape - self.assertTrue(conv_node.has_valid('pad_spatial_shape')) - self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0]]), conv_node.pad_spatial_shape)) - # Check resulting output shape - self.assertTrue(np.array_equal(int64_array([1, 218, 218, 64]), conv_output.shape)) - - def test_conv_infer_3D_convolution(self): - graph = build_graph(nodes_attributes, - [ - ('conv_input', 'conv_node'), - ('conv_weights', 'conv_node'), - ('conv_node', 'conv_output'), - ('conv_output', 'op_output') - ], - { - 'conv_output': { - 'shape': None - }, - 'conv_input': { - 'shape': int64_array([1, 3, 16, 224, 224]) - }, - 'conv_weights': { - 'shape': int64_array([3, 64, 1, 7, 7]), - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis'] - }, - 'conv_node': { - 'type': 'Convolution', - 'bias_term': None, - 'stride': None, - 'dilation': None, - - 'batch_dims': int64_array([0]), - 'channel_dims': int64_array([1]), - - 'output_spatial_shape': None, - - 'input_feature_channel': 0, - 'output_feature_channel': 1, - - 'group': 1, - 'output_shape': None, - 'layout': 'NCHW' - } - }) - - conv_node = Node(graph, 'conv_node') - conv_output = Node(graph, 'conv_output') - - Convolution.infer(conv_node) - - # Check bias_term attribute - self.assertTrue(conv_node.has_valid('bias_term')) - self.assertTrue(not conv_node.bias_term) - # Check kernel_spatial_idx attr detection - self.assertTrue(conv_node.has_valid('kernel_spatial_idx')) - self.assertTrue(np.array_equal(int64_array([2, 3, 4]), conv_node.kernel_spatial_idx)) - # Check spatial_dims attr detection - self.assertTrue(conv_node.has_valid('spatial_dims')) - self.assertTrue(np.array_equal(int64_array([2, 3, 4]), conv_node.spatial_dims)) - # Check kernel_spatial attr detection - self.assertTrue(conv_node.has_valid('kernel_spatial')) - self.assertTrue(np.array_equal(int64_array([1, 7, 7]), conv_node.kernel_spatial)) - # Check output attribute - self.assertTrue(conv_node.has_valid('output')) - self.assertEqual(64, conv_node.output) - # Check dilation value. Should be set to default - self.assertTrue(conv_node.has_valid('dilation')) - self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1, 1]), conv_node.dilation)) - # Check stride value. Should be set to default - self.assertTrue(conv_node.has_valid('stride')) - self.assertTrue(np.array_equal(int64_array([1, 1, 1, 1, 1]), conv_node.stride)) - # Check pad value. 
Should be set to default - self.assertTrue(conv_node.has_valid('pad')) - self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]), conv_node.pad)) - # Check pad_spatial_shape - self.assertTrue(conv_node.has_valid('pad_spatial_shape')) - self.assertTrue(np.array_equal(int64_array([[0, 0], [0, 0], [0, 0]]), conv_node.pad_spatial_shape)) - # Check resulting output shape - self.assertTrue(np.array_equal(int64_array([1, 64, 16, 218, 218]), conv_output.shape)) - - def test_caffe_conv2d_infer_wrong_input_shape(self): - graph = build_graph(nodes_attributes, - [('conv_input', 'conv_node'), - ('conv_weights', 'conv_node'), - ('conv_node', 'conv_output'), - ('conv_output', 'op_output') - ], - {'conv_output': {'shape': None}, - 'conv_input': {'shape': np.array([1, 3, 1, 1])}, - 'conv_weights': {'shape': np.array([64, 3, 3, 3]), - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'conv_node': {'pad_spatial_shape': np.array([[0, 0], [0, 0]]), - 'conv_pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]]), - 'dilation': np.array([1, 1, 1, 1]), 'bias_addable': True, 'bias_term': False, - 'output_spatial_shape': None, 'output_shape': None, - 'stride': np.array([1, 1, 1, 1]), 'group': 1, - 'kernel_spatial_idx': np.array([2, 3]), - 'input_feature_channel': 1, - 'output_feature_channel': 0, - 'output': 64, 'kernel_spatial': np.array([3, 3]), - 'spatial_dims': np.array([2, 3]), 'channel_dims': np.array([1]), - 'batch_dims': np.array([0])} - }) - - conv_node = Node(graph, 'conv_node') - with self.assertRaises(Error): - Convolution.infer(conv_node) diff --git a/tools/mo/unit_tests/mo/ops/crop_test.py b/tools/mo/unit_tests/mo/ops/crop_test.py deleted file mode 100644 index 71bc61e9e27e08..00000000000000 --- a/tools/mo/unit_tests/mo/ops/crop_test.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.crop import Crop -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.graph import build_graph - - -class TestCropPartialInfer(unittest.TestCase): - @staticmethod - def _create_graph_type1(): - nodes_attributes = {'crop_input': {'shape': None, 'value': None, 'kind': 'data'}, - 'crop_node': {'op': 'Crop', 'kind': 'op'}, - 'crop_output': {'shape': None, 'value': None, 'kind': 'data'} - } - return build_graph(nodes_attributes, - [ - ('crop_input', 'crop_node'), ('crop_node', 'crop_output') - ], - { - 'crop_input': {'shape': int64_array([1, 3, 224, 224])}, - 'crop_node': {'axis': int64_array([2, 3]), - 'crop_begin': int64_array([10, 15]), - 'crop_end': int64_array([10, 15]) - }, - }) - - @staticmethod - def _create_graph_type2(): - nodes_attributes = {'crop_input': {'shape': None, 'value': None, 'kind': 'data'}, - 'crop_node': {'op': 'Crop', 'kind': 'op'}, - 'crop_output': {'shape': None, 'value': None, 'kind': 'data'} - } - return build_graph(nodes_attributes, - [ - ('crop_input', 'crop_node'), ('crop_node', 'crop_output') - ], - { - 'crop_input': {'shape': int64_array([1, 3, 224, 224])}, - 'crop_node': {'axis': int64_array([2, 3]), 'dim': int64_array([100, 150])}, - }) - - @staticmethod - def _create_graph_type3(): - nodes_attributes = {'crop_input': {'shape': None, 'value': None, 'kind': 'data'}, - 'crop_input2': {'shape': None, 'value': None, 'kind': 'data'}, - 'crop_node': {'op': 'Crop', 'kind': 
'op'}, - 'crop_output': {'shape': None, 'value': None, 'kind': 'data'} - } - return build_graph(nodes_attributes, - [ - ('crop_input', 'crop_node'), ('crop_input2', 'crop_node'), ('crop_node', 'crop_output') - ], - { - 'crop_input': {'shape': int64_array([1, 3, 224, 224])}, - 'crop_input2': {'shape': int64_array([1, 3, 100, 150])}, - 'crop_node': {'axis': 2, 'offset': int64_array([10, 15])}, - }) - - def test_crop_type1_infer(self): - graph = self._create_graph_type1() - - crop_node = Node(graph, 'crop_node') - Crop.infer(crop_node) - - exp_shape = int64_array([1, 3, 204, 194]) - res_shape = graph.node['crop_output']['shape'] - - self.assertTrue(np.array_equal(exp_shape, res_shape), - 'shapes do not match expected: {} and given: {}'.format(exp_shape, res_shape)) - - def test_crop_type1_infer_neg1(self): - graph = self._create_graph_type1() - - crop_node = Node(graph, 'crop_node') - crop_node['axis'] = None - - with self.assertRaisesRegex(Error, "axis attribute is missing .*"): - Crop.infer(crop_node) - - def test_crop_type1_infer_neg2(self): - graph = self._create_graph_type1() - - crop_node = Node(graph, 'crop_node') - crop_node['crop_begin'] = int64_array([1, 2, 3]) - - with self.assertRaisesRegex(Error, "number of crop_begin.*"): - Crop.infer(crop_node) - - def test_crop_type2_infer(self): - graph = self._create_graph_type2() - - crop_node = Node(graph, 'crop_node') - Crop.infer(crop_node) - - exp_shape = int64_array([1, 3, 100, 150]) - res_shape = graph.node['crop_output']['shape'] - - self.assertTrue(np.array_equal(exp_shape, res_shape), - 'shapes do not match expected: {} and given: {}'.format(exp_shape, res_shape)) - - def test_crop_type2_infer_neg1(self): - graph = self._create_graph_type2() - - crop_node = Node(graph, 'crop_node') - crop_node['dim'] = int64_array([1, 2, 3]) - - with self.assertRaisesRegex(Error, "Number of axis.*"): - Crop.infer(crop_node) - - def test_crop_type2_infer_neg2(self): - graph = self._create_graph_type2() - - crop_node = Node(graph, 'crop_node') - crop_node['dim'] = None - crop_node['crop_begin'] = None - - with self.assertRaisesRegex(Error, "Crop node crop_node should have either.*"): - Crop.infer(crop_node) - - def test_crop_type3_infer(self): - graph = self._create_graph_type3() - - crop_node = Node(graph, 'crop_node') - Crop.infer(crop_node) - - exp_shape = int64_array([1, 3, 100, 150]) - res_shape = graph.node['crop_output']['shape'] - - self.assertTrue(np.array_equal(exp_shape, res_shape), - 'shapes do not match expected: {} and given: {}'.format(exp_shape, res_shape)) - - def test_crop_type3_infer_neg1(self): - graph = self._create_graph_type3() - - crop_node = Node(graph, 'crop_node') - crop_input2 = Node(graph, 'crop_input2') - crop_input2.shape = None - - with self.assertRaisesRegex(Error, "Not all input shapes were defined.*"): - Crop.infer(crop_node) - - def test_crop_type3_infer_neg2(self): - graph = self._create_graph_type3() - - crop_node = Node(graph, 'crop_node') - crop_node['axis'] = None - - with self.assertRaisesRegex(Error, "axis attribute is missing for .*"): - Crop.infer(crop_node) - - def test_crop_type3_infer_neg3(self): - graph = self._create_graph_type3() - - crop_node = Node(graph, 'crop_node') - crop_node['offset'] = None - - with self.assertRaisesRegex(Error, "offset attribute is missing.*"): - Crop.infer(crop_node) - - def test_crop_type3_infer_neg4(self): - graph = self._create_graph_type3() - - crop_node = Node(graph, 'crop_node') - crop_input2 = Node(graph, 'crop_input2') - crop_input2.shape = int64_array([1, 4, 423, 
563]) - - with self.assertRaisesRegex(Error, "The crop for dimension is out of bounds.*"): - Crop.infer(crop_node) diff --git a/tools/mo/unit_tests/mo/ops/ctc_greedy_decoder_test.py b/tools/mo/unit_tests/mo/ops/ctc_greedy_decoder_test.py deleted file mode 100644 index 5e1ab46dc3d419..00000000000000 --- a/tools/mo/unit_tests/mo/ops/ctc_greedy_decoder_test.py +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.ctc_greedy_decoder_seq_len import CTCGreedyDecoderSeqLenOp -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - - -nodes_attributes = {'logits': {'kind': 'op'}, - 'logits_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'seq_mask': {'kind': 'op'}, - 'seq_mask_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'ctcgreedydecoder_node': {'op': 'CTCGreedyDecoderSeqLen', 'kind': 'op', - 'ctc_merge_repeated': True}, - 'output1': {'shape': None, 'value': None, 'kind': 'data'}, - 'last_output1': {'shape': None, 'value': None, 'kind': 'op'}, - 'output2': {'shape': None, 'value': None, 'kind': 'data'} - } - -# graph 1 -edges1 = [('logits', 'logits_data'), - ('seq_mask', 'seq_mask_data'), - ('logits_data', 'ctcgreedydecoder_node', {'in': 0}), - ('seq_mask_data', 'ctcgreedydecoder_node', {'in': 1}), - ('ctcgreedydecoder_node', 'output1', {'out': 0}), - ('ctcgreedydecoder_node', 'output2', {'out': 1}), - ('output1', 'last_output1', {'out': 0}),] - -# valid test case -inputs1 = {'logits_data': {'shape': int64_array([4, 100, 5])}, - 'seq_mask_data': {'shape': int64_array([4])}} - -# invalid test case with incorrect rank for the first input tensor -inputs1_inv = {'logits_data': {'shape': int64_array([4, 100, 5, 6])}, - 'seq_mask_data': {'shape': int64_array([4])}} - -# invalid test case with incorrect rank for the second input tensor -inputs2_inv = {'logits_data': {'shape': int64_array([4, 100, 5])}, - 'seq_mask_data': {'shape': int64_array([4, 100])}} - -# invalid test case with incorrect time dimension -inputs3_inv = {'logits_data': {'shape': int64_array([4, 100, 5])}, - 'seq_mask_data': {'shape': int64_array([4, 101])}} - -# invalid test case with incorrect batch dimension -inputs4_inv = {'logits_data': {'shape': int64_array([4, 100, 5])}, - 'seq_mask_data': {'shape': int64_array([14, 100])}} - -class TestCTCGreedyDecoder(unittest.TestCase): - def test_infer1(self): - graph = build_graph(nodes_attributes, edges1, inputs1) - ctcgreedydecoder_node = Node(graph, 'ctcgreedydecoder_node') - CTCGreedyDecoderSeqLenOp.infer(ctcgreedydecoder_node) - - # prepare reference results - ref_output1_shape = int64_array([4, 100]) - - # get the result - res_output1_shape = graph.node['output1']['shape'] - - self.assertTrue(np.array_equal(ref_output1_shape, res_output1_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output1_shape, res_output1_shape)) - - def test_infer_invalid1(self): - graph = build_graph(nodes_attributes, edges1, inputs1_inv) - ctcgreedydecoder_node = Node(graph, 'ctcgreedydecoder_node') - self.assertRaises(AssertionError, CTCGreedyDecoderSeqLenOp.infer, ctcgreedydecoder_node) - - def test_infer_invalid2(self): - graph = build_graph(nodes_attributes, edges1, inputs2_inv) - ctcgreedydecoder_node = Node(graph, 'ctcgreedydecoder_node') - self.assertRaises(AssertionError, CTCGreedyDecoderSeqLenOp.infer, 
ctcgreedydecoder_node) - - def test_infer_invalid3(self): - graph = build_graph(nodes_attributes, edges1, inputs3_inv) - ctcgreedydecoder_node = Node(graph, 'ctcgreedydecoder_node') - self.assertRaises(AssertionError, CTCGreedyDecoderSeqLenOp.infer, ctcgreedydecoder_node) - - def test_infer_invalid4(self): - graph = build_graph(nodes_attributes, edges1, inputs4_inv) - ctcgreedydecoder_node = Node(graph, 'ctcgreedydecoder_node') - self.assertRaises(AssertionError, CTCGreedyDecoderSeqLenOp.infer, ctcgreedydecoder_node) diff --git a/tools/mo/unit_tests/mo/ops/ctc_loss_test.py b/tools/mo/unit_tests/mo/ops/ctc_loss_test.py deleted file mode 100644 index 9b7f8d782c1a29..00000000000000 --- a/tools/mo/unit_tests/mo/ops/ctc_loss_test.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.ctc_loss import CTCLoss -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'logits': {'kind': 'op'}, - 'logits_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'logit_length': {'kind': 'op'}, - 'logit_length_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'labels': {'kind': 'op'}, - 'labels_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'label_length': {'kind': 'op'}, - 'label_length_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'blank_index': {'kind': 'op'}, - 'blank_index_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'ctcloss_node': {'op': 'CTCLoss', 'kind': 'op', 'preprocess_collapse_repeated': False, - 'ctc_merge_repeated': True, 'unique': False}, - 'output': {'shape': None, 'value': None, 'kind': 'data'}} - -# graph 1 -edges1 = [('logits', 'logits_data'), - ('logit_length', 'logit_length_data'), - ('labels', 'labels_data'), - ('label_length', 'label_length_data'), - ('blank_index', 'blank_index_data'), - ('logits_data', 'ctcloss_node', {'in': 0}), - ('logit_length_data', 'ctcloss_node', {'in': 1}), - ('labels_data', 'ctcloss_node', {'in': 2}), - ('label_length_data', 'ctcloss_node', {'in': 3}), - ('blank_index_data', 'ctcloss_node', {'in': 4}), - ('ctcloss_node', 'output', {'out': 0})] - -# valid test case -inputs1 = {'logits_data': {'shape': int64_array([4, 100, 5])}, - 'logit_length_data': {'shape': int64_array([4])}, - 'labels_data': {'shape': int64_array([4, 100])}, - 'label_length_data': {'shape': int64_array([4])}, - 'blank_index_data': {'shape': int64_array([])}} - -# invalid test case with incorrect rank for the second input tensor -inputs2 = {'logits_data': {'shape': int64_array([4, 100, 5])}, - 'logit_length_data': {'shape': int64_array([4, 3])}, - 'labels_data': {'shape': int64_array([4, 100])}, - 'label_length_data': {'shape': int64_array([4])}, - 'blank_index_data': {'shape': int64_array([])}} - -# invalid test case with incorrect time dimension -inputs3 = {'logits_data': {'shape': int64_array([4, 100, 5])}, - 'logit_length_data': {'shape': int64_array([4])}, - 'labels_data': {'shape': int64_array([4, 300])}, - 'label_length_data': {'shape': int64_array([4])}, - 'blank_index_data': {'shape': int64_array([])}} - -class TestCTCLoss(unittest.TestCase): - def test_infer1(self): - graph = build_graph(nodes_attributes, edges1, inputs1) - ctc_loss_node = Node(graph, 'ctcloss_node') - CTCLoss.infer(ctc_loss_node) - - # prepare reference results - ref_output_shape = int64_array([4]) - 
- # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_infer_invalid1(self): - graph = build_graph(nodes_attributes, edges1, inputs2) - ctc_loss_node = Node(graph, 'ctcloss_node') - self.assertRaises(AssertionError, CTCLoss.infer, ctc_loss_node) - - def test_infer_invalid2(self): - graph = build_graph(nodes_attributes, edges1, inputs3) - ctc_loss_node = Node(graph, 'ctcloss_node') - self.assertRaises(AssertionError, CTCLoss.infer, ctc_loss_node) diff --git a/tools/mo/unit_tests/mo/ops/cumsum_test.py b/tools/mo/unit_tests/mo/ops/cumsum_test.py deleted file mode 100644 index a87c6f5da48cb9..00000000000000 --- a/tools/mo/unit_tests/mo/ops/cumsum_test.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.cumsum import CumSum -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, valued_const_with_data, regular_op_with_shaped_data, result, connect - -nodes_attributes = { - **regular_op_with_shaped_data('data', [1, 3, 224, 224], {'type': 'Parameter', 'value': None, - '_out_port_data_type': {0: np.float32}}), - **valued_const_with_data('axis', int64_array(0)), - **regular_op_with_shaped_data('cumsum', None, {'op': 'CumSum', 'type': 'CumSum', 'name': 'cumsum'}), - **regular_op_with_shaped_data('identity', None, {'op': 'Identity', 'name': 'identity'}), - **result('output'), -} - - -class TestCumSum(unittest.TestCase): - def test_cumsum_axis(self): - graph = build_graph(nodes_attributes, - [*connect('data', '0:cumsum'), - *connect('axis', '1:cumsum'), - *connect('cumsum', '0:identity'), - ('identity', 'identity_d', {'out': 0}), - ('identity_d', 'output'), - ], - {'cumsum': {'reverse': False, 'exclusive': False} - }, nodes_with_edges_only=True) - - cumsum_node = Node(graph, 'cumsum') - CumSum.infer(cumsum_node) - self.assertTrue(np.array_equal(cumsum_node.out_port(0).data.get_shape(), int64_array([1, 3, 224, 224]))) - - def test_cumsum_value_prop(self): - graph = build_graph(nodes_attributes, - [*connect('data', '0:cumsum'), - *connect('axis', '1:cumsum'), - ('cumsum', 'cumsum_d', {'out': 0}), - ('cumsum_d', 'output'), - ], - {'data_d': {'value': np.array([1., 2., 3., 4., 5.]).astype(np.float32), 'shape': [5]}, - 'cumsum': {'reverse': False, 'exclusive': False} - }, nodes_with_edges_only=True) - - cumsum_node = Node(graph, 'cumsum') - CumSum.infer(cumsum_node) - self.assertTrue(np.array_equal(cumsum_node.out_port(0).data.get_value(), - np.array([1., 3., 6., 10., 15.]).astype(np.float32))) - - def test_cumsum_value_prop_exclusive(self): - graph = build_graph(nodes_attributes, - [*connect('data', '0:cumsum'), - *connect('axis', '1:cumsum'), - ('cumsum', 'cumsum_d', {'out': 0}), - ('cumsum_d', 'output'), - ], - {'data_d': {'value': np.array([1., 2., 3., 4., 5.]).astype(np.float32), 'shape': [5]}, - 'cumsum': {'reverse': False, 'exclusive': True} - }, nodes_with_edges_only=True) - - cumsum_node = Node(graph, 'cumsum') - CumSum.infer(cumsum_node) - self.assertTrue(np.array_equal(cumsum_node.out_port(0).data.get_value(), - np.array([0., 1., 3., 6., 10.]).astype(np.float32))) - - def test_cumsum_value_prop_reverse(self): - graph = build_graph(nodes_attributes, - 
[*connect('data', '0:cumsum'), - *connect('axis', '1:cumsum'), - ('cumsum', 'cumsum_d', {'out': 0}), - ('cumsum_d', 'output'), - ], - {'data_d': {'value': np.array([1., 2., 3., 4., 5.]).astype(np.float32), 'shape': [5]}, - 'cumsum': {'reverse': True, 'exclusive': False} - }, nodes_with_edges_only=True) - - cumsum_node = Node(graph, 'cumsum') - CumSum.infer(cumsum_node) - self.assertTrue(np.array_equal(cumsum_node.out_port(0).data.get_value(), - np.array([15., 14., 12., 9., 5.]).astype(np.float32))) - - def test_cumsum_value_prop_exclusive_reverse(self): - graph = build_graph(nodes_attributes, - [*connect('data', '0:cumsum'), - *connect('axis', '1:cumsum'), - ('cumsum', 'cumsum_d', {'out': 0}), - ('cumsum_d', 'output'), - ], - {'data_d': {'value': np.array([1., 2., 3., 4., 5.]).astype(np.float32), 'shape': [5]}, - 'cumsum': {'reverse': True, 'exclusive': True} - }, nodes_with_edges_only=True) - - cumsum_node = Node(graph, 'cumsum') - CumSum.infer(cumsum_node) - self.assertTrue(np.array_equal(cumsum_node.out_port(0).data.get_value(), - np.array([14., 12., 9., 5., 0.]).astype(np.float32))) - - def test_cumsum_value_prop_axis_1(self): - graph = build_graph(nodes_attributes, - [*connect('data', '0:cumsum'), - *connect('axis', '1:cumsum'), - ('cumsum', 'cumsum_d', {'out': 0}), - ('cumsum_d', 'output'), - ], - {'data_d': {'value': np.array([[1., 2., 3.], [4., 5., 6.]]).astype(np.float32), - 'shape': [2, 3]}, - 'axis_d': {'value': int64_array(1), - 'shape': []}, - 'cumsum': {'reverse': False, 'exclusive': False} - }, nodes_with_edges_only=True) - - cumsum_node = Node(graph, 'cumsum') - CumSum.infer(cumsum_node) - self.assertTrue(np.array_equal(cumsum_node.out_port(0).data.get_value(), - np.array([[1., 3., 6.], [4., 9., 15.]]).astype(np.float32))) diff --git a/tools/mo/unit_tests/mo/ops/deconvolution_test.py b/tools/mo/unit_tests/mo/ops/deconvolution_test.py deleted file mode 100644 index ed99f87f029b1c..00000000000000 --- a/tools/mo/unit_tests/mo/ops/deconvolution_test.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.front.tf.deconv_ext import get_conv_backprop_groups -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.deconvolution import Deconvolution -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'deconv_input': {'value': None, 'kind': 'data'}, - 'deconv_weights': {'value': None, 'kind': 'data'}, - 'deconv_output_shape': {'value': None, 'kind': 'data'}, - 'deconv_node': {'type': 'Deconvolution', 'op': 'Deconvolution', 'kind': 'op'}, - 'deconv_output': {'value': None, 'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'} - } - - -def create_deconv_graph(input_shape: int64_array, weights_shape: int64_array, output_shape: int64_array): - graph = build_graph(nodes_attributes, - [('deconv_input', 'deconv_node'), - ('deconv_weights', 'deconv_node'), - ('deconv_output_shape', 'deconv_node'), - ('deconv_node', 'deconv_output'), - ('deconv_output', 'op_output') - ], - {'deconv_input': {'shape': input_shape}, - 'deconv_weights': {'shape': weights_shape, - 'dim_attrs': ['spatial_dims', 'channel_dims', 'batch_dims', 'axis']}, - 'deconv_output_shape': {'value': output_shape}, - 'deconv_node': {'channel_dims': int64_array([1]), - 'batch_dims': int64_array([0]), - 'spatial_dims': int64_array([2, 3]), - 'pad_spatial_shape': int64_array([[0, 0], 
[0, 0]]), - 'kernel_spatial': int64_array([4, 4]), - 'kernel_spatial_idx': int64_array([2, 3]), - 'input_feature_channel': 0, - 'output_feature_channel': 1, - 'auto_pad': 'same_lower', - 'output_padding': int64_array([0, 0, 1, 1]), - 'type': 'Deconvolution', - 'dilation': int64_array([1, 1, 1, 1]), - 'stride': int64_array([1, 1, 2, 2]), - 'pad': None, - 'output': None, - 'output_shape': None, - 'get_group': get_conv_backprop_groups}, - 'deconv_output': {'shape': None}, - }) - return graph - - -class TestConvolutionPartialInfer(unittest.TestCase): - def test_deconv_infer_one_group(self): - graph = create_deconv_graph(int64_array([1, 21, 18, 18]), int64_array([21, 50, 4, 4]), - int64_array([1, 50, 35, 35])) - - Deconvolution.infer(Node(graph, 'deconv_node')) - res_shape = graph.node['deconv_output']['shape'] - exp_shape = np.array([1, 50, 35, 35]) - - res_group = graph.node['deconv_node']['group'] - exp_group = int64_array([1]) - - self.assertTrue(np.array_equal(exp_shape, res_shape), - 'values do not match expected: {} and computed: {}'.format(exp_shape, res_shape)) - - self.assertTrue(np.array_equal(exp_group, res_group), - 'group number values do not match expected: {} and computed: {}'.format(exp_group, res_group)) - - def test_deconv_infer_several_groups(self): - graph = create_deconv_graph(int64_array([1, 21, 18, 18]), int64_array([21, 50, 4, 4]), - int64_array([1, 350, 35, 35])) - - Deconvolution.infer(Node(graph, 'deconv_node')) - res_shape = graph.node['deconv_output']['shape'] - exp_shape = np.array([1, 350, 35, 35]) - - res_group = graph.node['deconv_node']['group'] - exp_group = int64_array([7]) - - self.assertTrue(np.array_equal(exp_shape, res_shape), - 'values do not match expected: {} and computed: {}'.format(exp_shape, res_shape)) - - self.assertTrue(np.array_equal(exp_group, res_group), - 'group number values do not match expected: {} and computed: {}'.format(exp_group, res_group)) diff --git a/tools/mo/unit_tests/mo/ops/depth_to_space_test.py b/tools/mo/unit_tests/mo/ops/depth_to_space_test.py deleted file mode 100644 index 7e9379101f0ce2..00000000000000 --- a/tools/mo/unit_tests/mo/ops/depth_to_space_test.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -import numpy as np -from openvino.tools.mo.ops.depth_to_space import DepthToSpaceOp -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.graph import build_graph - -nodes = { - 'in_data_node': {'value': None, 'kind': 'data', 'shape': np.array([1, 1024, 576, 256])}, - 'DtS': {'op': 'DepthToSpace', 'kind': 'op', 'block_size': 2}, - 'out_data_node': {'value': None, 'kind': 'data', 'shape': None} -} - -edges = [ - ('in_data_node', 'DtS'), - ('DtS', 'out_data_node') -] - - -class TestDepthToSpacePartialInfer(unittest.TestCase): - def test_tf_depth_to_space_infer_nhwc(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NHWC' - dts_node = Node(graph, 'DtS') - DepthToSpaceOp.infer(dts_node) - exp_shape = np.array([1, 2048, 1152, 64]) - res_shape = graph.node['out_data_node']['shape'] - self.assertTrue(np.array_equal(exp_shape, res_shape)) - - def test_tf_depth_to_space_infer_nchw(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NCHW' - graph.node['in_data_node']['shape'] = np.array([1, 256, 1024, 576]) - dts_node = Node(graph, 'DtS') - DepthToSpaceOp.infer(dts_node) - exp_shape = np.array([1, 64, 2048, 1152]) - res_shape = 
graph.node['out_data_node']['shape'] - self.assertTrue(np.array_equal(exp_shape, res_shape)) - - def test_tf_depth_to_space_infer_error(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NHWC' - graph.node['in_data_node']['shape'] = np.array([1024, 576, 256]) - dts_node = Node(graph, 'DtS') - self.assertRaises(Error, DepthToSpaceOp.infer, dts_node) - - def test_tf_depth_to_space_infer_divisibility_error_1(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NHWC' - graph.node['in_data_node']['shape'] = np.array([1, 1024, 576, 255]) - dts_node = Node(graph, 'DtS') - self.assertRaises(Error, DepthToSpaceOp.infer, dts_node) - - def test_tf_depth_to_space_infer_divisibility_error_2(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NCHW' - graph.node['in_data_node']['shape'] = np.array([1, 255, 1024, 576]) - dts_node = Node(graph, 'DtS') - self.assertRaises(Error, DepthToSpaceOp.infer, dts_node) - diff --git a/tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py b/tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py deleted file mode 100644 index c28adc5d0da168..00000000000000 --- a/tools/mo/unit_tests/mo/ops/dft_signal_size_canonicalization_test.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.dft import FFTBase -from openvino.tools.mo.front.common.partial_infer.utils import int64_array - - -class TestDFTSignalSizeCanonicalizationTest(): - @pytest.mark.parametrize("signal_size, axes, input_shape, expected_result",[ - (int64_array([-1, 77]), int64_array([1, 2]), int64_array([2, 180, 180, 2]), int64_array([180, 77])), - (int64_array([390, 87]), int64_array([2, 0]), int64_array([2, 180, 180, 2]), int64_array([390, 87])), - (int64_array([600, -1, 40]), - int64_array([3, 0, 1]), - int64_array([7, 50, 130, 400, 2]), - int64_array([600, 7, 40])), - (int64_array([-1, 16, -1]), - int64_array([3, 0, 2]), - int64_array([7, 50, 130, 400, 2]), - int64_array([400, 16, 130])), - (int64_array([16, -1, -1]), - int64_array([3, 0, 2]), - int64_array([7, 50, 130, 400, 2]), - int64_array([16, 7, 130])), - (int64_array([-1, -1, 16]), - int64_array([3, 0, 2]), - int64_array([7, 50, 130, 400, 2]), - int64_array([400, 7, 16])), - (int64_array([-1, -1, -1]), - int64_array([3, 0, 2]), - int64_array([7, 50, 130, 400, 2]), - int64_array([400, 7, 130])), - ]) - def test_canonicalization(self, signal_size, axes, input_shape, expected_result): - canonicalized_signal_size = FFTBase.canonicalize_signal_size(signal_size, axes, input_shape) - assert np.array_equal(canonicalized_signal_size, expected_result) diff --git a/tools/mo/unit_tests/mo/ops/div_value_propagation_test.py b/tools/mo/unit_tests/mo/ops/div_value_propagation_test.py deleted file mode 100644 index 3fd2b200e714bb..00000000000000 --- a/tools/mo/unit_tests/mo/ops/div_value_propagation_test.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Div -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -graph_nodes_attrs = { - 'A': {'type': 'Const', 'op': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'A_data': {'kind': 'data', 'shape': None, 'value': None}, - 'B': 
{'type': 'Const', 'op': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'B_data': {'kind': 'data', 'shape': None, 'value': None, 'dim_attrs': []}, - 'div': {'type': 'Divide', 'op': 'Div', 'kind': 'op'}, - 'div_data': {'kind': 'data', 'value': None, 'shape': None}, - 'output': {'kind': 'op', 'op': 'Result'}, -} - - -graph_edges = [ - ('A', 'A_data'), - ('B', 'B_data'), - ('A_data', 'div', {'in': 0}), - ('B_data', 'div', {'in': 1}), - ('div', 'div_data'), - ('div_data', 'output'), -] - - -class TestDivValuePropagation(): - @pytest.mark.parametrize("a_shape, a_value, b_shape, b_value, elem_type",[ - ([2, 3], np.array([[1, 4, -6], [0, -16, 45]], dtype=np.int64), - [2, 3], np.array([[1, 2, -4], [1, -8, -5]], dtype=np.int64), - np.int64), - ([2, 3], np.array([[1, 4, -6], [0, -16, 45]], dtype=np.int64), - [2, 3], np.array([[1, 2, -4], [1, -8, -5]], dtype=np.int64), - np.float64), - ([2, 3], np.array([[1, 4, -6], [0, -16, 45]], dtype=np.int64), - [2, 3], np.array([[1, 2, -4], [1, -8, -5]], dtype=np.int64), - np.float32), - ([3, 3], np.array([[15, 2, 11], [14, 7, 8], [24, 12, 0]], dtype=np.int64), - [3, 3], np.array([[-5, 4, 2], [7, 2, 4], [6, 24, 1]], dtype=np.int64), - np.int64), - ([3, 3], np.array([[15, 2, 11], [14, 7, 8], [24, 12, 0]], dtype=np.int64), - [3, 3], np.array([[-5, 4, 2], [7, 2, 4], [6, 24, 1]], dtype=np.int64), - np.float64), - ([3, 3], np.array([[15, 2, 11], [14, 7, 8], [24, 12, 0]], dtype=np.int64), - [3, 3], np.array([[-5, 4, 2], [7, 2, 4], [6, 24, 1]], dtype=np.int64), - np.float32), - ]) - def test_value_propagation(self, a_shape, a_value, b_shape, b_value, elem_type): - graph = build_graph( - nodes_attrs=graph_nodes_attrs, - edges=graph_edges, - update_attributes={ - 'A': {'shape': int64_array(a_shape), 'value': a_value.astype(elem_type)}, - 'A_data': {'shape': int64_array(a_shape), 'value': a_value.astype(elem_type)}, - 'B': {'shape': int64_array(b_shape), 'value': b_value.astype(elem_type)}, - 'B_data': {'shape': int64_array(b_shape), 'value': b_value.astype(elem_type)}, - } - ) - node = Node(graph, 'div') - node['infer'] = Div(graph, node.attrs()).create_node().infer - node.infer(node) - node_data = node.out_port(0).get_destination().data.get_value() - - def func_for_ref(): - if np.issubdtype(elem_type, np.integer): - return lambda a, b: a // b - else: - return lambda a, b: a / b - - ref_data = func_for_ref()(a_value, b_value) - node_data_shape = node_data.shape - ref_data_shape = ref_data.shape - msg = "Value propagation for 'div' node is not correct." 
- assert node_data_shape == ref_data_shape and np.all(node_data == ref_data), msg diff --git a/tools/mo/unit_tests/mo/ops/einsum_test.py b/tools/mo/unit_tests/mo/ops/einsum_test.py deleted file mode 100644 index eb896425da905c..00000000000000 --- a/tools/mo/unit_tests/mo/ops/einsum_test.py +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.einsum import Einsum -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Graph -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, result, connect - - -def create_einsum_graph(input_shapes: list, equation: str) -> Graph: - num_inputs = len(input_shapes) - assert num_inputs > 0, "Einsum node must have at least one input" - nodes = {} - edges = [] - for input_ind in range(num_inputs): - input_name = 'input' + str(input_ind) - parameter_op = regular_op_with_shaped_data(input_name, input_shapes[input_ind], - {'op': 'Parameter', 'type': 'Parameter'}) - nodes.update(parameter_op) - edges += connect(input_name, str(input_ind) + ":einsum_node") - einsum_op = regular_op_with_shaped_data('einsum_node', None, - {'op': 'Einsum', 'type': 'Einsum', 'equation': equation}) - nodes.update(einsum_op) - result_op = result('output') - nodes.update(result_op) - edges += connect('einsum_node', 'output') - - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - return graph - - -class TestEinsum(): - @pytest.mark.parametrize("input_shapes, equation, ref_output_shape",[ - # dot product - ([int64_array([10]), int64_array([10])], "i,i->", int64_array([])), - # matrix multiplication - ([int64_array([2, 3]), int64_array([3, 4])], "ab,bc->ac", int64_array([2, 4])), - # trace per batch - ([int64_array([2, 3, 3])], "kii->k", int64_array([2])), - # diagonal extraction - ([int64_array([6, 5, 5])], "kii->ki", int64_array([6, 5])), - # transpose - ([int64_array([1, 2, 3])], "ijk->kij", int64_array([3, 1, 2])), - # multiple matrix multiplication - ([int64_array([2, 5]), int64_array([5, 3, 6]), int64_array([5, 3])], "ab,bcd,bc->ca", int64_array([3, 2])), - # ellipsis for one operand - ([int64_array([5, 3, 4])], "a...->...", int64_array([3, 4])), - # ellipsis for multiple operands - ([int64_array([3, 5]), int64_array([1])], "a...,...->a...", int64_array([3, 5])), - # ellipsis with broadcasting - ([int64_array([9, 1, 4, 3]), int64_array([3, 11, 7, 1])], "a...b,b...->a...", int64_array([9, 11, 7, 4])), - # mixed case letters in equation - ([int64_array([1, 3, 5])], "AbC", int64_array([1, 5, 3])), - # mixed case letters and equation in implicit mode - ([int64_array([3, 11, 1, 5]), int64_array([1, 3, 1, 7])], "a...b,B...", int64_array([3, 11, 7, 1, 3, 5])), - # inner product in implicit mode - ([int64_array([3]), int64_array([3])], "i,i", int64_array([])), - # equation with ellipsis and repeated labels in implicit mode - # "a...b,b..." 
is equivalent to "a...b,b...->...a" - ([int64_array([9, 1, 4, 3]), int64_array([3, 11, 7, 1])], "a...b,b...", int64_array([11, 7, 4, 9])), - ]) - def test_einsum(self, input_shapes, equation, ref_output_shape): - graph = create_einsum_graph(input_shapes, equation) - einsum_node = Node(graph, 'einsum_node') - Einsum.infer(einsum_node) - - # get the result - res_output_shape = graph.node['einsum_node_d']['shape'] - - assert np.array_equal(ref_output_shape, res_output_shape),\ - 'shape does not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape) - - @pytest.mark.parametrize("input_shapes, equation, ref_output_shape", [ - # incorrect subscript numbers or inputs - ([int64_array([3, 11]), int64_array([11, 4])], "ab,bc,cd->ac", None), - # invalid labels - ([int64_array([3, 11]), int64_array([11, 4])], "a$,Bc->ac", None), - # incompatible shapes - ([int64_array([3, 11]), int64_array([12, 4])], "ab,bc->ac", None), - # not broadcastable shapes - ([int64_array([11, 1, 4, 3]), int64_array([3, 11, 7, 5])], "a...b,b...->a...", None), - # missed ellipsis - ([int64_array([11, 1, 4, 3]), int64_array([3, 11, 7, 4])], "a...b,b...->a", None), -]) - def test_invalid_cases(self, input_shapes, equation, ref_output_shape): - graph = create_einsum_graph(input_shapes, equation) - einsum_node = Node(graph, 'einsum_node') - with pytest.raises(AssertionError): - Einsum.infer(einsum_node) diff --git a/tools/mo/unit_tests/mo/ops/elementwise_test.py b/tools/mo/unit_tests/mo/ops/elementwise_test.py deleted file mode 100644 index cd47b78c6b824b..00000000000000 --- a/tools/mo/unit_tests/mo/ops/elementwise_test.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.elementwise import Round, Elementwise -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.infer import type_infer -from unit_tests.utils.graph import valued_const_with_data, result, regular_op_with_empty_data, connect, \ - shaped_parameter, build_graph - - -def round_test_graph(nodes_attributes, value, mode: str): - graph = build_graph(nodes_attributes, - [ - ('node_1', 'elementwise_node'), - ('elementwise_node', 'node_3') - ], - { - 'node_1': { - 'value': value - }, - 'elementwise_node': { - 'op': 'Round', - 'mode': mode, - }, - 'node_3': { - 'value': None - } - }) - return graph - - -class TestElementwiseOp(unittest.TestCase): - nodes_attributes = { - 'node_1': { - 'shape': np.array([13]), - 'value': None - }, - 'elementwise_node': { - 'op': None, - 'kind': 'op', - 'operation': None - }, - 'node_3': { - 'shape': None - } - } - - value = np.array([-23.5, -22.5, -2.5, -1.5, -0.5, 0.5, 0.9, 1.5, 2.3, 2.5, 3.5, 22.5, 23.5]) - - def test_elementwise_round_even_infer(self): - graph = round_test_graph(self.nodes_attributes, self.value, 'half_to_even') - - graph.graph['layout'] = 'NCHW' - elementwise_node = Node(graph, 'elementwise_node') - Round.infer(elementwise_node) - exp_shape = np.array([13]) - res_shape = graph.node['node_3']['shape'] - res_value = graph.node['node_3']['value'] - exp_value = np.array([-24., -22., -2., -2., -0., 0., 1., 2., 2., 2., 4., 22., 24., ]) - for i, value in enumerate(exp_shape): - self.assertEqual(res_shape[i], value) - for i, value in enumerate(exp_value): - self.assertAlmostEqual(res_value[i], value) - - def test_elementwise_round_away_infer(self): - graph = 
round_test_graph(self.nodes_attributes, self.value, 'half_away_from_zero') - - graph.graph['layout'] = 'NCHW' - elementwise_node = Node(graph, 'elementwise_node') - Round.infer(elementwise_node) - exp_shape = np.array([13]) - res_shape = graph.node['node_3']['shape'] - res_value = graph.node['node_3']['value'] - exp_value = np.array([-24., -23., -3., -2., -1., 1., 1., 2., 2., 3., 4., 23., 24.]) - for i, value in enumerate(exp_shape): - self.assertEqual(res_shape[i], value) - for i, value in enumerate(exp_value): - self.assertAlmostEqual(res_value[i], value) - - -class TestElementwiseTypeAlignment(unittest.TestCase): - - @staticmethod - def build_graph_to_test_type_alignment(edges, - input_1_type=np.float32, - input_2_type=np.float32, - const_type=np.float32): - input_shape = int64_array([1, 3, 255, 255]) - const_value = np.array([1], dtype=const_type) - - nodes = { - **shaped_parameter('input_1', input_shape, {'data_type': input_1_type}), - **shaped_parameter('input_2', input_shape, {'data_type': input_2_type}), - **regular_op_with_empty_data('add', {'op': 'Add', 'type': 'Add', 'type_infer': Elementwise.type_infer}), - **valued_const_with_data('const', const_value, kwargs={'data_type': const_type}), - **result('result'), - } - graph = build_graph(nodes, edges, nodes_with_edges_only=True) - graph.stage = 'back' - return graph - - def test_first_input_const(self): - edges = [ - *connect('const', '0:add'), - *connect('input_1', '1:add'), - *connect('add', 'result') - ] - graph = self.build_graph_to_test_type_alignment(edges, const_type=np.float16, input_1_type=np.float32) - - type_infer(graph) - const_node = Node(graph, 'const') - self.assertEqual(const_node.out_port(0).get_data_type(), np.float32) - - def test_second_input_const(self): - edges = [ - *connect('input_1', '0:add'), - *connect('const', '1:add'), - *connect('add', 'result') - ] - graph = self.build_graph_to_test_type_alignment(edges, input_1_type=np.float32, const_type=np.float16) - - type_infer(graph) - const_node = Node(graph, 'const') - self.assertEqual(const_node.out_port(0).get_data_type(), np.float32) - - def test_raises(self): - edges = [ - *connect('input_1', '0:add'), - *connect('input_2', '1:add'), - *connect('add', 'result') - ] - graph = self.build_graph_to_test_type_alignment(edges, input_1_type=np.float32, input_2_type=np.float16) - - self.assertRaises(Exception, type_infer, graph) - - def test_not_raises(self): - edges = [ - *connect('input_1', '0:add'), - *connect('input_2', '1:add'), - *connect('add', 'result') - ] - graph = self.build_graph_to_test_type_alignment(edges, input_1_type=np.float32, input_2_type=np.float32) - - type_infer(graph) - add_node = Node(graph, 'add') - self.assertEqual(add_node.out_port(0).get_data_type(), np.float32) diff --git a/tools/mo/unit_tests/mo/ops/embedding_bag_test.py b/tools/mo/unit_tests/mo/ops/embedding_bag_test.py deleted file mode 100644 index 9e9e4a62515ea9..00000000000000 --- a/tools/mo/unit_tests/mo/ops/embedding_bag_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.embedding_bag import EmbeddingBagOffsetsSum, EmbeddingBagPackedSum, EmbeddingSegmentsSum -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, result, connect - -nodes = { - 
**valued_const_with_data('data', np.random.randn(3000, 8)), - **regular_op_with_shaped_data('indices1d', [100], {'type': 'Parameter', 'value': None, - '_out_port_data_type': {0: np.int32}}), - **regular_op_with_shaped_data('indices2d', [30, 3], {'type': 'Parameter', 'value': None, - '_out_port_data_type': {0: np.int32}}), - **regular_op_with_shaped_data('offsets', [30], {'type': 'Parameter', 'value': None, - '_out_port_data_type': {0: np.int32}}), - **regular_op_with_shaped_data('segment_ids', [100], {'type': 'Parameter', 'value': None, - '_out_port_data_type': {0: np.int32}}), - **valued_const_with_data('num_segments', np.array(30, dtype=np.int32)), - **regular_op_with_shaped_data('embedding_bag_offsets', None, - {'op': 'EmbeddingBagOffsetsSum', 'type': 'EmbeddingBagOffsetsSum', - 'name': 'embedding_bag_offsets'}), - **regular_op_with_shaped_data('embedding_bag_packed', None, - {'op': 'EmbeddingBagPackedSum', 'type': 'EmbeddingBagPackedSum', - 'name': 'embedding_bag_packed'}), - **regular_op_with_shaped_data('embedding_segments', None, - {'op': 'EmbeddingSegmentsSum', 'type': 'EmbeddingSegmentsSum', - 'name': 'embedding_bag_packed'}), - **result('output'), -} - - -class TestEmbeddingInfer(unittest.TestCase): - def test_embedding_bag_offsets_sum(self): - graph = build_graph(nodes, [ - *connect('data', '0:embedding_bag_offsets'), - *connect('indices1d', '1:embedding_bag_offsets'), - *connect('offsets', '2:embedding_bag_offsets'), - ('embedding_bag_offsets', 'embedding_bag_offsets_d', {'out': 0}), - ('embedding_bag_offsets_d', 'output'), - ], nodes_with_edges_only=True) - eb_node = Node(graph, 'embedding_bag_offsets') - EmbeddingBagOffsetsSum.infer(eb_node) - - self.assertTrue(np.array_equal(eb_node.out_port(0).data.get_shape(), int64_array([30, 8]))) - - def test_embedding_bag_packed_sum(self): - graph = build_graph(nodes, [ - *connect('data', '0:embedding_bag_packed'), - *connect('indices2d', '1:embedding_bag_packed'), - ('embedding_bag_packed', 'embedding_bag_packed_d', {'out': 0}), - ('embedding_bag_packed_d', 'output'), - ], nodes_with_edges_only=True) - eb_node = Node(graph, 'embedding_bag_packed') - EmbeddingBagPackedSum.infer(eb_node) - - self.assertTrue(np.array_equal(eb_node.out_port(0).data.get_shape(), int64_array([30, 8]))) - - def test_embedding_segments_sum(self): - graph = build_graph(nodes, [ - *connect('data', '0:embedding_segments'), - *connect('indices1d', '1:embedding_segments'), - *connect('segment_ids', '2:embedding_segments'), - *connect('num_segments', '3:embedding_segments'), - ('embedding_segments', 'embedding_segments_d', {'out': 0}), - ('embedding_segments_d', 'output'), - ], nodes_with_edges_only=True) - eb_node = Node(graph, 'embedding_segments') - EmbeddingSegmentsSum.infer(eb_node) - - self.assertTrue(np.array_equal(eb_node.out_port(0).data.get_shape(), int64_array([30, 8]))) diff --git a/tools/mo/unit_tests/mo/ops/exit_test.py b/tools/mo/unit_tests/mo/ops/exit_test.py deleted file mode 100644 index e38c05b12e543b..00000000000000 --- a/tools/mo/unit_tests/mo/ops/exit_test.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright (C) 2021 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -import numpy as np -import unittest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.Exit import Exit -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect, shaped_parameter - - -# test for TensorIterator 
-graph_nodes = { - **shaped_parameter("input", int64_array([1, 4, 64, 54])), - **regular_op_with_empty_data("exit", {'op': "Exit"}), - **result("output") -} - - -class ExitTest(unittest.TestCase): - def test_exit_static(self): - graph = build_graph(nodes_attrs=graph_nodes, - edges=[*connect('input', 'exit'), - *connect('exit', 'output')], - nodes_with_edges_only=True) - exit_node = Node(graph, 'exit') - in_node = Node(graph, 'input') - - Exit.exit_infer(exit_node) - - self.assertTrue(np.ma.allequal(exit_node.out_port(0).data.get_shape(), in_node.shape)) - - def test_exit_dynamic(self): - graph = build_graph(nodes_attrs=graph_nodes, - edges=[*connect('input', 'exit'), - *connect('exit', 'output')], - nodes_with_edges_only=True) - exit_node = Node(graph, 'exit') - in_node = Node(graph, 'input') - shape = int64_array([-1, 36]) - in_node.shape = np.ma.masked_array(shape, mask=shape == -1, fill_value=dynamic_dimension_value) - in_node.out_port(0).data.set_shape(in_node.shape) - - Exit.exit_infer(exit_node) - - self.assertTrue(np.ma.allequal(exit_node.out_port(0).data.get_shape(), in_node.shape)) diff --git a/tools/mo/unit_tests/mo/ops/expand_dims_test.py b/tools/mo/unit_tests/mo/ops/expand_dims_test.py deleted file mode 100644 index f73ece25c39237..00000000000000 --- a/tools/mo/unit_tests/mo/ops/expand_dims_test.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.expand_dims import ExpandDims -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'data_1': { - 'kind': 'data', - 'shape': np.array([2, 3, 224, 224]), - 'value': None, - }, - 'expand_dims': { - 'type': 'None', - 'kind': 'op', - }, - 'data_2': { - 'kind': 'data', - 'shape': None, - 'value': None, - } -} - -class TestExpandDimsOp(): - @pytest.mark.parametrize("axis, ref_out_shape",[(0, [1, 2, 3, 224, 224]), - (1, [2, 1, 3, 224, 224]), - (2, [2, 3, 1, 224, 224]), - (3, [2, 3, 224, 1, 224]), - (4, [2, 3, 224, 224, 1]), - ]) - def test_expand_dims_infer(self, axis, ref_out_shape): - graph = build_graph(nodes_attributes, - [('data_1', 'expand_dims'), - ('expand_dims', 'data_2')], - {'expand_dims': {'expand_axis': axis}}) - expand_dims_node = Node(graph, 'expand_dims') - - ExpandDims.infer(expand_dims_node) - - assert np.array_equal(expand_dims_node.out_node().shape, np.array(ref_out_shape)) - - -class TestExpandDimsOpDynamicDims(): - @pytest.mark.parametrize("axis, ref_out_shape",[(0, [1, 2, 3, dynamic_dimension_value, 224]), - (1, [2, 1, 3, dynamic_dimension_value, 224]), - (2, [2, 3, 1, dynamic_dimension_value, 224]), - (3, [2, 3, dynamic_dimension_value, 1, 224]), - (4, [2, 3, dynamic_dimension_value, 224, 1]), - ]) - def test_expand_dims_infer(self, axis, ref_out_shape): - graph = build_graph(nodes_attributes, - [('data_1', 'expand_dims'), - ('expand_dims', 'data_2')], - {'expand_dims': {'expand_axis': axis}}) - Node(graph, 'data_1').shape = shape_array([2, 3, dynamic_dimension_value, 224]) - expand_dims_node = Node(graph, 'expand_dims') - - ExpandDims.infer(expand_dims_node) - - assert strict_compare_tensors(expand_dims_node.out_node().shape, shape_array(ref_out_shape)) - - -class TestExpandDimsOpValueInfer(): - @pytest.mark.parametrize("axis, in_shape, ref_out_shape",[(0, [2, 3, 224, 224], [1, 2, 3, 224, 224]), 
- (1, [2, 3, 224, 224], [2, 1, 3, 224, 224]), - (2, [2, 3, 224, 224], [2, 3, 1, 224, 224]), - (3, [2, 3, 224, 224], [2, 3, 224, 1, 224]), - (4, [2, 3, 224, 224], [2, 3, 224, 224, 1]), - ]) - def test_expand_dims_infer_value(self, axis, in_shape, ref_out_shape): - in_value = np.random.rand(*in_shape) - graph = build_graph(nodes_attributes, - [('data_1', 'expand_dims'), - ('expand_dims', 'data_2')], - {'data_1': {'value': in_value}, - 'expand_dims': {'expand_axis': axis}}) - expand_dims_node = Node(graph, 'expand_dims') - - ExpandDims.infer(expand_dims_node) - - assert np.array_equal(expand_dims_node.out_node().shape, np.array(ref_out_shape)) - assert np.array_equal(expand_dims_node.out_node().value, np.array(in_value.reshape(ref_out_shape))) diff --git a/tools/mo/unit_tests/mo/ops/eye_test.py b/tools/mo/unit_tests/mo/ops/eye_test.py deleted file mode 100644 index d3d83ca6a75970..00000000000000 --- a/tools/mo/unit_tests/mo/ops/eye_test.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest -import numpy as np - - -from openvino.tools.mo.ops.eye import Eye -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph_with_attrs, build_graph -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value -from unit_tests.utils.graph import valued_const_with_data, result, regular_op_with_empty_data, connect - - -graph_node_attrs_sizes = [ - ('num_rows', {'type': 'Parameter', 'kind': 'op'}), - ('num_rows_data', {'kind': 'data'}), - - ('num_columns', {'type': 'Parameter', 'kind': 'op'}), - ('num_columns_data', {'kind': 'data'}), - - ('diagonal_index', {'type': 'Parameter', 'kind': 'op'}), - ('diagonal_index_data', {'kind': 'data'}), - - ('batch_shape', {'type': 'Parameter', 'kind': 'op'}), - ('batch_shape_data', {'kind': 'data'}), - - ('eye_op', {'type': 'Eye', 'kind': 'op'}), - ('eye_op_data', {'kind': 'data', 'shape': None, 'value': None}), - - ('op_output', {'kind': 'op', 'op': 'Result'}), -] - - -graph_edges_sizes = [ - ('num_rows', 'num_rows_data'), - ('num_columns', 'num_columns_data'), - ('diagonal_index', 'diagonal_index_data'), - ('batch_shape', 'batch_shape_data'), - - ('num_rows_data', 'eye_op', {'in': 0}), - ('num_columns_data', 'eye_op', {'in': 1}), - ('diagonal_index_data', 'eye_op', {'in': 2}), - ('batch_shape_data', 'eye_op', {'in': 3}), - - ('eye_op', 'eye_op_data'), - ('eye_op_data', 'op_output'), -] - - -class TestComplexOp(): - @pytest.mark.parametrize("input_shape, output_shape, num_rows, num_cols, batch_shape",[ - ([], [dynamic_dimension_value, dynamic_dimension_value],None,None,[]), - ([1], [dynamic_dimension_value, dynamic_dimension_value],None,None,[]), - ([1], [2, dynamic_dimension_value, dynamic_dimension_value], None, None, [2]), - ([1], [2, 3, dynamic_dimension_value], 3, None, [2]), - ([1], [2, dynamic_dimension_value, 4], None, 4, [2]), - ([1], [2, 3, 4], [3], [4], [2]) - ]) - def test_complex_op_shape_inference(self, input_shape, output_shape, num_rows, num_cols, batch_shape): - graph = build_graph_with_attrs(nodes_with_attrs=graph_node_attrs_sizes, - edges_with_attrs=graph_edges_sizes, - update_nodes_attributes=[ - ('num_rows_data', {'shape': int64_array(input_shape), 'value': num_rows}), - ('num_columns_data', {'shape': int64_array(input_shape), 'value': num_cols}), - ('diagonal_index_data', {'shape': int64_array(input_shape)}), - 
('batch_shape_data', {'shape': int64_array([len(batch_shape)]), 'value': batch_shape}), - ('eye_op', {'output_type': np.float32}), - ]) - node = Node(graph, 'eye_op') - Eye.infer(node) - - msg = "Eye operation infer failed for case: expected_shape={}, actual_shape={}" - - assert np.array_equal(graph.node['eye_op_data']['shape'], output_shape),\ - msg.format(output_shape, graph.node['eye_op_data']['shape']) - - def test_value_inference(self): - graph_node_attrs_sizes = { - **valued_const_with_data('num_rows', int64_array([128])), - **valued_const_with_data('num_columns', int64_array([128])), - **valued_const_with_data('diagonal_index', int64_array([0])), - **valued_const_with_data('batch_shape', int64_array([])), - **regular_op_with_empty_data('eye_op', {'op': 'Eye', 'output_type': np.float32}), - **result('res'), - } - graph_edges_sizes = [ - *connect('num_rows', '0:eye_op'), - *connect('num_columns', '1:eye_op'), - *connect('diagonal_index', '2:eye_op'), - *connect('batch_shape', '3:eye_op'), - *connect('eye_op', 'res'), - ] - graph = build_graph( - graph_node_attrs_sizes, graph_edges_sizes) - node = Node(graph, 'eye_op') - Eye.infer(node) - output_value = np.eye(int64_array(128), M=int64_array( - 128), k=int64_array(0), dtype=np.float32) - - msg = "Eye operation infer failed for case: expected_value={}, actual_value={}" - - assert np.array_equal(graph.node['eye_op_d']['value'], output_value),\ - msg.format(output_value, graph.node['eye_op_d']['value']) diff --git a/tools/mo/unit_tests/mo/ops/gather_test.py b/tools/mo/unit_tests/mo/ops/gather_test.py deleted file mode 100644 index d5760f2d6fcdad..00000000000000 --- a/tools/mo/unit_tests/mo/ops/gather_test.py +++ /dev/null @@ -1,341 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy.testing as npt - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, strict_compare_tensors, \ - dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.ops.gather import Gather -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.utils.error import Error -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from unit_tests.utils.graph import valued_const_with_data, result, regular_op_with_empty_data, connect, \ - shaped_parameter, build_graph - - -class TestGatherPartialInfer(UnitTestWithMockedTelemetry): - - @staticmethod - def build_and_test_value_inference(data, indices, axis, batch_dims, ref_value, negative_test_string=None): - nodes = { - **valued_const_with_data('data', int64_array(data)), - **valued_const_with_data('indices', int64_array(indices)), - **valued_const_with_data('axis', int64_array(axis)), - **regular_op_with_empty_data('gather', {'op': 'Gather', 'batch_dims': batch_dims, 'infer': Gather.infer}), - **result('res'), - } - - edges = [ - *connect('data', '0:gather'), - *connect('indices', '1:gather'), - *connect('axis', '2:gather'), - *connect('gather', 'res') - ] - - graph = build_graph(nodes, edges) - graph.stage = 'middle' - partial_infer(graph) - - node = Node(graph, 'gather') - res = node.out_port(0).data.get_value() - npt.assert_array_equal(res, ref_value) - - @staticmethod - def build_and_test_shape_inference(data_shape, indices_shape, axis, batch_dims, ref_shape): - nodes = { - **shaped_parameter('data', int64_array(data_shape)), - **shaped_parameter('indices', 
int64_array(indices_shape)), - **valued_const_with_data('axis', int64_array(axis)), - **regular_op_with_empty_data('gather', {'op': 'Gather', 'batch_dims': batch_dims, 'infer': Gather.infer}), - **result('res'), - } - - edges = [ - *connect('data', '0:gather'), - *connect('indices', '1:gather'), - *connect('axis', '2:gather'), - *connect('gather', 'res') - ] - - graph = build_graph(nodes, edges) - graph.stage = 'middle' - partial_infer(graph) - - node = Node(graph, 'gather') - res = node.out_port(0).data.get_shape() - npt.assert_array_equal(res, ref_shape) - - def test_shape_axis_1(self): - self.build_and_test_shape_inference(axis=1, batch_dims=0, - data_shape=[3, 3], - indices_shape=[1, 2], - ref_shape=[3, 1, 2]) - - def test_shape_axis_1_1(self): - self.build_and_test_shape_inference(axis=1, batch_dims=0, - data_shape=[3, 3], - indices_shape=[1, 2, 4], - ref_shape=[3, 1, 2, 4]) - - def test_shape_axis_1_2(self): - self.build_and_test_shape_inference(axis=1, batch_dims=0, - data_shape=[1, 2, 4], - indices_shape=[3, 3], - ref_shape=[1, 3, 3, 4]) - - def test_shape_axis_1_3(self): - self.build_and_test_shape_inference(axis=1, batch_dims=0, - data_shape=[1, 2, 4], - indices_shape=[5, 8, 16], - ref_shape=[1, 5, 8, 16, 4]) - - def test_shape_axis_0(self): - self.build_and_test_shape_inference(axis=0, batch_dims=0, - data_shape=[3, 3], - indices_shape=[1, 2], - ref_shape=[1, 2, 3]) - - def test_shape_axis_0_1(self): - self.build_and_test_shape_inference(axis=0, batch_dims=0, - data_shape=[3, 3], - indices_shape=[1, 2, 5], - ref_shape=[1, 2, 5, 3]) - - def test_shape_axis_0_2(self): - self.build_and_test_shape_inference(axis=0, batch_dims=0, - data_shape=[1, 2, 5], - indices_shape=[3, 3], - ref_shape=[3, 3, 2, 5]) - - def test_shape_axis_0_3(self): - self.build_and_test_shape_inference(axis=0, batch_dims=0, - data_shape=[1, 2, 5], - indices_shape=[6, 8, 15], - ref_shape=[6, 8, 15, 2, 5]) - - def test_shape_axis_minus_2(self): - self.build_and_test_shape_inference(axis=-2, batch_dims=0, - data_shape=[2, 3, 7], - indices_shape=[1, 4], - ref_shape=[2, 1, 4, 7]) - - def test_shape_axis_1_batch_dims_1(self): - self.build_and_test_shape_inference(axis=1, batch_dims=1, - data_shape=[3, 4], - indices_shape=[3, 1, 2], - ref_shape=[3, 1, 2]) - - def test_shape_axis_2_batch_dims_1(self): - self.build_and_test_shape_inference(axis=2, batch_dims=1, - data_shape=[3, 4, 7], - indices_shape=[3, 1, 2], - ref_shape=[3, 4, 1, 2]) - - def test_shape_axis_2_batch_dims_minus_1(self): - self.build_and_test_shape_inference(axis=2, batch_dims=-1, - data_shape=[3, 1, 7], - indices_shape=[3, 1, 2], - ref_shape=[3, 1, 2]) - - def test_shape_axis_2_batch_dims_minus_2(self): - self.build_and_test_shape_inference(axis=2, batch_dims=-2, - data_shape=[3, 4, 7], - indices_shape=[3, 1, 2], - ref_shape=[3, 4, 1, 2]) - - def test_axis_0_batch_dims_0(self): - self.build_and_test_value_inference(axis=0, batch_dims=0, - data=[1, 2, 3, 4, 5], - indices=[0, 0, 4], - ref_value=[1, 1, 5]) - - def test_axis_0_batch_dims_0_negative_indices(self): - self.build_and_test_value_inference(axis=0, batch_dims=0, - data=[1, 2, 3, 4, 5], - indices=[-1, -2, -3], - ref_value=[5, 4, 3]) - - def test_axis_1_batch_dims_1(self): - self.build_and_test_value_inference(axis=1, batch_dims=1, - data=[[1, 2, 3, 4, 5], - [6, 7, 8, 9, 10]], - indices=[[0, 0, 4], - [4, 0, 0]], - - ref_value=[[1, 1, 5], - [10, 6, 6]]) - - def test_axis_minus_1_batch_dims_1(self): - self.build_and_test_value_inference(axis=-1, batch_dims=1, - data=[[1, 2, 3, 4, 5], - [6, 7, 8, 9, 
10]], - indices=[[0, 0, 4], - [4, 0, 0]], - - ref_value=[[1, 1, 5], - [10, 6, 6]]) - - def test_axis_2_batch_dims_1(self): - self.build_and_test_value_inference(axis=2, batch_dims=1, - data=[[[[ 1, 2, 3, 4], # <-- first batch - [ 5, 6, 7, 8], - [ 9, 10, 11, 12], - [13, 14, 15, 16], - [17, 18, 19, 20]]], - [[[21, 22, 23, 24], # < -- second batch - [25, 26, 27, 28], - [29, 30, 31, 32], - [33, 34, 35, 36], - [37, 38, 39, 40]]]], # data_shape = (2, 1, 5, 4) - indices=[[1, 2, 4], - [4, 3, 2]], - ref_value=[[[[ 5, 6, 7, 8], - [ 9, 10, 11, 12], - [17, 18, 19, 20]]], - [[[37, 38, 39, 40], - [33, 34, 35, 36], - [29, 30, 31, 32]]]]) - - def test_axis_2_batch_dims_1_with_negative_indices(self): - self.build_and_test_value_inference(axis=2, batch_dims=1, - data=[[[[ 1, 2, 3, 4], # <-- first batch - [ 5, 6, 7, 8], - [ 9, 10, 11, 12], - [13, 14, 15, 16], - [17, 18, 19, 20]]], - [[[21, 22, 23, 24], # < -- second batch - [25, 26, 27, 28], - [29, 30, 31, 32], - [33, 34, 35, 36], - [37, 38, 39, 40]]]], # data_shape = (2, 1, 5, 4) - indices=[[-4, -3, -1], - [-1, 3, 2]], - ref_value=[[[[ 5, 6, 7, 8], - [ 9, 10, 11, 12], - [17, 18, 19, 20]]], - [[[37, 38, 39, 40], - [33, 34, 35, 36], - [29, 30, 31, 32]]]]) - - def test_axis_2_batch_dims_mimus_1(self): - self.build_and_test_value_inference(axis=2, batch_dims=-1, - data=[[[[ 1, 2, 3, 4], # <-- first batch - [ 5, 6, 7, 8], - [ 9, 10, 11, 12], - [13, 14, 15, 16], - [17, 18, 19, 20]]], - [[[21, 22, 23, 24], # < -- second batch - [25, 26, 27, 28], - [29, 30, 31, 32], - [33, 34, 35, 36], - [37, 38, 39, 40]]]], # data_shape = (2, 1, 5, 4) - indices=[[1, 2, 4], - [4, 3, 2]], - ref_value=[[[[ 5, 6, 7, 8], - [ 9, 10, 11, 12], - [17, 18, 19, 20]]], - [[[37, 38, 39, 40], - [33, 34, 35, 36], - [29, 30, 31, 32]]]]) - - # negative tests - def test_shape_indices_data_shape_inconsistency(self): - self.assertRaises(Error, self.build_and_test_shape_inference, - axis=2, batch_dims=2, - data_shape=[3, 4, 7], - indices_shape=[3, 1, 2], - ref_shape=[3, 4, 2]) - - def test_shape_batch_dims_greater_than_axis(self): - self.assertRaises(Error, self.build_and_test_shape_inference, - axis=2, batch_dims=3, - data_shape=[3, 4, 7], - indices_shape=[3, 4, 2], - ref_shape=[3, 4, 2]) - - def test_shape_batch_dims_out_of_bound(self): - self.assertRaises(Error, self.build_and_test_shape_inference, - axis=2, batch_dims=4, - data_shape=[3, 4, 7], - indices_shape=[3, 4, 2], - ref_shape=[3, 4, 2]) - - def test_shape_axis_out_of_bound(self): - self.assertRaises(Error, self.build_and_test_shape_inference, - axis=3, batch_dims=2, - data_shape=[3, 4, 7], - indices_shape=[3, 4, 2], - ref_shape=[3, 4, 2]) - - -dyn = dynamic_dimension_value - - -class TestElementwiseReverseInfer(UnitTestWithMockedTelemetry): - @staticmethod - def build_and_test_reverse_inference(data_shape, indices_shape, axis, batch_dims, out_shape, ref_shape): - in_port_with_defined_shape = 0 if data_shape is not None else 1 - defined_shape = shape_array(data_shape if data_shape is not None else indices_shape) - - nodes = { - **shaped_parameter('data', data_shape, {'reverse_infer': Parameter.reverse_infer}), - **shaped_parameter('indices', indices_shape, {'reverse_infer': Parameter.reverse_infer}), - **valued_const_with_data('axis', int64_array(axis)), - **regular_op_with_empty_data('gather', {'op': 'Gather', 'batch_dims': batch_dims, - 'infer': Gather.infer, - 'reverse_infer': Gather.reverse_infer}), - **result('res'), - } - - edges = [ - *connect('data', '0:gather'), - *connect('indices', '1:gather'), - *connect('axis', '2:gather'), - 
*connect('gather', 'res') - ] - - graph = build_graph(nodes, edges) - graph.stage = 'middle' - - Node(graph, 'gather').out_port(0).data.set_shape(shape_array(out_shape)) - Node(graph, 'gather').in_port(in_port_with_defined_shape).data.set_shape(defined_shape) - - partial_infer(graph) - actual_shape = Node(graph, 'gather').in_port(int(not in_port_with_defined_shape)).data.get_shape() - assert strict_compare_tensors(actual_shape, shape_array(ref_shape)) - - # undefined indices pshape - def test_reverse_infer_1(self): - self.build_and_test_reverse_inference(data_shape=[dyn, dyn], - indices_shape=None, - axis=0, - batch_dims=0, - out_shape=[dyn, dyn, dyn, dyn], - ref_shape=[dyn, dyn, dyn]) - - def test_reverse_infer_2(self): - self.build_and_test_reverse_inference(data_shape=[3, 10], - indices_shape=None, - axis=1, - batch_dims=0, - out_shape=[3, 40, 50, 60], - ref_shape=[40, 50, 60]) - - # undefined data pshape - def test_reverse_infer_3(self): - self.build_and_test_reverse_inference(data_shape=None, - indices_shape=[4, 5], - axis=0, - batch_dims=0, - out_shape=[4, 5, 10], - ref_shape=[dyn, 10]) - - def test_reverse_infer_4(self): - self.build_and_test_reverse_inference(data_shape=None, - indices_shape=[4, 67], - axis=1, - batch_dims=0, - out_shape=[3, 4, 67, 100], - ref_shape=[3, dyn, 100]) diff --git a/tools/mo/unit_tests/mo/ops/gatherelements_test.py b/tools/mo/unit_tests/mo/ops/gatherelements_test.py deleted file mode 100644 index 63f187ff14083e..00000000000000 --- a/tools/mo/unit_tests/mo/ops/gatherelements_test.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.gatherelements import GatherElements -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, strict_compare_tensors, dynamic_dimension -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect, \ - valued_const_with_data, shaped_parameter - -dyn = dynamic_dimension - -class TestGatherElementsInferTest(): - @pytest.mark.parametrize("data, indices, axis, ref_res",[ - ([[1, 2], - [3, 4]], - [[0, 1], - [0, 0]], - 0, # axis - [[1, 4], # ref_res - [1, 2]]), - - ([[1, 2], - [3, 4]], - [[0, 1], - [0, 0]], - 1, # axis - [[1, 2], # ref_res - [3, 3]]), - - ([[1, 2, 3], - [4, 5, 6], - [7, 8, 9]], - [[1, 2, 0], - [2, 0, 0]], - 0, # axis - [[4, 8, 3], # ref_res - [7, 2, 3]]), - - ([[1, 2], - [3, 4]], - [[0, 1], - [0, 0]], - -1, # axis - [[1, 2], # ref_res - [3, 3]]), - - ([ # 3D case - [[1, 2], - [3, 4]], - [[5, 6], - [7, 8]], - [[9, 10], - [11, 12]] - ], - [ - [[1, 0], - [0, 1]], - [[1, 1], - [1, 0]], - [[0, 0], - [1, 1]] - ], - -1, # axis - [ - [[2, 1], - [3, 4]], - [[6, 6], - [8, 7]], - [[9, 9], - [12, 12]] - ]), - ]) - def test_gatherelements_value_infer(self, data, indices, axis, ref_res): - nodes = { - **valued_const_with_data('data', int64_array(data)), - **valued_const_with_data('indices', int64_array(indices)), - **regular_op_with_empty_data('gather_elements', {'op': 'GatherElements', 'axis': axis}), - **result() - } - - graph = build_graph(nodes_attrs=nodes, edges=[ - *connect('data', '0:gather_elements'), - *connect('indices', '1:gather_elements'), - *connect('gather_elements', 'output') - ], nodes_with_edges_only=True) - graph.stage = 'middle' - - gather_el_node = Node(graph, 'gather_elements') - GatherElements.infer(gather_el_node) - - 
res_output_shape = gather_el_node.out_node().shape - assert np.array_equal(int64_array(ref_res).shape, res_output_shape) - - res_output_value = gather_el_node.out_node().value - if res_output_value is not None: - assert np.array_equal(int64_array(ref_res), res_output_value) - - def check_shape_infer(self, data_shape, indices_shape, axis, ref): - nodes = { - **shaped_parameter('data', data_shape), - **shaped_parameter('indices', indices_shape), - **regular_op_with_empty_data('gather_elements', {'op': 'GatherElements', 'axis': axis}), - **result() - } - - graph = build_graph(nodes_attrs=nodes, edges=[ - *connect('data', '0:gather_elements'), - *connect('indices', '1:gather_elements'), - *connect('gather_elements', 'output') - ], nodes_with_edges_only=True) - graph.stage = 'middle' - - gather_el_node = Node(graph, 'gather_elements') - GatherElements.infer(gather_el_node) - - res_output_shape = gather_el_node.out_node().shape - assert strict_compare_tensors(res_output_shape, ref) - - def test_shape_infer_1(self): - self.check_shape_infer(data_shape=[3], indices_shape=[100], ref=[100], axis=0) - - def test_shape_infer_2(self): - self.check_shape_infer(data_shape=[100, 4], indices_shape=[4, 4], ref=[4, 4], axis=0) - - def test_shape_infer_3(self): - self.check_shape_infer(data_shape=[3, 4], indices_shape=[3, 100], ref=[3, 100], axis=1) - - def test_shape_infer_4(self): - self.check_shape_infer(data_shape=[1, 3, 256], indices_shape=[256, 3, 256], ref=[256, 3, 256], axis=0) - - def test_shape_infer_5(self): - self.check_shape_infer(data_shape=[1, 3, 256], indices_shape=[1, 1024, 256], ref=[1, 1024, 256], axis=1) - - def test_shape_infer_6(self): - self.check_shape_infer(data_shape=[1, 3, 256], indices_shape=[1, 3, 1024], ref=[1, 3, 1024], axis=2) - - def test_shape_infer_7(self): - self.check_shape_infer(data_shape=[1, 25, 64, 256], indices_shape=[1, 25, 64, 2], ref=[1, 25, 64, 2], axis=-1) - - # dynamic dimensions - def test_shape_infer_8(self): - self.check_shape_infer(data_shape=[dyn, 4], indices_shape=[3, 100], ref=[3, 100], axis=1) - - def test_shape_infer_9(self): - self.check_shape_infer(data_shape=[100, 4], indices_shape=[dyn, 100], ref=[100, 100], axis=1) - - def test_shape_infer_10(self): - self.check_shape_infer(data_shape=[1, 3, 256], indices_shape=[dyn, 3, 256], ref=[dyn, 3, 256], axis=0) - - def test_shape_infer_11(self): - self.check_shape_infer(data_shape=[dyn, dyn, dyn], indices_shape=[dyn, dyn, dyn], ref=[dyn, dyn, dyn], axis=0) - - def test_shape_infer_12(self): - self.check_shape_infer(data_shape=[1, 3, 256], indices_shape=[dyn, 1024, dyn], ref=[1, 1024, 256], axis=1) - - def test_shape_infer_13(self): - self.check_shape_infer(data_shape=[1, 3, 256], indices_shape=[dyn, dyn, 1024], ref=[1, 3, 1024], axis=2) - - # negative tests - def test_negative_shape_infer_ranks_differ(self): - with pytest.raises(AssertionError): - self.check_shape_infer(data_shape=[1, 3, 64], indices_shape=[1, 3], ref=[1, 3, 1024], axis=2) - - def test_negative_shape_infer_axis_out_of_bound(self): - with pytest.raises(AssertionError): - self.check_shape_infer(data_shape=[1, 4, 64], indices_shape=[1, 3, 64], ref=[1, 3, 1024], axis=20) - - def test_negative_shape_infer_inconsistent_shapes(self): - with pytest.raises(Error): - self.check_shape_infer(data_shape=[1, 4, 64], indices_shape=[1, 3, 64], ref=[1, 3, 1024], axis=2) diff --git a/tools/mo/unit_tests/mo/ops/gathernd_test.py b/tools/mo/unit_tests/mo/ops/gathernd_test.py deleted file mode 100644 index ae1d28dd65ea68..00000000000000 --- 
a/tools/mo/unit_tests/mo/ops/gathernd_test.py +++ /dev/null @@ -1,456 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.gathernd import GatherND -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'data': {'kind': 'op'}, - 'data_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'indices': {'kind': 'op'}, - 'indices_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'gathernd_node': {'op': 'GatherNDUpdate', 'kind': 'op', 'batch_dims': 0, 'version': 'opset8'}, - 'output': {'shape': None, 'value': None, 'kind': 'data'}} - -# graph 1 -edges = [('data', 'data_data', {'in': 0}), - ('indices', 'indices_data', {'in': 1}), - ('data_data', 'gathernd_node', {'in': 0}), - ('indices_data', 'gathernd_node', {'in': 1}), - ('gathernd_node', 'output', {'out': 0})] - -# test data for partial infer: gather elements -inputs = {'data_data': {'shape': int64_array([10, 40]), 'value': None}, - 'indices_data': {'shape': int64_array([3, 2]), 'value': None}} - -# test data for partial infer: gather slices -inputs1 = {'data_data': {'shape': int64_array([10, 40, 30]), 'value': None}, - 'indices_data': {'shape': int64_array([3, 2]), 'value': None}} - -# test data for partial infer: gather slices and batch_dims=2 -inputs2 = {'data_data': {'shape': int64_array([10, 40, 4, 9]), 'value': None}, - 'indices_data': {'shape': int64_array([10, 40, 3, 5, 1]), 'value': None}} - -# test data for partial infer: gather slices and batch_dims=3 and indices.shape[-1]=len(data.shape)-batch_dims -inputs3 = {'data_data': {'shape': int64_array([1, 64, 64, 320]), 'value': None}, - 'indices_data': {'shape': int64_array([1, 64, 64, 1, 1]), 'value': None}} - -# test data for constant folding: gather elements, batch_dims = 0 -inputs4 = {'data_data': {'shape': int64_array([2, 2]), 'value': int64_array([[1, 2], - [3, 4]])}, - 'indices_data': {'shape': int64_array([2, 2]), 'value': int64_array([[0, 0], - [1, 0]])}} -output4 = int64_array([1, 3]) - -# test data for constant folding: gather slices, batch_dims = 0 -inputs5 = {'data_data': {'shape': int64_array([2, 3, 4]), 'value': int64_array([[[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]], - [[13, 14, 15, 16], - [17, 18, 19, 20], - [21, 22, 23, 24]]])}, - 'indices_data': {'shape': int64_array([3, 2]), 'value': int64_array([[0, 1], - [1, 0], - [1, 2]])}} -output5 = int64_array([[5, 6, 7, 8], - [13, 14, 15, 16], - [21, 22, 23, 24]]) - -# test data for constant folding: gather slices, batch_dims = 1 -inputs6 = {'data_data': {'shape': int64_array([2, 3, 4]), 'value': int64_array([[[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]], - [[13, 14, 15, 16], - [17, 18, 19, 20], - [21, 22, 23, 24]]])}, - 'indices_data': {'shape': int64_array([2, 1]), 'value': int64_array([[1], - [0]])}} -output6 = int64_array([[5, 6, 7, 8], - [13, 14, 15, 16]]) - -# test data for constant folding: gather slices with leading dimensions, batch_dims = 2 -inputs7 = {'data_data': {'shape': int64_array([2, 3, 4]), 'value': int64_array([[[1, 2, 3, 4], - [5, 6, 7, 8], - [9, 10, 11, 12]], - [[13, 14, 15, 16], - [17, 18, 19, 20], - [21, 22, 23, 24]]])}, - 'indices_data': {'shape': int64_array([2, 3, 1, 1]), 'value': int64_array([[[[1]], - [[0]], - [[2]]], - [[[0]], - [[2]], - [[2]]]])}} -output7 = 
int64_array([[2], [5], [11], [13], [19], [23]]) - -# test data for constant folding: gather elements, batch_dims = 2 -inputs8 = {'data_data': {'shape': int64_array([2, 3, 4, 2]), - 'value': int64_array([[[[1, 2], [3, 4], [5, 6], [7, 8]], - [[9, 10], [11, 12], [13, 14], [15, 16]], - [[17, 18], [19, 20], [21, 22], [23, 24]]], - [[[25, 26], [27, 28], [29, 30], [31, 32]], - [[33, 34], [35, 36], [37, 38], [39, 40]], - [[41, 42], [43, 44], [45, 46], [47, 48]]]])}, - 'indices_data': {'shape': int64_array([2, 3, 3, 2]), - 'value': int64_array([[[[1, 0], [3, 1], [2, 1]], - [[0, 1], [1, 1], [2, 0]], - [[3, 0], [3, 1], [2, 1]]], - [[[2, 0], [1, 1], [3, 1]], - [[1, 1], [2, 0], [2, 0]], - [[0, 0], [3, 1], [3, 1]]]])}} -output8 = int64_array([[3, 8, 6], - [10, 12, 13], - [23, 24, 22], - [29, 28, 32], - [36, 37, 37], - [41, 48, 48]]) - -# test data for partial infer: gather slices and batch_dims=2 -inputs9 = {'data_data': {'shape': shape_array([dynamic_dimension_value, 40, 4, 9]), 'value': None}, - 'indices_data': {'shape': shape_array([dynamic_dimension_value, 40, 3, 5, 1]), 'value': None}} - -# test data for partial infer: gather slices and batch_dims=2 -inputs10 = {'data_data': {'shape': shape_array([40, dynamic_dimension_value, 4, 9]), 'value': None}, - 'indices_data': {'shape': shape_array([40, dynamic_dimension_value, 3, 5, 1]), 'value': None}} - -# test data for partial infer: gather slices and batch_dims=2 -inputs11 = {'data_data': {'shape': shape_array([dynamic_dimension_value, 40, 4, 9]), 'value': None}, - 'indices_data': {'shape': shape_array([40, dynamic_dimension_value, 3, 5, 1]), 'value': None}} - -# invalid test case with incorrect rank for indices -inputs_inv1 = {'data_data': {'shape': int64_array([10, 40]), 'value': None}, - 'indices_data': {'shape': int64_array([5, 3, 4]), 'value': None}} - -# invalid test case with unequal batch dimensions, batch_dims = 2 -inputs_inv2 = {'data_data': {'shape': int64_array([10, 40, 20]), 'value': None}, - 'indices_data': {'shape': int64_array([5, 3, 4]), 'value': None}} - -# invalid test case with indices rank greater than a rank of data excluding batch dimensions, batch_dims = 2 -inputs_inv3 = {'data_data': {'shape': int64_array([10, 40, 20, 10, 2]), 'value': None}, - 'indices_data': {'shape': int64_array([10, 40, 4]), 'value': None}} - - -class TestGatherND_5(unittest.TestCase): - def setUp(self): - nodes_attributes['gathernd_node']['batch_dims'] = 0 - nodes_attributes['gathernd_node']['version'] = 'opset5' - - def test_partial_infer_gather_element(self): - graph = build_graph(nodes_attributes, edges, inputs) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = int64_array([3]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer_gather_slice(self): - graph = build_graph(nodes_attributes, edges, inputs1) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = int64_array([3, 30]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer_gather_slice_batch_dims2(self): - 
nodes_attributes['gathernd_node']['batch_dims'] = 2 - graph = build_graph(nodes_attributes, edges, inputs2) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = int64_array([400, 3, 5, 9]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer_gather_slice_batch_dims3(self): - nodes_attributes['gathernd_node']['batch_dims'] = 3 - graph = build_graph(nodes_attributes, edges, inputs3) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = int64_array([4096, 1]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer_gather_slice_batch_dims2_dynamic1(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - graph = build_graph(nodes_attributes, edges, inputs9) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = shape_array([dynamic_dimension_value, 3, 5, 9]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(strict_compare_tensors(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer_gather_slice_batch_dims2_dynamic2(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - graph = build_graph(nodes_attributes, edges, inputs10) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = shape_array([dynamic_dimension_value, 3, 5, 9]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(strict_compare_tensors(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer_gather_slice_batch_dims2_dynamic3(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - graph = build_graph(nodes_attributes, edges, inputs11) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = shape_array([dynamic_dimension_value, 3, 5, 9]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(strict_compare_tensors(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_infer4(self): - graph = build_graph(nodes_attributes, edges, inputs4) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(output4, res_output_value), - 'values do not match expected: {} and given: {}'.format(output4, res_output_value)) - - def test_infer5(self): - graph = build_graph(nodes_attributes, edges, inputs5) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(output5, 
res_output_value), - 'values do not match expected: {} and given: {}'.format(output5, res_output_value)) - - def test_infer6(self): - nodes_attributes['gathernd_node']['batch_dims'] = 1 - graph = build_graph(nodes_attributes, edges, inputs6) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(output6, res_output_value), - 'values do not match expected: {} and given: {}'.format(output6, res_output_value)) - - def test_infer7(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - graph = build_graph(nodes_attributes, edges, inputs7) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - output = output7.reshape([6, 1]) - self.assertTrue(np.array_equal(output, res_output_value), - 'values do not match expected: {} and given: {}'.format(output, res_output_value)) - - def test_infer8(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - graph = build_graph(nodes_attributes, edges, inputs8) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(output8, res_output_value), - 'values do not match expected: {} and given: {}'.format(output8, res_output_value)) - - def test_infer9(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - graph = build_graph(nodes_attributes, edges, inputs8) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(output8, res_output_value), - 'values do not match expected: {} and given: {}'.format(output8, res_output_value)) - - def test_infer9_opset_5(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - graph = build_graph(nodes_attributes, edges, inputs8) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - output = output8.reshape([6, 3]) - self.assertTrue(np.array_equal(output, res_output_value), - 'values do not match expected: {} and given: {}'.format(output, res_output_value)) - - def test_infer_invalid1(self): - graph = build_graph(nodes_attributes, edges, inputs_inv1) - gathernd_node = Node(graph, 'gathernd_node') - self.assertRaises(AssertionError, GatherND.infer, gathernd_node) - - def test_infer_invalid2(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - graph = build_graph(nodes_attributes, edges, inputs_inv2) - gathernd_node = Node(graph, 'gathernd_node') - self.assertRaises(AssertionError, GatherND.infer, gathernd_node) - - def test_infer_invalid3(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - graph = build_graph(nodes_attributes, edges, inputs_inv3) - gathernd_node = Node(graph, 'gathernd_node') - self.assertRaises(AssertionError, GatherND.infer, gathernd_node) - - - def test_partial_infer_gather_slice_batch_dims2_opset8(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - nodes_attributes['gathernd_node']['version'] = 'opset8' - graph = build_graph(nodes_attributes, edges, inputs2) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = int64_array([10, 40, 3, 5, 9]) - - # get the result - res_output_shape = 
graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer_gather_slice_batch_dims3_opset8(self): - nodes_attributes['gathernd_node']['batch_dims'] = 3 - nodes_attributes['gathernd_node']['version'] = 'opset8' - graph = build_graph(nodes_attributes, edges, inputs3) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = int64_array([1, 64, 64, 1]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer_gather_slice_batch_dims2_dynamic1_opset8(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - nodes_attributes['gathernd_node']['version'] = 'opset8' - graph = build_graph(nodes_attributes, edges, inputs9) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = shape_array([dynamic_dimension_value, 40, 3, 5, 9]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(strict_compare_tensors(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer_gather_slice_batch_dims2_dynamic2_opset8(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - nodes_attributes['gathernd_node']['version'] = 'opset8' - graph = build_graph(nodes_attributes, edges, inputs10) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = shape_array([40, dynamic_dimension_value, 3, 5, 9]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(strict_compare_tensors(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer_gather_slice_batch_dims2_dynamic3_opset8(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - nodes_attributes['gathernd_node']['version'] = 'opset8' - graph = build_graph(nodes_attributes, edges, inputs11) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # prepare reference results - ref_output_shape = shape_array([40, 40, 3, 5, 9]) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(strict_compare_tensors(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_infer7_opset8(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - nodes_attributes['gathernd_node']['version'] = 'opset8' - graph = build_graph(nodes_attributes, edges, inputs7) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - output = output7.reshape([2, 3, 1]) - - self.assertTrue(np.array_equal(output, res_output_value), - 'values do not match expected: {} and given: {}'.format(output, res_output_value)) - - def test_infer8_opset8(self): - nodes_attributes['gathernd_node']['batch_dims'] = 2 - nodes_attributes['gathernd_node']['version'] = 'opset8' - graph = 
build_graph(nodes_attributes, edges, inputs8) - gathernd_node = Node(graph, 'gathernd_node') - GatherND.infer(gathernd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - output = output8.reshape([2, 3, 3]) - - self.assertTrue(np.array_equal(output, res_output_value), - 'values do not match expected: {} and given: {}'.format(output, res_output_value)) diff --git a/tools/mo/unit_tests/mo/ops/grn_test.py b/tools/mo/unit_tests/mo/ops/grn_test.py deleted file mode 100644 index 09c8f1ad89c991..00000000000000 --- a/tools/mo/unit_tests/mo/ops/grn_test.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'}, - 'grn': {'type': 'GRN', 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'kind': 'op'}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - } - - -class TestGRNOp(unittest.TestCase): - def test_grn_infer(self): - graph = build_graph(nodes_attributes, - [('node_1', 'grn'), - ('grn', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227])}, - 'grn': {'bias': 1} - }) - - grn_node = Node(graph, 'grn') - copy_shape_infer(grn_node) - exp_shape = np.array([1, 3, 227, 227]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) diff --git a/tools/mo/unit_tests/mo/ops/instance_normalization_test.py b/tools/mo/unit_tests/mo/ops/instance_normalization_test.py deleted file mode 100644 index bdf483b77c9e69..00000000000000 --- a/tools/mo/unit_tests/mo/ops/instance_normalization_test.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.ops.instance_normalization import InstanceNormalization -from openvino.tools.mo.graph.graph import Graph - - -class InstanceNormalizationOp(unittest.TestCase): - def test_constructor_supported_attrs(self): - graph = Graph() - op = InstanceNormalization(graph, attrs={'epsilon': 0.1}) - self.assertEqual(op.supported_attrs(), ['epsilon']) diff --git a/tools/mo/unit_tests/mo/ops/interpolate_test.py b/tools/mo/unit_tests/mo/ops/interpolate_test.py deleted file mode 100644 index 48b1cdf7f9c279..00000000000000 --- a/tools/mo/unit_tests/mo/ops/interpolate_test.py +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.interpolate import Interpolate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - - -graph_node_attrs_without_axes = { - 'input': {'type': 'Parameter', 'kind': 'op'}, - 'input_data': {'kind': 'data', 'shape': None, 'value': None}, - 'sizes': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'sizes_data': {'kind': 'data', 'shape': None, 'value': None}, - 'scales': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'scales_data': {'kind': 'data', 'shape': None, 'value': None}, - 'interpolate': { - 'type': 'Interpolate', 'kind': 'op', 'mode': 'nearest', 'shape_calculation_mode': 
'sizes', - 'coordinate_transformation_mode': 'half_pixel', 'version': 'opset4', - 'nearest_mode': 'round_prefer_floor', 'antialias': 0, - }, - 'interpolate_data': {'kind': 'data', 'value': None, 'shape': None}, - 'op_output': {'kind': 'op', 'op': 'Result'}, -} - -graph_edges_without_axes = [ - ('input', 'input_data'), - ('sizes', 'sizes_data'), - ('scales', 'scales_data'), - ('input_data', 'interpolate', {'in': 0}), - ('sizes_data', 'interpolate', {'in': 1}), - ('scales_data', 'interpolate', {'in': 2}), - ('interpolate', 'interpolate_data'), - ('interpolate_data', 'op_output'), -] - - -graph_nodes_attrs = { - 'input': {'type': 'Parameter', 'kind': 'op'}, - 'input_data': {'kind': 'data', 'shape': None, 'value': None}, - 'sizes': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'sizes_data': {'kind': 'data', 'shape': None, 'value': None}, - 'scales': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'scales_data': {'kind': 'data', 'shape': None, 'value': None}, - 'axes': {'type': 'Const', 'kind': 'op', 'shape': None, 'value': None}, - 'axes_data': {'kind': 'data', 'shape': None, 'value': None}, - 'interpolate': { - 'type': 'Interpolate', 'kind': 'op', 'mode': 'nearest', 'shape_calculation_mode': 'sizes', - 'coordinate_transformation_mode': 'half_pixel', 'version': 'opset4', - 'nearest_mode': 'round_prefer_floor', 'antialias': 0, - }, - 'interpolate_data': {'kind': 'data', 'value': None, 'shape': None}, - 'op_output': {'kind': 'op', 'op': 'Result'}, -} - -graph_edges = [ - ('input', 'input_data'), - ('sizes', 'sizes_data'), - ('scales', 'scales_data'), - ('axes', 'axes_data'), - ('input_data', 'interpolate', {'in': 0}), - ('sizes_data', 'interpolate', {'in': 1}), - ('scales_data', 'interpolate', {'in': 2}), - ('axes_data', 'interpolate', {'in': 3}), - ('interpolate', 'interpolate_data'), - ('interpolate_data', 'op_output'), -] - - -class TestInterpolateOp(): - @pytest.mark.parametrize("pads_begin, pads_end, input_shape, output_shape, sizes, scales, axes", - [([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [350, 150], [3.5, 150 / 200], [2, 3]), - ([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600], - [8, 390, 600], [0.5, 390 / 200, 600 / 410], [0, 2, 3]), - ([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028], - [56, 520], [4.0, 0.5], [0, 2]), - ([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380], - [20, 40, 1380], [20.0, 40.0 / 85.0, 1380.0 / 690.0], [0, 2, 4]), - ([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345], - [60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4]), - ([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050], - [100, 308, 4440, 44], [20.0, 4.0, 10.0, 0.5], [0, 1, 2, 3]), - ([0], [0], [1, 100, 200], [1, 350, 150], [350, 150], [3.5, 150 / 200], [1, 2]), - ([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 390], [0.5, 390 / 200], [0, 2]), - ([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520], [4.0, 0.5], [0, 1]), - ([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0], [0, 1]), - ([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349], - [60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4]) - ]) - def test_interpolate4_using_sizes(self, pads_begin, pads_end, input_shape, output_shape, sizes, scales, axes): - graph = build_graph(nodes_attrs=graph_nodes_attrs, - edges=graph_edges, - update_attributes={ - 'input_data': {'shape': input_shape}, - 'sizes': {'shape': int64_array(sizes).shape, 'value': 
int64_array(sizes)}, - 'sizes_data': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)}, - 'scales': {'shape': np.array(scales).shape, 'value': np.array(scales)}, - 'scales_data': {'shape': np.array(scales).shape, 'value': np.array(scales)}, - 'axes': {'shape': int64_array(axes).shape, 'value': int64_array(axes)}, - 'axes_data': {'shape': int64_array(axes).shape, 'value': int64_array(axes)}, - 'interpolate': {'pads_begin': int64_array(pads_begin), - 'pads_end': int64_array(pads_end)} - }) - - node = Node(graph, 'interpolate') - tested_class = Interpolate(graph=graph, attrs=node.attrs()) - tested_class.infer(node) - - msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}, axes={}," \ - " expected_shape={}, actual_shape={}" - - assert np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),\ - msg.format(sizes, scales, pads_begin, pads_end, axes, output_shape, - graph.node['interpolate_data']['shape']) - - @pytest.mark.parametrize("pads_begin, pads_end, input_shape, output_shape, sizes, scales, axes", - [([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [350, 150], [3.5, 150 / 200], [2, 3]), - ([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600], - [8, 390, 600], [0.5, 390 / 200, 600 / 410], [0, 2, 3]), - ([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028], - [56, 520], [4.0, 0.5], [0, 2]), - ([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380], - [20, 40, 1380], [20.0, 40.0 / 85.0, 1380.0 / 690.0], [0, 2, 4]), - ([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345], - [60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4]), - ([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050], - [100, 308, 4440, 44], [20.0, 4.0, 10.0, 0.5], [0, 1, 2, 3]), - ([0], [0], [1, 100, 200], [1, 350, 150], [350, 150], [3.5, 150 / 200], [1, 2]), - ([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 390], [0.5, 390 / 200], [0, 2]), - ([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520], [4.0, 0.5], [0, 1]), - ([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0], [0, 1]), - ([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349], - [60, 430, 345], [10.0, 4.3, 345.0 / 700.0], [0, 2, 4]), - ([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3], - [60, 22, 430, 500, 345, 349, 1], - [60, 430, 345, 1], [10.0, 4.3, 345.0 / 700.0, 1 / 3], [0, 2, 4, 6]), - ([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3], - [60, 22, 430, 500, 345, 349, 1], - [60, 430, 345, 1], [10.0, 4.3, 345.0 / 700.0, 0.3333333], [0, 2, 4, 6]), - ]) - def test_interpolate4_using_scales(self, pads_begin, pads_end, input_shape, output_shape, sizes, scales, axes): - graph = build_graph(nodes_attrs=graph_nodes_attrs, - edges=graph_edges, - update_attributes={ - 'input_data': {'shape': input_shape}, - 'sizes': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)}, - 'sizes_data': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)}, - 'scales': {'shape': np.array(scales).shape, 'value': np.array(scales)}, - 'scales_data': {'shape': np.array(scales).shape, 'value': np.array(scales)}, - 'axes': {'shape': int64_array(axes).shape, 'value': int64_array(axes)}, - 'axes_data': {'shape': int64_array(axes).shape, 'value': int64_array(axes)}, - 'interpolate': {'pads_begin': int64_array(pads_begin), - 'pads_end': int64_array(pads_end), - 'shape_calculation_mode': 'scales'} - }) 
- - node = Node(graph, 'interpolate') - tested_class = Interpolate(graph=graph, attrs=node.attrs()) - tested_class.infer(node) - - msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}, axes={}," \ - " expected_shape={}, actual_shape={}" - - assert np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),\ - msg.format(sizes, scales, pads_begin, pads_end, axes, output_shape, - graph.node['interpolate_data']['shape']) - - @pytest.mark.parametrize("pads_begin, pads_end, input_shape, output_shape, sizes, scales", - [([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]), - ([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600], - [8, 10, 390, 600], [0.5, 1.0, 390 / 200, 600 / 410]), - ([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028], - [56, 42, 520, 8028], [4.0, 1.0, 0.5, 1.0]), - ([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380], - [20, 16, 40, 470, 1380], [20.0, 1.0, 40.0 / 85.0, 1.0, 1380.0 / 690.0]), - ([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345], - [60, 22, 430, 500, 345], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0]), - ([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050], - [100, 308, 4440, 44, 6050], [20.0, 4.0, 10.0, 0.5, 1.0]), - ([0], [0], [1, 100, 200], [1, 350, 150], [1, 350, 150], [1.0, 3.5, 150 / 200]), - ([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 10, 390], [0.5, 1.0, 390 / 200]), - ([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520, 8028], [4.0, 0.5, 1.0]), - ([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0]), - ([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349], - [60, 22, 430, 500, 345, 349], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0]), - ([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3], - [60, 22, 430, 500, 345, 349, 1], - [60, 22, 430, 500, 345, 349, 1], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0, 1 / 3]), - ]) - def test_interpolate4_using_sizes_without_axes(self, pads_begin, pads_end, input_shape, output_shape, sizes, - scales): - graph = build_graph(nodes_attrs=graph_node_attrs_without_axes, - edges=graph_edges_without_axes, - update_attributes={ - 'input_data': {'shape': input_shape}, - 'sizes': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)}, - 'sizes_data': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)}, - 'scales': {'shape': np.array(scales).shape, 'value': np.array(scales)}, - 'scales_data': {'shape': np.array(scales).shape, 'value': np.array(scales)}, - 'interpolate': {'pads_begin': int64_array(pads_begin), - 'pads_end': int64_array(pads_end), - 'shape_calculation_mode': 'sizes'} - }) - - node = Node(graph, 'interpolate') - tested_class = Interpolate(graph=graph, attrs=node.attrs()) - tested_class.infer(node) - - msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}," \ - " expected_shape={}, actual_shape={}" - - assert np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),\ - msg.format(sizes, scales, pads_begin, pads_end, output_shape, - graph.node['interpolate_data']['shape']) - - @pytest.mark.parametrize("pads_begin, pads_end, input_shape, output_shape, sizes, scales", - [([0], [0], [1, 3, 100, 200], [1, 3, 350, 150], [1, 3, 350, 150], [1.0, 1.0, 3.5, 150 / 200]), - ([0, 3, 10, 10], [0], [16, 7, 190, 400], [8, 10, 390, 600], - [8, 10, 390, 600], [0.5, 1.0, 390 / 200, 
600 / 410]), - ([10, 5, 0, 10], [0, 4, 16, 18], [4, 33, 1024, 8000], [56, 42, 520, 8028], - [56, 42, 520, 8028], [4.0, 1.0, 0.5, 1.0]), - ([0], [0], [1, 16, 85, 470, 690], [20, 16, 40, 470, 1380], - [20, 16, 40, 470, 1380], [20.0, 1.0, 40.0 / 85.0, 1.0, 1380.0 / 690.0]), - ([4, 3, 11, 22, 5], [1, 3, 4, 8, 5], [1, 16, 85, 470, 690], [60, 22, 430, 500, 345], - [60, 22, 430, 500, 345], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0]), - ([0], [0], [5, 77, 444, 88, 6050], [100, 308, 4440, 44, 6050], - [100, 308, 4440, 44, 6050], [20.0, 4.0, 10.0, 0.5, 1.0]), - ([0], [0], [1, 100, 200], [1, 350, 150], [1, 350, 150], [1.0, 3.5, 150 / 200]), - ([0, 3, 10], [0], [16, 7, 190], [8, 10, 390], [8, 10, 390], [0.5, 1.0, 390 / 200]), - ([10, 0, 10], [0, 16, 18], [4, 1024, 8000], [56, 520, 8028], [56, 520, 8028], [4.0, 0.5, 1.0]), - ([0], [0], [1, 690], [20, 1380], [20, 1380], [20.0, 1380.0 / 690.0]), - ([4, 3, 11, 22, 5, 0], [1, 3, 4, 8, 5, 0], [1, 16, 85, 470, 690, 349], [60, 22, 430, 500, 345, 349], - [60, 22, 430, 500, 345, 349], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0]), - ([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3], - [60, 22, 430, 500, 345, 349, 1], - [60, 22, 430, 500, 345, 349, 1], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0, 1 / 3]), - ([4, 3, 11, 22, 5, 0, 0], [1, 3, 4, 8, 5, 0, 0], [1, 16, 85, 470, 690, 349, 3], - [60, 22, 430, 500, 345, 349, 1], - [60, 22, 430, 500, 345, 349, 1], [10.0, 1.0, 4.3, 1.0, 345.0 / 700.0, 1.0, 0.3333333]), - ]) - def test_interpolate4_using_scales_without_axes(self, pads_begin, pads_end, input_shape, output_shape, sizes, - scales): - graph = build_graph(nodes_attrs=graph_node_attrs_without_axes, - edges=graph_edges_without_axes, - update_attributes={ - 'input_data': {'shape': input_shape}, - 'sizes': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)}, - 'sizes_data': {'shape': int64_array(sizes).shape, 'value': int64_array(sizes)}, - 'scales': {'shape': np.array(scales).shape, 'value': np.array(scales)}, - 'scales_data': {'shape': np.array(scales).shape, 'value': np.array(scales)}, - 'interpolate': {'pads_begin': int64_array(pads_begin), - 'pads_end': int64_array(pads_end), - 'shape_calculation_mode': 'scales'} - }) - - node = Node(graph, 'interpolate') - tested_class = Interpolate(graph=graph, attrs=node.attrs()) - tested_class.infer(node) - - msg = "Interpolate-4 infer failed for case: sizes={}, scales={}, pads_begin={}, pads_end={}," \ - " expected_shape={}, actual_shape={}" - - assert np.array_equal(graph.node['interpolate_data']['shape'], int64_array(output_shape)),\ - msg.format(sizes, scales, pads_begin, pads_end, output_shape, - graph.node['interpolate_data']['shape']) diff --git a/tools/mo/unit_tests/mo/ops/merge_test.py b/tools/mo/unit_tests/mo/ops/merge_test.py deleted file mode 100644 index 8130832f1536e0..00000000000000 --- a/tools/mo/unit_tests/mo/ops/merge_test.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.merge import Merge -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_attrs - - -class TestMerge(unittest.TestCase): - nodes = [ - ('first', {'value': np.ones((2, 2)), 'kind': 'data', 'executable': True, 'shape': np.array([2, 2]), - 'is_partial_inferred': True}), - 
('second', {'value': np.zeros((2, 2)), 'kind': 'data', 'executable': False, 'shape': np.array([2, 2]), - 'is_partial_inferred': True}), - ('merge', {'type': 'Merge', 'kind': 'op', 'op': 'Merge'}), - ('merge_output', {'value': None, 'kind': 'data', 'executable': True, 'shape': None}), - ] - edges = [ - ('first', 'merge', {'in': 0}), - ('second', 'merge', {'in': 1}), - ('merge', 'merge_output', {'out': 0}), - ] - - def test_merge_infer_simple_case_one_executable(self): - graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges) - - # We should propagate value of the first input since only this input is executable - graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges, - update_nodes_attributes=[('merge_output', {'shape': np.array([2, 2]), - 'value': np.ones((2, 2))}), - ('merge', {'is_not_fully_inferred': False})]) - - tested_class = Merge(graph=graph, attrs={}) - node = Node(graph, 'merge') - tested_class.merge_infer(node) - - (flag, resp) = compare_graphs(graph, graph_ref, 'merge_output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_merge_infer_complex_case(self): - """ - Case as in cycles when in first visit only one input are inferred and in the second -- both. - """ - graph = build_graph_with_attrs(nodes_with_attrs=self.nodes, edges_with_attrs=self.edges, - update_nodes_attributes=[('first', {'is_partial_inferred': False, - 'value': None}), - ('second', {'executable': True})]) - - # In first visit we should propagate only shapes - graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges, - update_nodes_attributes=[('second', {'executable': True}), - ('first', {'is_partial_inferred': False, - 'value': None}), - ('merge_output', {'shape': np.array([2, 2]), - 'value': None}), - ('merge', {'is_not_fully_inferred': True})]) - tested_class = Merge(graph=graph, attrs={}) - node = Node(graph, 'merge') - tested_class.merge_infer(node) - - (flag, resp) = compare_graphs(graph, graph_ref, 'merge_output', check_op_attrs=True) - self.assertTrue(flag, resp) - - # Imitate that inputs nodes now is inferred - graph.node['first']['is_partial_inferred'] = True - - # Run infer second time - tested_class = Merge(graph=graph, attrs={}) - node = Node(graph, 'merge') - tested_class.merge_infer(node) - - graph_ref = build_graph_with_attrs(nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges, - update_nodes_attributes=[('second', {'executable': True}), - ('first', {'is_partial_inferred': True, - 'value': None}), - ('merge_output', {'shape': np.array([2, 2]), - 'value': None}), - ('merge', {'is_not_fully_inferred': False})]) - (flag, resp) = compare_graphs(graph, graph_ref, 'merge_output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_merge_infer_only_second_executable(self): - graph = build_graph_with_attrs( - nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges, - update_nodes_attributes=[ - ('first', {'executable': False, 'value': np.ones([2, 2]), 'shape': int64_array([2, 2])}), - ('second', {'executable': True, 'value': np.zeros([4, 4]), 'shape': int64_array([4, 4])}) - ] - ) - - ref_graph = build_graph_with_attrs( - nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges, - update_nodes_attributes=[ - ('first', {'executable': False, 'value': np.ones([2, 2]), 'shape': int64_array([2, 2])}), - ('second', {'executable': True, 'value': np.zeros([4, 4]), 'shape': int64_array([4, 4])}), - ('merge', {'is_not_fully_inferred': False}), - ('merge_output', 
{'shape': int64_array([4, 4]), 'value': np.zeros([4, 4])}) - ] - ) - - tested_class = Merge(graph=graph, attrs={}) - node = Node(graph, 'merge') - tested_class.merge_infer(node) - - (flag, resp) = compare_graphs(graph, ref_graph, 'merge_output', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_merge_infer_no_executable(self): - graph = build_graph_with_attrs( - nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges, - update_nodes_attributes=[ - ('first', {'executable': False, 'value': np.ones([2, 2]), 'shape': int64_array([2, 2])}), - ('second', {'executable': False, 'value': np.zeros([4, 4]), 'shape': int64_array([4, 4])}) - ] - ) - - ref_graph = build_graph_with_attrs( - nodes_with_attrs=self.nodes, - edges_with_attrs=self.edges, - update_nodes_attributes=[ - ('first', {'executable': False, 'value': np.ones([2, 2]), 'shape': int64_array([2, 2])}), - ('second', {'executable': False, 'value': np.zeros([4, 4]), 'shape': int64_array([4, 4])}), - ('merge', {'is_not_fully_inferred': False}), - ('merge_output', {'shape': int64_array([2, 2]), 'value': None}) - ] - ) - - tested_class = Merge(graph=graph, attrs={}) - node = Node(graph, 'merge') - tested_class.merge_infer(node) - - (flag, resp) = compare_graphs(graph, ref_graph, 'merge_output', check_op_attrs=True) - self.assertTrue(flag, resp) diff --git a/tools/mo/unit_tests/mo/ops/non_max_suppression_test.py b/tools/mo/unit_tests/mo/ops/non_max_suppression_test.py deleted file mode 100644 index 5962772a7062b6..00000000000000 --- a/tools/mo/unit_tests/mo/ops/non_max_suppression_test.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.non_max_suppression import NonMaxSuppression -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, regular_op, regular_op_with_shaped_data, valued_const_with_data, result, connect, empty_data -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value - - -class TestNonMaxSuppressionInfer(unittest.TestCase): - def setUp(self): - nodes = { - **regular_op_with_shaped_data('boxes', [10, 100, 4], {'type': 'Parameter'}), - **regular_op_with_shaped_data('scores', [10, 5, 100], {'type': 'Parameter'}), - **valued_const_with_data('max_output_per_class', int64_array(7)), - **regular_op('nms', {'op': 'NonMaxSuppression', 'type': 'NonMaxSuppression', 'name': 'nms'}), - - **empty_data('nms_data_0'), - **empty_data('nms_data_1'), - **empty_data('nms_data_2'), - **result('output_0'), - **result('output_1'), - **result('output_2'), - } - - self.graph = build_graph(nodes, [ - *connect('boxes', '0:nms'), - *connect('scores', '1:nms'), - *connect('max_output_per_class', '2:nms'), - *connect('nms:0', 'nms_data_0', front_phase=True), # Use this WA for correct creating operation - *connect('nms_data_0', 'output_0', front_phase=True), # with multiple outputs - ], nodes_with_edges_only=True) - - self.graph_nms_5_2_outs = build_graph(nodes, [ - *connect('boxes', '0:nms'), - *connect('scores', '1:nms'), - *connect('max_output_per_class', '2:nms'), - *connect('nms:0', 'nms_data_0', front_phase=True), # Use this WA for correct creating operation - *connect('nms_data_0', 'output_0', front_phase=True), # with multiple outputs - *connect('nms:1', 'nms_data_1', front_phase=True), - *connect('nms_data_1', 'output_1', front_phase=True), - 
], nodes_with_edges_only=True) - - self.graph_nms_5_3_outs = build_graph(nodes, [ - *connect('boxes', '0:nms'), - *connect('scores', '1:nms'), - *connect('max_output_per_class', '2:nms'), - *connect('nms:0', 'nms_data_0', front_phase=True), # Use this WA for correct creating operation - *connect('nms_data_0', 'output_0', front_phase=True), # with multiple outputs - *connect('nms:1', 'nms_data_1', front_phase=True), - *connect('nms_data_1', 'output_1', front_phase=True), - *connect('nms:2', 'nms_data_2', front_phase=True), - *connect('nms_data_2', 'output_2', front_phase=True), - ], nodes_with_edges_only=True) - - def test_nms_infer_opset1(self): - nms_node = Node(self.graph, 'nms') - nms_node['version'] = 'opset1' - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), [100, 3])) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64) - - def test_nms_infer_i64_opset3(self): - nms_node = Node(self.graph, 'nms') - nms_node['version'] = 'opset3' - nms_node['output_type'] = np.int64 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), [100, 3])) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64) - - def test_nms_infer_i32_opset3(self): - nms_node = Node(self.graph, 'nms') - nms_node['version'] = 'opset3' - nms_node['output_type'] = np.int32 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), [100, 3])) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int32) - - def test_nms_infer_i32_opset4(self): - nms_node = Node(self.graph, 'nms') - nms_node['version'] = 'opset4' - nms_node['output_type'] = np.int32 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), [10 * 5 * 7, 3])) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int32) - - def test_nms_infer_i64_opset4(self): - nms_node = Node(self.graph, 'nms') - nms_node['version'] = 'opset4' - nms_node['output_type'] = np.int64 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), [10 * 5 * 7, 3])) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64) - - def test_nms_infer_i32_opset5_1_out(self): - nms_node = Node(self.graph, 'nms') - nms_node['version'] = 'opset5' - nms_node['output_type'] = np.int32 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int32) - - def test_nms_infer_i64_opset5_1_out(self): - nms_node = Node(self.graph, 'nms') - nms_node['version'] = 'opset5' - nms_node['output_type'] = np.int64 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64) - - def test_nms_infer_i32_opset5_2_outs(self): - nms_node = Node(self.graph_nms_5_2_outs, 'nms') - nms_node['version'] = 'opset5' - nms_node['output_type'] = np.int32 - NonMaxSuppression.infer(nms_node) - 
NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(1).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int32) - self.assertTrue(nms_node.out_port(1).get_data_type() == np.float32) - - def test_nms_infer_i64_opset5_2_outs(self): - nms_node = Node(self.graph_nms_5_2_outs, 'nms') - nms_node['version'] = 'opset5' - nms_node['output_type'] = np.int64 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(1).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64) - self.assertTrue(nms_node.out_port(1).get_data_type() == np.float32) - - def test_nms_infer_i32_opset5_3_outs(self): - nms_node = Node(self.graph_nms_5_3_outs, 'nms') - nms_node['version'] = 'opset5' - nms_node['output_type'] = np.int32 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(1).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(2).data.get_shape(), [1])) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int32) - self.assertTrue(nms_node.out_port(1).get_data_type() == np.float32) - self.assertTrue(nms_node.out_port(2).get_data_type() == np.int64) - - def test_nms_infer_i64_opset5_3_outs(self): - nms_node = Node(self.graph_nms_5_3_outs, 'nms') - nms_node['version'] = 'opset5' - nms_node['output_type'] = np.int64 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(1).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(2).data.get_shape(), [1])) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64) - self.assertTrue(nms_node.out_port(1).get_data_type() == np.float32) - self.assertTrue(nms_node.out_port(2).get_data_type() == np.int64) - - def test_nms_infer_i32_opset9_1_out(self): - nms_node = Node(self.graph, 'nms') - nms_node['version'] = 'opset9' - nms_node['output_type'] = np.int32 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int32) - - def test_nms_infer_i64_opset9_1_out(self): - nms_node = Node(self.graph, 'nms') - nms_node['version'] = 'opset9' - nms_node['output_type'] = np.int64 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64) - - def test_nms_infer_i32_opset9_2_outs(self): - nms_node = Node(self.graph_nms_5_2_outs, 'nms') - nms_node['version'] = 'opset9' - 
nms_node['output_type'] = np.int32 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(1).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int32) - self.assertTrue(nms_node.out_port(1).get_data_type() == np.float32) - - def test_nms_infer_i64_opset9_2_outs(self): - nms_node = Node(self.graph_nms_5_2_outs, 'nms') - nms_node['version'] = 'opset9' - nms_node['output_type'] = np.int64 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(1).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64) - self.assertTrue(nms_node.out_port(1).get_data_type() == np.float32) - - def test_nms_infer_i32_opset9_3_outs(self): - nms_node = Node(self.graph_nms_5_3_outs, 'nms') - nms_node['version'] = 'opset9' - nms_node['output_type'] = np.int32 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(1).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(2).data.get_shape(), [1])) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int32) - self.assertTrue(nms_node.out_port(1).get_data_type() == np.float32) - self.assertTrue(nms_node.out_port(2).get_data_type() == np.int64) - - def test_nms_infer_i64_opset9_3_outs(self): - nms_node = Node(self.graph_nms_5_3_outs, 'nms') - nms_node['version'] = 'opset9' - nms_node['output_type'] = np.int64 - NonMaxSuppression.infer(nms_node) - NonMaxSuppression.type_infer(nms_node) - - self.assertTrue(np.array_equal(nms_node.out_port(0).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(1).data.get_shape(), - shape_array([dynamic_dimension_value, 3]))) - self.assertTrue(np.array_equal(nms_node.out_port(2).data.get_shape(), [1])) - self.assertTrue(nms_node.out_port(0).get_data_type() == np.int64) - self.assertTrue(nms_node.out_port(1).get_data_type() == np.float32) - self.assertTrue(nms_node.out_port(2).get_data_type() == np.int64) diff --git a/tools/mo/unit_tests/mo/ops/normalize_test.py b/tools/mo/unit_tests/mo/ops/normalize_test.py deleted file mode 100644 index d8967af423a65a..00000000000000 --- a/tools/mo/unit_tests/mo/ops/normalize_test.py +++ /dev/null @@ -1,36 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'}, - 'norm': {'type': 'Normalize', 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'kind': 'op'}, - 'op_output': { 'kind': 'op', 'op': 'Result'} - } - - -class TestNormalize(unittest.TestCase): - def test_region_infer(self): - graph = build_graph(nodes_attributes, - [('node_1', 'norm'), 
- ('norm', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227]), 'value': None}, - 'norm': {} - }) - - norm_node = Node(graph, 'norm') - copy_shape_infer(norm_node) - exp_shape = np.array([1, 3, 227, 227]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) diff --git a/tools/mo/unit_tests/mo/ops/one_hot_test.py b/tools/mo/unit_tests/mo/ops/one_hot_test.py deleted file mode 100644 index 5658e3f291a2d4..00000000000000 --- a/tools/mo/unit_tests/mo/ops/one_hot_test.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.one_hot import OneHot -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, connect - - -def generate_nodes(data, axis=-1, depth=4, on_value=1., off_value=0.): - return { - 'indices': {'Op': 'Parameter', 'value': data, 'shape': int64_array(data.shape)}, - 'indices_d': {'kind': 'data', 'value': data, 'shape': int64_array(data.shape)}, - **valued_const_with_data('depth', int64_array(depth)), - **valued_const_with_data('on_value', float_array(on_value)), - **valued_const_with_data('off_value', float_array(off_value)), - **regular_op_with_shaped_data('one_hot', None, {'type': 'OneHot', 'axis': axis, 'Op': 'OneHot'}) - } - - -edges = [ - *connect('indices:0', 'one_hot:0'), - *connect('depth:0', 'one_hot:1'), - *connect('on_value:0', 'one_hot:2'), - *connect('off_value:0', 'one_hot:3'), - ('one_hot', 'one_hot_d') -] - - -class TestOneHotInfer(): - @pytest.mark.parametrize("input_value, exp_value, axis",[ - # 0d input - (1, [0, 1, 0, 0], -1), - # 1d input - ([1, 2], [[0, 1, 0, 0], [0, 0, 1, 0]], -1), - # 2D input - ([[1, 2], [3, 4]], [[[0, 1, 0, 0], [0, 0, 1, 0]], - [[0, 0, 0, 1], [0, 0, 0, 0]]], -1), - # 3d input - ([[[0, 2], [1, 2]], [[2, 1], [3, 0]]], - [[[[1, 0, 0, 0], [0, 0, 1, 0]], [[0, 1, 0, 0], [0, 0, 1, 0]]], - [[[0, 0, 1, 0], [0, 1, 0, 0]], [[0, 0, 0, 1], [1, 0, 0, 0]]]], -1), - # 1d input with negative indices - ([-2, 2], [[0, 0, 1, 0], [0, 0, 1, 0]], -1), - # check if axis is neither 0 nor -1 - ([[1, 2], [3, 4]], [[[0, 0], [1, 0], [0, 1], [0, 0]], - [[0, 0], [0, 0], [0, 0], [1, 0]]], 1) - ]) - def test_infer(self, input_value, exp_value, axis): - graph = build_graph(generate_nodes(int64_array(input_value), axis), edges) - onehot_node = Node(graph, 'one_hot') - OneHot.infer(onehot_node) - res_value = graph.node['one_hot_d']['value'] - assert np.array_equal(exp_value, int64_array(res_value)) diff --git a/tools/mo/unit_tests/mo/ops/op_test.py b/tools/mo/unit_tests/mo/ops/op_test.py deleted file mode 100644 index 93bf9817f8d401..00000000000000 --- a/tools/mo/unit_tests/mo/ops/op_test.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.ops.lstm_cell import LSTMCell -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, regular_op - -nodes = { - **regular_op('Op1', {'type': 'Op1', 'kind': 'op', 'op': 'Op1'}), - **regular_op('Op2', {'type': 'Op2', 'kind': 'op', 'op': 'Op2'}), - **regular_op('Op3', {'type': 'Op3', 'kind': 'op', 'op': 'Op3'}), -} - - -class 
TestOp(unittest.TestCase): - def test_create_node(self): - graph = build_graph(nodes, [('Op1', 'Op3', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('Op1', 'Op1')]}), - ('Op2', 'Op3', {'in': 1, 'out': 0, 'fw_tensor_debug_info': [('Op2', 'Op2')]})]) - graph.stage = 'front' - input1 = Node(graph, 'Op1') - input2 = Node(graph, 'Op2') - inputs = [(input1, 0), (input2, 0)] - - lstm_op = LSTMCell(graph, dict(name='LSTMCell')) - _ = lstm_op.create_node(inputs) - - self.assertTrue(input1.out_edge(0)['fw_tensor_debug_info'] == [('Op1', 'Op1')]) - self.assertTrue(input2.out_edge(0)['fw_tensor_debug_info'] == [('Op2', 'Op2')]) diff --git a/tools/mo/unit_tests/mo/ops/pad_test.py b/tools/mo/unit_tests/mo/ops/pad_test.py deleted file mode 100644 index df999ae88318da..00000000000000 --- a/tools/mo/unit_tests/mo/ops/pad_test.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, dynamic_dimension, \ - strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.pad import Pad, AttributedPad -from unit_tests.utils.graph import build_graph - - -class TestPadOps(unittest.TestCase): - node_attrs = { - 'data_in': { - 'kind': 'data', - 'shape': np.array([1, 3, 100, 200]), - 'value': None, - }, - 'pads_begin': { - 'kind': 'data', - 'value': np.array([0, 0, 1, 2], dtype=np.int64), - 'shape': np.array([4], dtype=np.int64) - }, - 'pads_end': { - 'kind': 'data', - 'value': np.array([0, 0, 3, 4], dtype=np.int64), - 'shape': np.array([4], dtype=np.int64) - }, - 'pad': { - 'op': 'Pad', - 'kind': 'op', - 'pads': None, - }, - 'data_out': { - 'kind': 'data', - 'shape': None, - 'value': None, - } - } - - edge_attrs = [ - ('data_in', 'pad'), - ('pad', 'data_out') - ] - - def test_attribute_pad_no_infer(self): - graph = build_graph( - self.node_attrs, - self.edge_attrs, - {'pad': {'pads': np.array([[0, 0], [0, 0], [1, 3], [2, 4]], dtype=np.int64)}}, - nodes_with_edges_only=True, - ) - pad_node = Node(graph, 'pad') - with self.assertRaisesRegex(AttributeError, ".*has no attribute 'infer'.*"): - AttributedPad.infer(pad_node) - - def test_two_inputs(self): - graph = build_graph( - self.node_attrs, - self.edge_attrs + [('pads_begin', 'pad'), ('pads_end', 'pad')], - nodes_with_edges_only=True, - ) - pad_node = Node(graph, 'pad') - Pad.infer(pad_node) - self.assertTrue(np.array_equal(Node(graph, 'data_out').shape, np.array([1, 3, 100 + 1 + 3, 200 + 2 + 4]))) - - def test_not_enough_inputs(self): - graph = build_graph( - self.node_attrs, - self.edge_attrs + [('pads_begin', 'pad')], - nodes_with_edges_only=True, - ) - pad_node = Node(graph, 'pad') - with self.assertRaisesRegex(AssertionError, ".*must have 3 or 4 inputs.*"): - Pad.infer(pad_node) - - def test_two_inputs_value_infer(self): - in_value = np.random.rand(*self.node_attrs['data_in']['shape']).astype(np.float32) - graph = build_graph( - self.node_attrs, - self.edge_attrs + [('pads_begin', 'pad'), ('pads_end', 'pad')], - {'data_in': {'value': in_value}}, - nodes_with_edges_only=True, - ) - - pads = np.insert(self.node_attrs['pads_end']['value'], - np.arange(len(self.node_attrs['pads_begin']['value'])), self.node_attrs['pads_begin']['value']) - pads = np.reshape(pads, (len(self.node_attrs['pads_begin']['value']), 2)) - ref_value = np.pad(in_value, pads, constant_values=0, mode='constant') - - pad_node = Node(graph, 'pad') - 
Pad.infer(pad_node) - - self.assertTrue(np.array_equal(Node(graph, 'data_out').shape, np.array([1, 3, 100 + 1 + 3, 200 + 2 + 4]))) - self.assertTrue(np.array_equal(Node(graph, 'data_out').value, ref_value)) - self.assertFalse(isinstance(Node(graph, 'data_out').value, np.ma.masked_array)) - - def test_two_inputs_dynamic_value_infer(self): - in_value = shape_array([dynamic_dimension_value, 3]).reshape((1, 1, 1, 2)) - graph = build_graph( - self.node_attrs, - self.edge_attrs + [('pads_begin', 'pad'), ('pads_end', 'pad')], - {'data_in': {'value': in_value, 'shape': in_value.shape}}, - nodes_with_edges_only=True, - ) - out_shape = (1, 1, 5, 8) - mask = np.zeros(out_shape, dtype=bool) - mask[0][0][1][2] = True - ref_value = np.ma.masked_array(np.zeros(out_shape, dtype=np.int64), mask=mask, dtype=np.int64) - ref_value[0][0][1][3] = 3 - - pad_node = Node(graph, 'pad') - Pad.infer(pad_node) - output_value = Node(graph, 'data_out').value - self.assertTrue(np.array_equal(Node(graph, 'data_out').shape, ref_value.shape)) - self.assertTrue(strict_compare_tensors(output_value, ref_value)) - self.assertTrue(isinstance(output_value, np.ma.masked_array)) - self.assertTrue(output_value[0][0][1][2] is dynamic_dimension) diff --git a/tools/mo/unit_tests/mo/ops/pooling_test.py b/tools/mo/unit_tests/mo/ops/pooling_test.py deleted file mode 100644 index b1d91c2c4cbeb9..00000000000000 --- a/tools/mo/unit_tests/mo/ops/pooling_test.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.pooling import Pooling -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'value': None, 'kind': 'data'}, - 'pool': {'type': 'Pooling', 'value': None, 'kind': 'op'}, - 'node_2': {'value': None, 'kind': 'data'}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - } - - -class TestPoolingPartialInfer(unittest.TestCase): - def test_pooling_infer(self): - graph = build_graph(nodes_attributes, - [('node_1', 'pool'), - ('pool', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 256, 256])}, - 'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 2, 2]), - 'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]), - 'pad_spatial_shape': np.array([[3, 3], [3, 3]]), - 'pool_method': 'avg', 'exclude_pad': False, 'global_pool': False, - 'output_spatial_shape': None, 'output_shape': None, - 'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'batch_dims': np.array([0]), - 'pooling_convention': 'full'} - }) - - pool_node = Node(graph, 'pool') - - Pooling.infer(pool_node) - exp_shape = np.array([1, 3, 131, 131]) - res_shape = graph.node['node_2']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_pooling_dynamic_infer(self): - graph = build_graph(nodes_attributes, - [('node_1', 'pool'), - ('pool', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None}, - 'node_1': {'shape': shape_array([1, dynamic_dimension_value, dynamic_dimension_value, - 256])}, - 'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 2, 2]), - 'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]), - 
'pad_spatial_shape': np.array([[3, 3], [3, 3]]), - 'pool_method': 'avg', 'exclude_pad': False, 'global_pool': False, - 'output_spatial_shape': None, 'output_shape': None, - 'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'batch_dims': np.array([0]), - 'pooling_convention': 'full'} - }) - - pool_node = Node(graph, 'pool') - - Pooling.infer(pool_node) - exp_shape = shape_array([1, dynamic_dimension_value, dynamic_dimension_value, 131]) - res_shape = graph.node['node_2']['shape'] - self.assertTrue(strict_compare_tensors(exp_shape, res_shape)) - - def test_pooling_infer_decrement_input_spatial(self): - graph = build_graph(nodes_attributes, - [('node_1', 'pool'), - ('pool', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 224, 224])}, - 'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 3, 3]), - 'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]), - 'pad_spatial_shape': np.array([[1, 1], [1, 1]]), - 'pool_method': 'avg', 'exclude_pad': False, 'global_pool': False, - 'output_spatial_shape': None, 'output_shape': None, - 'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'batch_dims': np.array([0]), - 'pooling_convention': 'full'} - }) - - pool_node = Node(graph, 'pool') - - Pooling.infer(pool_node) - exp_shape = np.array([1, 3, 75, 75]) - res_shape = graph.node['node_2']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_pooling_infer_no_convention(self): - graph = build_graph(nodes_attributes, - [('node_1', 'pool'), - ('pool', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 256, 256])}, - 'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 2, 2]), - 'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]), - 'pad_spatial_shape': np.array([[3, 3], [3, 3]]), - 'pool_method': 'avg', 'exclude_pad': False, 'global_pool': False, - 'output_spatial_shape': None, 'output_shape': None, - 'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'batch_dims': np.array([0])} - }) - - pool_node = Node(graph, 'pool') - - Pooling.infer(pool_node) - exp_shape = np.array([1, 3, 130, 130]) - res_shape = graph.node['node_2']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_pooling_infer_no_shape(self): - graph = build_graph(nodes_attributes, - [('node_1', 'pool'), - ('pool', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None}, - 'node_1': {'shape': None}, - 'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 2, 2]), - 'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]), - 'pad_spatial_shape': np.array([[3, 3], [3, 3]]), - 'pool_method': 'avg', 'exclude_pad': False, - 'output_spatial_shape': None, 'output_shape': None, - 'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'batch_dims': np.array([0]), - 'pooling_convention': 'full'} - }) - - pool_node = Node(graph, 'pool') - Pooling.infer(pool_node) - res_shape = graph.node['node_2']['shape'] - self.assertIsNone(res_shape) - - def test_pooling_infer_wrong_input_shape(self): - graph = build_graph(nodes_attributes, - [('node_1', 'pool'), - ('pool', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 1, 1])}, - 'pool': 
{'window': np.array([1, 1, 5, 5]), 'stride': np.array([1, 1, 2, 2]), - 'pad': np.array([[0, 0], [0, 0], [1, 1], [1, 1]]), - 'pad_spatial_shape': np.array([[1, 1], [1, 1]]), - 'pool_method': 'avg', 'exclude_pad': False, 'global_pool': False, - 'output_spatial_shape': None, 'output_shape': None, - 'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'batch_dims': np.array([0]), - 'pooling_convention': 'full'} - }) - - pool_node = Node(graph, 'pool') - - with self.assertRaises(Error): - Pooling.infer(pool_node) - - def test_pooling_infer_with_dilations(self): - graph = build_graph(nodes_attributes, - [('node_1', 'pool'), - ('pool', 'node_2'), - ('node_2', 'op_output') - ], - {'node_2': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 256, 256])}, - 'pool': {'window': np.array([1, 1, 2, 2]), 'stride': np.array([1, 1, 2, 2]), - 'pad': np.array([[0, 0], [0, 0], [0, 0], [1, 1]]), - 'pad_spatial_shape': np.array([[0, 0], [1, 1]]), - 'pool_method': 'max', 'exclude_pad': False, 'global_pool': False, - 'output_spatial_shape': None, 'output_shape': None, - 'kernel_spatial': np.array([2, 2]), 'spatial_dims': np.array([2, 3]), - 'channel_dims': np.array([1]), 'batch_dims': np.array([0]), - 'pooling_convention': 'full', 'dilation': np.array([1, 1, 2, 2]), - 'auto_pad': 'valid'} - }) - - pool_node = Node(graph, 'pool') - - Pooling.infer(pool_node) - exp_shape = np.array([1, 3, 127, 127]) - res_shape = graph.node['node_2']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) diff --git a/tools/mo/unit_tests/mo/ops/priorbox_clustered_test.py b/tools/mo/unit_tests/mo/ops/priorbox_clustered_test.py deleted file mode 100644 index bb29f481330b8f..00000000000000 --- a/tools/mo/unit_tests/mo/ops/priorbox_clustered_test.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.priorbox_clustered import PriorBoxClusteredOp -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'data'}, - 'node_2': {'type': 'Identity', 'value': None, 'kind': 'data'}, - 'pbc': {'type': 'PriorBoxClustered', 'value': None, 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'value': None, 'kind': 'data'}, - 'op_output': { 'kind': 'op', 'op': 'Result'} - } - - -class TestPriorBoxClusteredPartialInfer(unittest.TestCase): - def test_caffe_priorboxclustered_infer(self): - graph = build_graph(nodes_attributes, - [ - ('node_1', 'pbc'), - ('node_2', 'pbc'), - ('pbc', 'node_3'), - ('node_3', 'op_output') - ], - { - 'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 384, 19, 19])}, - 'node_2': {'shape': np.array([1, 3, 300, 300])}, - 'pbc': {'flip': 0, 'clip': 0, 'variance': [0.1, 0.1, 0.2, 0.2], - 'step': 0, 'offset': 0.5, 'width': [1., 1., 1., 1., 1., 1., 1., 1., 1.], - 'height': [2., 2., 2., 2., 2., 2., 2., 2., 2.]} - }) - graph.graph['layout'] = 'NCHW' - - pbc_node = Node(graph, 'pbc') - PriorBoxClusteredOp.priorbox_clustered_infer(pbc_node) - exp_shape = np.array([1, 2, 12996]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_tf_priorboxclustered_infer(self): - graph = build_graph(nodes_attributes, - [ - ('node_1', 'pbc'), - ('node_2', 'pbc'), - ('pbc', 'node_3'), - ('node_3', 'op_output') - ], - { - 
'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 19, 19, 384])}, - 'node_2': {'shape': np.array([1, 300, 300, 3])}, - 'pbc': {'flip': 0, 'clip': 0, 'variance': [0.1, 0.1, 0.2, 0.2], - 'step': 0, 'offset': 0.5, 'width': [1., 1., 1., 1., 1., 1., 1., 1., 1.], - 'height': [2., 2., 2., 2., 2., 2., 2., 2., 2.]} - }) - graph.graph['layout'] = 'NHWC' - - pbc_node = Node(graph, 'pbc') - PriorBoxClusteredOp.priorbox_clustered_infer(pbc_node) - exp_shape = np.array([1, 2, 12996]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) diff --git a/tools/mo/unit_tests/mo/ops/priorbox_test.py b/tools/mo/unit_tests/mo/ops/priorbox_test.py deleted file mode 100644 index 8717624eff8247..00000000000000 --- a/tools/mo/unit_tests/mo/ops/priorbox_test.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.priorbox import PriorBoxOp -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'value': None, 'kind': 'data'}, - 'pb': {'type': 'PriorBox', 'value': None, 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'value': None, 'kind': 'data'}, - 'op_output': { 'kind': 'op', 'op': 'Result'} - } - - -class TestPriorBoxPartialInfer(unittest.TestCase): - def test_caffe_priorbox_infer(self): - graph = build_graph(nodes_attributes, - [ - ('node_1', 'pb'), - ('pb', 'node_3'), - ('node_3', 'op_output') - ], - { - 'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 384, 19, 19])}, - 'pb': { - 'aspect_ratio': np.array([1]), - 'flip': 0, - 'min_size': np.array([1]), - 'max_size': np.array([1]) - } - }) - graph.graph['layout'] = 'NCHW' - pb_node = Node(graph, 'pb') - PriorBoxOp.priorbox_infer(pb_node) - exp_shape = np.array([1, 2, 4 * 19 * 19 * 2]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_caffe_priorbox_flip_infer(self): - graph = build_graph(nodes_attributes, - [ - ('node_1', 'pb'), - ('pb', 'node_3'), - ('node_3', 'op_output') - ], - { - 'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 384, 19, 19])}, - 'pb': { - 'aspect_ratio': np.array([1, 2, 0.5]), - 'flip': 1, - 'min_size': np.array([1]), - 'max_size': np.array([1]) - } - }) - graph.graph['layout'] = 'NCHW' - pb_node = Node(graph, 'pb') - PriorBoxOp.priorbox_infer(pb_node) - exp_shape = np.array([1, 2, 4 * 19 * 19 * 4]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_tf_priorbox_infer(self): - graph = build_graph(nodes_attributes, - [ - ('node_1', 'pb'), - ('pb', 'node_3'), - ('node_3', 'op_output') - ], - { - 'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 19, 19, 384])}, - 'pb': { - 'aspect_ratio': np.array([1]), - 'flip': 0, - 'min_size': np.array([1]), - 'max_size': np.array([1]) - } - }) - graph.graph['layout'] = 'NHWC' - pb_node = Node(graph, 'pb') - PriorBoxOp.priorbox_infer(pb_node) - exp_shape = np.array([1, 2, 4 * 19 * 19 * 2]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_tf_priorbox_flip_infer(self): - graph = build_graph(nodes_attributes, - [ - ('node_1', 'pb'), - ('pb', 'node_3'), - ('node_3', 'op_output') - ], - { - 'node_3': 
{'shape': None}, - 'node_1': {'shape': np.array([1, 19, 19, 384])}, - 'pb': { - 'aspect_ratio': np.array([1, 2, 0.5]), - 'flip': 1, - 'min_size': np.array([1]), - 'max_size': np.array([1]) - } - }) - graph.graph['layout'] = 'NHWC' - pb_node = Node(graph, 'pb') - PriorBoxOp.priorbox_infer(pb_node) - exp_shape = np.array([1, 2, 4 * 19 * 19 * 4]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_caffe_priorbox_density_infer(self): - graph = build_graph(nodes_attributes, - [ - ('node_1', 'pb'), - ('pb', 'node_3')], - { - 'node_3': {'is_output': True, 'shape': None}, - 'node_1': {'shape': np.array([1, 128, 32, 32])}, - 'pb': { - 'aspect_ratio': np.array([1]), - 'flip': 1, - 'min_size': np.array([]), - 'max_size': np.array([]), - 'fixed_size': np.array([32, 64, 128]), - 'density': np.array([1, 2, 4]), - } - }) - graph.graph['layout'] = 'NCHW' - pb_node = Node(graph, 'pb') - PriorBoxOp.priorbox_infer(pb_node) - exp_shape = np.array([1, 2, 4*32*32*21]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) diff --git a/tools/mo/unit_tests/mo/ops/proposal_test.py b/tools/mo/unit_tests/mo/ops/proposal_test.py deleted file mode 100644 index b81c04da337e97..00000000000000 --- a/tools/mo/unit_tests/mo/ops/proposal_test.py +++ /dev/null @@ -1,53 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.ops.proposal import ProposalOp -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'proposal_input': {'kind': 'data', 'shape': None, 'value': None}, - 'proposal': {'type': 'Proposal', 'kind': 'op'}, - 'proposal_out_data_1': {'kind': 'data', 'shape': None, 'value': None}, - 'proposal_out_data_2': {'kind': 'data', 'shape': None, 'value': None}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - 'op_output2': {'kind': 'op', 'op': 'Result'}, - } - - -class TestProposal(unittest.TestCase): - def test_proposal_infer_one_output(self): - graph = build_graph(nodes_attributes, - [('proposal_input', 'proposal'), - ('proposal', 'proposal_out_data_1'), - ('proposal_out_data_1', 'op_output') - ], - {'proposal_input': {'shape': int64_array([1, 3, 227, 227])}, - 'proposal': {'post_nms_topn': 2, **layout_attrs()} - }) - - proposal_node = Node(graph, 'proposal') - ProposalOp.proposal_infer(proposal_node) - - self.assertListEqual([1 * 2, 5], list(graph.node['proposal_out_data_1']['shape'])) - - def test_proposal_infer_two_outputs(self): - graph = build_graph(nodes_attributes, - [('proposal_input', 'proposal'), - ('proposal', 'proposal_out_data_1'), - ('proposal', 'proposal_out_data_2'), - ('proposal_out_data_1', 'op_output'), - ('proposal_out_data_2', 'op_output') - ], - {'proposal_input': {'shape': int64_array([1, 3, 227, 227])}, - 'proposal': {'post_nms_topn': 2, **layout_attrs()} - }) - - proposal_node = Node(graph, 'proposal') - ProposalOp.proposal_infer(proposal_node) - - self.assertListEqual(list([1 * 2, 5]), list(graph.node['proposal_out_data_1']['shape'])) - self.assertListEqual(list([1 * 2]), list(graph.node['proposal_out_data_2']['shape'])) diff --git a/tools/mo/unit_tests/mo/ops/psroipooling_test.py b/tools/mo/unit_tests/mo/ops/psroipooling_test.py deleted file 
mode 100644 index 1ea8dbf7f9425d..00000000000000 --- a/tools/mo/unit_tests/mo/ops/psroipooling_test.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.psroipooling import PSROIPoolingOp -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'}, - 'node_2': {'type': 'Identity', 'kind': 'op'}, - 'psroipool': {'type': 'PSROIPooling', 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'kind': 'op'}, - 'op_output': { 'kind': 'op', 'op': 'Result'} - } - - -class TestPSROIPooling(unittest.TestCase): - def test_psroipool_infer_nchw(self): - graph = build_graph(nodes_attributes, - [('node_1', 'psroipool'), - ('node_2', 'psroipool'), - ('psroipool', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227])}, - 'node_2': {'shape': np.array([100, 5])}, - 'psroipool': {'output_dim': 4, 'group_size': 15} - }) - graph.graph['layout'] = 'NCHW' - psroipool_node = Node(graph, 'psroipool') - PSROIPoolingOp.psroipooling_infer(psroipool_node) - exp_shape = np.array([100, 4, 15, 15]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_psroipool_infer_nhwc(self): - graph = build_graph(nodes_attributes, - [('node_1', 'psroipool'), - ('node_2', 'psroipool'), - ('psroipool', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': np.array([1, 227, 227, 3])}, - 'node_2': {'shape': np.array([100, 5])}, - 'psroipool': {'output_dim': 4, 'group_size': 15} - }) - graph.graph['layout'] = 'NHWC' - psroipool_node = Node(graph, 'psroipool') - PSROIPoolingOp.psroipooling_infer(psroipool_node) - exp_shape = np.array([100, 15, 15, 4]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_psroipool_infer_no_shape(self): - graph = build_graph(nodes_attributes, - [('node_1', 'psroipool'), - ('node_2', 'psroipool'), - ('psroipool', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None}, - 'node_1': {'shape': None}, - 'node_2': {'shape': np.array([100, 5])}, - 'psroipool': {'output_dim': 4, 'group_size': 224} - }) - graph.graph['layout'] = 'NCHW' - - psroipool_node = Node(graph, 'psroipool') - PSROIPoolingOp.psroipooling_infer(psroipool_node) - res_shape = graph.node['node_3']['shape'] - self.assertIsNone(res_shape) diff --git a/tools/mo/unit_tests/mo/ops/quantize_test.py b/tools/mo/unit_tests/mo/ops/quantize_test.py deleted file mode 100644 index 567c6574235b00..00000000000000 --- a/tools/mo/unit_tests/mo/ops/quantize_test.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.fakequantize import FakeQuantize, broadcastable -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - - -class TestBroadcastable(unittest.TestCase): - def test_matching(self): - self.assertTrue(broadcastable([1, 2, 3], [1, 2, 3])) - - def test_incomplete(self): - self.assertTrue(broadcastable([1, 1, 1], [1, 2, 3])) - self.assertTrue(broadcastable([2, 3], [1, 2, 3])) - self.assertTrue(broadcastable([1, 3], [1, 2, 3])) - self.assertTrue(broadcastable([1, 1], [1, 
2, 3])) - self.assertTrue(broadcastable([], [1, 2, 3])) - self.assertTrue(broadcastable([1], [1, 2, 3])) - - def test_reverse_incomplete(self): - self.assertFalse(broadcastable([1, 2, 3], [1, 1, 1])) - self.assertFalse(broadcastable([1, 2, 3], [2, 3])) - self.assertFalse(broadcastable([1, 2, 3], [1, 3])) - self.assertFalse(broadcastable([1, 2, 3], [1, 1])) - self.assertFalse(broadcastable([1, 2, 3], [])) - self.assertFalse(broadcastable([1, 2, 3], [1])) - - def test_invalid(self): - self.assertFalse(broadcastable([3, 2, 1], [1, 2, 3])) - self.assertFalse(broadcastable([5], [6])) - self.assertFalse(broadcastable([5], [1])) - self.assertFalse(broadcastable([64], [1, 55, 56, 56])) - - -nodes_attributes = {'node_in_1': {'op': 'Identity', 'kind': 'op'}, - 'node_in_2': {'op': 'Identity', 'kind': 'op'}, - 'node_in_3': {'op': 'Identity', 'kind': 'op'}, - 'node_in_4': {'op': 'Identity', 'kind': 'op'}, - 'node_in_5': {'op': 'Identity', 'kind': 'op'}, - 'quantize': {'op': 'FakeQuantize', 'kind': 'op', 'levels': 2}, - 'node_out_1': {'op': 'Identity', 'kind': 'op'}, - 'op_output': {'kind': 'op', 'op': 'Result'} - } - - -class TestFakeQuantizeOp(unittest.TestCase): - def test_shape_only(self): - graph = build_graph(nodes_attributes, - [('node_in_1', 'quantize'), - ('node_in_2', 'quantize'), - ('node_in_3', 'quantize'), - ('node_in_4', 'quantize'), - ('node_in_5', 'quantize'), - ('quantize', 'node_out_1'), - ('node_out_1', 'op_output') - ], - {'node_out_1': {'shape': None}, - 'node_in_1': {'shape': np.array([1, 3, 10, 20])}, - 'node_in_2': {'shape': np.array([1, 3, 10, 20])}, - 'node_in_3': {'shape': np.array([1, 3, 10, 20])}, - 'node_in_4': {'shape': np.array([1, 3, 10, 20])}, - 'node_in_5': {'shape': np.array([1, 3, 10, 20])}, - }) - - quantize_node = Node(graph, 'quantize') - FakeQuantize.infer(quantize_node) - quantize_shape = np.array([1, 3, 10, 20]) - res_shape = graph.node['node_out_1']['shape'] - for i in range(0, len(quantize_shape)): - self.assertEqual(quantize_shape[i], res_shape[i]) - - def test_shape_and_value(self): - graph = build_graph(nodes_attributes, - [('node_in_1', 'quantize'), - ('node_in_2', 'quantize'), - ('node_in_3', 'quantize'), - ('node_in_4', 'quantize'), - ('node_in_5', 'quantize'), - ('quantize', 'node_out_1'), - ('node_out_1', 'op_output') - ], - { - 'node_out_1': { - 'shape': None, - 'value': None, - }, - 'node_in_1': { - 'shape': np.array([4]), - 'value': np.array([5, 17, 0, 100], dtype=np.float32), - }, - 'node_in_2': { - 'shape': np.array([4]), - 'value': np.array([0, 12, 12, 12], dtype=np.float32), - }, - 'node_in_3': { - 'shape': np.array([4]), - 'value': np.array([10, 20, 20, 20], dtype=np.float32), - }, - 'node_in_4': { - 'shape': np.array([4]), - 'value': np.array([0, 0, 0, 0], dtype=np.float32), - }, - 'node_in_5': { - 'shape': np.array([4]), - 'value': np.array([1, 1, 1, 1], dtype=np.float32), - }, - }) - - exp_node = Node(graph, 'quantize') - FakeQuantize.infer(exp_node) - quantize_shape = np.array([4]) - quantize_value = np.array([1, 1, 0, 1], dtype=np.float32) - res_shape = graph.node['node_out_1']['shape'] - res_value = graph.node['node_out_1']['value'] - for i in range(0, len(quantize_shape)): - self.assertEqual(quantize_shape[i], res_shape[i]) - for i in range(0, len(quantize_value)): - self.assertAlmostEqual(quantize_value[i], res_value[i], places=6) diff --git a/tools/mo/unit_tests/mo/ops/regionyolo_test.py b/tools/mo/unit_tests/mo/ops/regionyolo_test.py deleted file mode 100644 index b55a700342eede..00000000000000 --- 
a/tools/mo/unit_tests/mo/ops/regionyolo_test.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.regionyolo import RegionYoloOp -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'}, - 'region': {'type': 'RegionYolo', 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'kind': 'op'}, - 'op_output': { 'kind': 'op', 'op': 'Result'} - } - - -class TestRegionYOLOCaffe(unittest.TestCase): - def test_region_infer(self): - graph = build_graph(nodes_attributes, - [('node_1', 'region'), - ('region', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227])}, - 'region': {'axis': 1, 'end_axis': -1, 'do_softmax': 1, **layout_attrs()} - }) - graph.graph['layout'] = 'NCHW' - reorg_node = Node(graph, 'region') - RegionYoloOp.regionyolo_infer(reorg_node) - exp_shape = np.array([1, 3 * 227 * 227]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_region_infer_flatten(self): - graph = build_graph(nodes_attributes, - [('node_1', 'region'), - ('region', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227])}, - 'region': {'end_axis': 1, 'axis': 0, 'do_softmax': 1, **layout_attrs()} - }) - graph.graph['layout'] = 'NCHW' - reorg_node = Node(graph, 'region') - RegionYoloOp.regionyolo_infer(reorg_node) - exp_shape = np.array([1 * 3, 227, 227]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_region_infer_dynamic_flatten(self): - graph = build_graph(nodes_attributes, - [('node_1', 'region'), - ('region', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': shape_array([1, dynamic_dimension_value, 227, 227])}, - 'region': {'end_axis': 1, 'axis': 0, 'do_softmax': 1, **layout_attrs()} - }) - graph.graph['layout'] = 'NCHW' - reorg_node = Node(graph, 'region') - RegionYoloOp.regionyolo_infer(reorg_node) - exp_shape = shape_array([dynamic_dimension_value, 227, 227]) - res_shape = graph.node['node_3']['shape'] - self.assertTrue(strict_compare_tensors(exp_shape, res_shape)) - - def test_region_infer_flatten_again(self): - graph = build_graph(nodes_attributes, - [('node_1', 'region'), - ('region', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227])}, - 'region': {'end_axis': 2, 'axis': 0, 'do_softmax': 1, **layout_attrs()} - }) - graph.graph['layout'] = 'NCHW' - reorg_node = Node(graph, 'region') - RegionYoloOp.regionyolo_infer(reorg_node) - exp_shape = np.array([1 * 3 * 227, 227]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_region_infer_do_softmax(self): - graph = build_graph(nodes_attributes, - [('node_1', 'region'), - ('region', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 
'node_1': {'shape': np.array([1, 3, 227, 227])}, - 'region': {'do_softmax': 0, 'end_axis': -1, 'axis': 1, 'classes': 80, 'coords': 4, - 'mask': np.array([6, 7, 8]), **layout_attrs()} - }) - - graph.graph['layout'] = 'NCHW' - reorg_node = Node(graph, 'region') - RegionYoloOp.regionyolo_infer(reorg_node) - exp_shape = np.array([1, (80 + 4 + 1) * 3, 227, 227]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - -class TestRegionYOLOTF(unittest.TestCase): - def test_region_infer(self): - graph = build_graph(nodes_attributes, - [('node_1', 'region'), - ('region', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': np.array([1, 227, 227, 3])}, - 'region': {'axis': 1, 'end_axis': -1, 'do_softmax': 1, **layout_attrs()} - }) - graph.graph['layout'] = 'NHWC' - reorg_node = Node(graph, 'region') - RegionYoloOp.regionyolo_infer(reorg_node) - exp_shape = np.array([1, 3 * 227 * 227]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) - - def test_region_infer_do_softmax(self): - graph = build_graph(nodes_attributes, - [('node_1', 'region'), - ('region', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': np.array([1, 227, 227, 3])}, - 'region': {'do_softmax': 0, 'end_axis': -1, 'axis': 1, 'classes': 80, 'coords': 4, - 'mask': np.array([6, 7, 8]), **layout_attrs()} - }) - - graph.graph['layout'] = 'NHWC' - reorg_node = Node(graph, 'region') - RegionYoloOp.regionyolo_infer(reorg_node) - exp_shape = np.array([1, 227, 227, (80 + 4 + 1) * 3]) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], res_shape[i]) diff --git a/tools/mo/unit_tests/mo/ops/reorgyolo_test.py b/tools/mo/unit_tests/mo/ops/reorgyolo_test.py deleted file mode 100644 index ae269b28582e68..00000000000000 --- a/tools/mo/unit_tests/mo/ops/reorgyolo_test.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.reorgyolo import ReorgYoloOp -from openvino.tools.mo.front.common.extractors.utils import layout_attrs -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'}, - 'reorg': {'type': 'ReorgYolo', 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'kind': 'op'}, - 'op_output': { 'kind': 'op', 'op': 'Result'} - } - - -def calculate_reorgyolo_output(input, stride): - output = np.full_like(input, -1, dtype=np.int64) - output[0] = input[0] - output[1] = input[1] * stride ** 2 - output[2] = np.round(input[2] / stride) - output[3] = np.round(input[3] / stride) - return output - - -class TestReorgYOLO(unittest.TestCase): - def test_reorgyolo_infer(self): - graph = build_graph(nodes_attributes, - [('node_1', 'reorg'), - ('reorg', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': np.array([1, 3, 227, 227]), 'value': None}, - 'reorg': {'stride': 2, - **layout_attrs()} - }) - - reorg_node = Node(graph, 'reorg') - ReorgYoloOp.reorgyolo_infer(reorg_node) - exp_shape = calculate_reorgyolo_output(np.array([1, 3, 227, 227]), 2) - res_shape = graph.node['node_3']['shape'] - for i in range(0, len(exp_shape)): - self.assertEqual(exp_shape[i], 
res_shape[i]) diff --git a/tools/mo/unit_tests/mo/ops/reshape_test.py b/tools/mo/unit_tests/mo/ops/reshape_test.py deleted file mode 100644 index 0a5298004b7a42..00000000000000 --- a/tools/mo/unit_tests/mo/ops/reshape_test.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.reshape import Reshape -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'input': { - 'kind': 'op', - 'op': 'Parameter', - 'shape': None, - 'value': None, - }, - 'data': { - 'kind': 'data', - 'shape': None, - 'value': None, - }, - 'output_shape': { - 'kind': 'op', - 'op': 'Const', - 'value': None, - 'shape': None, - }, - 'output_shape_data': { - 'kind': 'data', - 'shape': None, - 'value': None, - }, - 'reshape': { - 'op': 'Reshape', - 'kind': 'op', - 'special_zero': True, - }, - 'reshape_out': { - 'kind': 'data', - 'shape': None, - 'value': None, - } -} - - -class TestReshapeShapeInfer(): - @pytest.mark.parametrize("input_value, input_shape, output_shape, ref_value, ref_shape",[ - (None, shape_array([1, 100, 4]), shape_array([-1, 25]), None, [16, 25]), - (None, shape_array([5, 100, 4]), shape_array([0, -1, 25]), None, [5, 16, 25]), - (None, shape_array([5, dynamic_dimension_value, 4]), shape_array([4, -1, 5]), None, - shape_array([4, dynamic_dimension_value, 5])), - (None, shape_array([5, dynamic_dimension_value, 4]), shape_array([4, dynamic_dimension_value, 5]), None, - shape_array([4, dynamic_dimension_value, 5])), - (None, shape_array([dynamic_dimension_value, 4, 5]), shape_array([0, -1]), None, - shape_array([dynamic_dimension_value, 20])), - (None, shape_array([dynamic_dimension_value, 4, 5]), shape_array([5, -1, dynamic_dimension_value]), - None, shape_array([5, dynamic_dimension_value, dynamic_dimension_value])), - (None, shape_array([dynamic_dimension_value, 1, 546]), shape_array([dynamic_dimension_value, -1, 91]), - None, shape_array([dynamic_dimension_value, dynamic_dimension_value, 91])), - (None, shape_array([5, dynamic_dimension_value, 8]), shape_array([4, -1]), - None, shape_array([4, dynamic_dimension_value])), - (None, shape_array([dynamic_dimension_value]), shape_array([5]), None, shape_array([5])), - (None, shape_array([dynamic_dimension_value]), shape_array([0]), None, shape_array([dynamic_dimension_value])), - (None, shape_array([dynamic_dimension_value]), shape_array([-1]), None, shape_array([dynamic_dimension_value])), - (None, shape_array([dynamic_dimension_value]), shape_array([dynamic_dimension_value]), None, - shape_array([dynamic_dimension_value])), - # even though the target shape is dynamic since all the inputs are static so we can calculate output - (None, shape_array([5, 3, 8]), shape_array([4, dynamic_dimension_value]), None, shape_array([4, 30])), - (None, shape_array([3, 14, 5]), shape_array([dynamic_dimension_value, 2, 0]), None, shape_array([21, 2, 5])), - (shape_array([1, 2, dynamic_dimension_value, 4, 5, 6]), shape_array([6]), shape_array([-1, 2]), - shape_array([1, 2, dynamic_dimension_value, 4, 5, 6]).reshape((3, 2)), shape_array([3, 2])), - ]) - def test_reshape_infer(self, input_value, input_shape, output_shape, ref_value, ref_shape): - graph = build_graph(nodes_attributes, - [('input', 'data'), - ('data', 'reshape'), - ('output_shape', 
'output_shape_data'), - ('output_shape_data', 'reshape'), - ('reshape', 'reshape_out')], - {'data': {'shape': input_shape, 'value': input_value}, - 'output_shape': {'value': output_shape, 'shape': output_shape.shape}, - 'output_shape_data': {'value': output_shape, 'shape': output_shape.shape}, - }) - node = Node(graph, 'reshape') - Reshape.infer(node) - if ref_value is not None: - assert strict_compare_tensors(node.out_port(0).data.get_value(), shape_array(ref_value)) - assert strict_compare_tensors(node.out_port(0).data.get_shape(), shape_array(ref_shape)) diff --git a/tools/mo/unit_tests/mo/ops/roialign_test.py b/tools/mo/unit_tests/mo/ops/roialign_test.py deleted file mode 100644 index 06144107937599..00000000000000 --- a/tools/mo/unit_tests/mo/ops/roialign_test.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, float32_array - -import numpy as np -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.roialign import ROIAlign -from unit_tests.utils.graph import build_graph - - -class TestROIAlignOps(unittest.TestCase): - node_attrs = { - # input 1 - "1_input": {"kind": "op", "type": "Parameter", "value": None}, - "input_data": {"shape": None, "kind": "data", "value": None}, - #input 2 - "2_rois": {"kind": "op", "type": "Parameter","value": None}, - "rois_data": {"shape": None,"kind": "data", "value": None}, - # input 3 - "3_indices": {"kind": "op","type": "Parameter"}, - "indices_data": {"shape": None, "kind": "data", "value": None}, - # ROIAlign - "node": { - "kind": "op", - "type": "ROIAlign", - "pooled_h": None, - "pooled_w": None, - "mode": None, - "sampling_ratio": 2, - "spatial_scale": 16, - "aligned_mode": None, - }, - "node_data": {"shape": None, "kind": "data", "value": None}, - # output - "result": {"kind": "op","type": "Result"}, - } - - def test_roialignv1(self): - graph = build_graph( - self.node_attrs, - [ - ("1_input", "input_data"), - ("input_data", "node", {"in": 0}), - ("2_rois", "rois_data"), - ("rois_data", "node", {"in": 1}), - ("3_indices", "indices_data"), - ("indices_data", "node", {"in": 2}), - ("node", "node_data"), - ("node_data", "result"), - ], - { - 'input_data': {'shape': int64_array([1, 256, 200, 272])}, - 'rois_data': {'shape': int64_array([1000, 4])}, - 'indices_data': {'shape': int64_array([1000])}, - 'node': {'mode': 'max', 'pooled_h': 7, 'pooled_w': 7, 'aligned_mode': 'asymmetric', 'version': 'opset9'}, - } - ) - graph.graph["layout"] = "NCHW" - node = Node(graph, "node") - ROIAlign.infer(node) - self.assertListEqual(list([1000, 256, 7, 7]), graph.node['node_data']['shape'].data.tolist()) - - def test_roialignv2(self): - graph = build_graph( - self.node_attrs, - [ - ("1_input", "input_data"), - ("input_data", "node", {"in": 0}), - ("2_rois", "rois_data"), - ("rois_data", "node", {"in": 1}), - ("3_indices", "indices_data"), - ("indices_data", "node", {"in": 2}), - ("node", "node_data"), - ("node_data", "result"), - ], - { - 'input_data': {'shape': int64_array([7, 256, 200, 200])}, - 'rois_data': {'shape': int64_array([300, 4])}, - 'indices_data': {'shape': int64_array([300])}, - 'node': {'mode': 'max', 'pooled_h': 5, 'pooled_w': 6, 'aligned_mode': 'half_pixel_for_nn', 'version':'opset9'}, - } - ) - graph.graph["layout"] = "NCHW" - node = Node(graph, "node") - - ROIAlign.infer(node) - self.assertListEqual(list([300, 256, 5, 6]), 
graph.node['node_data']['shape'].data.tolist()) - - def test_roialignv3(self): - graph = build_graph( - self.node_attrs, - [ - ("1_input", "input_data"), - ("input_data", "node", {"in": 0}), - ("2_rois", "rois_data"), - ("rois_data", "node", {"in": 1}), - ("3_indices", "indices_data"), - ("indices_data", "node", {"in": 2}), - ("node", "node_data"), - ("node_data", "result"), - ], - { - 'input_data': {'shape': int64_array([2, 3, 5, 5])}, - 'rois_data': {'shape': int64_array([7, 4])}, - 'indices_data': {'shape': int64_array([7])}, - 'node': {'mode': 'max', 'pooled_h': 2, 'pooled_w': 2, 'aligned_mode': 'half_pixel', 'version': 'opset9'}, - } - ) - graph.graph["layout"] = "NCHW" - node = Node(graph, "node") - - ROIAlign.infer(node) - self.assertListEqual(list([7, 3, 2, 2]), graph.node['node_data']['shape'].data.tolist()) - - - def test_roialign_wrong_aligned_mode(self): - graph = build_graph( - self.node_attrs, - [ - ("1_input", "input_data"), - ("input_data", "node", {"in": 0}), - ("2_rois", "rois_data"), - ("rois_data", "node", {"in": 1}), - ("3_indices", "indices_data"), - ("indices_data", "node", {"in": 2}), - ("node", "node_data"), - ("node_data", "result"), - ], - { - 'input_data': {'shape': int64_array([2, 3, 5, 5])}, - 'rois_data': {'shape': int64_array([7, 4])}, - 'indices_data': {'shape': int64_array([7])}, - 'node': {'mode': 'max', 'pooled_h': 2, 'pooled_w': 2, 'aligned_mode': 'full_pixel', 'version': 'opset9'}, - } - ) - graph.graph["layout"] = "NCHW" - node = Node(graph, "node") - self.assertRaises(AssertionError, ROIAlign.infer, node) diff --git a/tools/mo/unit_tests/mo/ops/scatter_test.py b/tools/mo/unit_tests/mo/ops/scatter_test.py deleted file mode 100644 index d61d69a2b52643..00000000000000 --- a/tools/mo/unit_tests/mo/ops/scatter_test.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.scatter import ScatterElementsUpdate, ScatterUpdate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph, regular_op_with_empty_data, result, connect, valued_const_with_data - - -class TestScatterElementsInferTest(): - @pytest.mark.parametrize("data, indices, updates, axis, ref_res",[ - ([[0.0, 0.0, 0.0], - [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0]], - [[1, 0, 2], - [0, 2, 1]], - [[1.0, 1.1, 1.2], - [2.0, 2.1, 2.2]], - 0, - [[2.0, 1.1, 0.0], - [1.0, 0.0, 2.2], - [0.0, 2.1, 1.2]]), - - ([[1.0, 2.0, 3.0, 4.0, 5.0]], - [[1, 3]], - [[1.1, 2.1]], - 1, - [[1.0, 1.1, 3.0, 2.1, 5.0]]), - - ([[1.0, 2.0, 3.0, 4.0, 5.0]], - [[1, 3]], - [[1.1, 2.1]], - [1], - [[1.0, 1.1, 3.0, 2.1, 5.0]]), - - ([ # 3D case - [[1, 2], - [3, 4]], - [[5, 6], - [7, 8]], - [[9, 10], - [11, 12]] - ], - [ - [[1, 0], - [0, 1]], - [[1, 0], - [1, 0]], - [[0, 1], - [1, 0]] - ], - [ - [[21, 22], - [23, 24]], - [[25, 26], - [27, 28]], - [[29, 30], - [31, 32]] - ], - -1, # axis - [ - [[22, 21], - [23, 24]], - [[26, 25], - [28, 27]], - [[29, 30], - [32, 31]] - ]), - ]) - def test_scatterelements_value_infer(self, data, indices, updates, axis, ref_res): - nodes = { - **valued_const_with_data('data', np.array(data)), - **valued_const_with_data('indices', int64_array(indices)), - **valued_const_with_data('updates', np.array(updates)), - **valued_const_with_data('axis', int64_array(axis)), - **regular_op_with_empty_data('scatter_elements', {'op': 
'ScatterElementsUpdate', 'axis': axis}), - **result() - } - - graph = build_graph(nodes_attrs=nodes, edges=[ - *connect('data', '0:scatter_elements'), - *connect('indices', '1:scatter_elements'), - *connect('updates', '2:scatter_elements'), - *connect('axis', '3:scatter_elements'), - *connect('scatter_elements', 'output') - ], nodes_with_edges_only=True) - graph.stage = 'middle' - - scatter_el_node = Node(graph, 'scatter_elements') - ScatterElementsUpdate.infer(scatter_el_node) - - res_output_shape = scatter_el_node.out_node().shape - assert np.array_equal(int64_array(ref_res).shape, res_output_shape) - - res_output_value = scatter_el_node.out_node().value - assert np.array_equal(ref_res, res_output_value) - - -class TestScatterUpdateInferTest(): - @pytest.mark.parametrize("data, indices, updates, axis, ref_res",[ - ([[0.0, 0.0, 0.0], - [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0]], - [[1, 2]], - [[[1.0, 1.1, 1.2], - [2.0, 2.1, 2.2]]], - 0, - [[0.0, 0.0, 0.0], - [1.0, 1.1, 1.2], - [2.0, 2.1, 2.2]]), - - # negative axis - ([[0.0, 0.0, 0.0], - [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0]], - [[1, 2]], - [[[1.0, 1.1]], - [[1.2, 2.0]], - [[2.1, 2.2]]], - -1, - [[0.0, 1.0, 1.1], - [0.0, 1.2, 2.0], - [0.0, 2.1, 2.2]]), - - # one element - ([[[0., 0.], [0., 0.], [0., 0.]], - [[0., 0.], [0., 0.], [0., 0.]], - [[0., 0.], [0., 0.], [0., 0.]]], - [[1]], - [[[[1., 2.], [3., 4.], [5., 6.]]]], - 0, - [[[0., 0.], [0., 0.], [0., 0.]], - [[1., 2.], [3., 4.], [5., 6.]], - [[0., 0.], [0., 0.], [0., 0.]]]), - - # shape [2,3,3] - ([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], - [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], - # indices [3,2] - [[1, 2], [0, 1], [1, 2]], - # updates [2,3,2,3] - [[[[1., 2., 3.], [4., 5., 6.]], - [[7., 8., 9.], [9., 8., 7.]], - [[6., 5., 4.], [3., 2., 1.]]], - [[[1., 2., 3.], [4., 5., 6.]], - [[7., 8., 9.], [9., 8., 7.]], - [[6., 5., 4.], [3., 2., 1.]]]], - # axis - 1, - # ref - [[[7., 8., 9.], [6., 5., 4.], [3., 2., 1.]], - [[7., 8., 9.], [6., 5., 4.], [3., 2., 1.]]]), - - # dynamic updates - ([0, 0, 0], - [2], - shape_array([dynamic_dimension_value]), - 0, - shape_array([0, 0, dynamic_dimension_value])), - ]) - def test_scatter_update_value_infer(self, data, indices, updates, axis, ref_res): - nodes = { - **valued_const_with_data('data', np.array(data)), - **valued_const_with_data('indices', int64_array(indices)), - **valued_const_with_data('updates', np.array(updates)), - **valued_const_with_data('axis', int64_array(axis)), - **regular_op_with_empty_data('scatter_update', {'op': 'ScatterUpdate', 'axis': axis}), - **result() - } - - graph = build_graph(nodes_attrs=nodes, edges=[ - *connect('data', '0:scatter_update'), - *connect('indices', '1:scatter_update'), - *connect('updates', '2:scatter_update'), - *connect('axis', '3:scatter_update'), - *connect('scatter_update', 'output') - ], nodes_with_edges_only=True) - graph.stage = 'middle' - - scatter_update_node = Node(graph, 'scatter_update') - ScatterUpdate.infer(scatter_update_node) - - res_output_shape = scatter_update_node.out_node().shape - assert np.array_equal(int64_array(ref_res).shape, res_output_shape) - - res_output_value = scatter_update_node.out_node().value - assert np.array_equal(ref_res, res_output_value) diff --git a/tools/mo/unit_tests/mo/ops/scatternd_test.py b/tools/mo/unit_tests/mo/ops/scatternd_test.py deleted file mode 100644 index 2d5ef18af85b6d..00000000000000 --- a/tools/mo/unit_tests/mo/ops/scatternd_test.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - 
-import unittest - -import numpy as np - -from openvino.tools.mo.ops.scatternd import ScatterNDUpdate -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'input': {'shape': None, 'value': None, 'kind': 'data'}, - 'indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'updates': {'shape': None, 'value': None, 'kind': 'data'}, - 'scatternd_node': {'op': 'ScatterNDUpdate', 'kind': 'op'}, - 'output': {'shape': None, 'value': None, 'kind': 'data'}} - -# graph 1 -edges = [('input', 'scatternd_node', {'in': 0}), - ('indices', 'scatternd_node', {'in': 1}), - ('updates', 'scatternd_node', {'in': 2}), - ('scatternd_node', 'output', {'out': 0})] - -# test data for partial infer -inputs1 = {'input': {'shape': int64_array([10, 40]), 'value': None}, - 'indices': {'shape': int64_array([3, 2]), 'value': None}, - 'updates': {'shape': int64_array([3]), 'value': None}} - -inputs2 = {'input': {'shape': int64_array([20, 30]), 'value': None}, - 'indices': {'shape': int64_array([2]), 'value': None}, - 'updates': {'shape': int64_array([]), 'value': None}} - -inputs3 = {'input': {'shape': int64_array([20, 30, 5]), 'value': None}, - 'indices': {'shape': int64_array([2]), 'value': None}, - 'updates': {'shape': int64_array([5]), 'value': None}} - -inputs4 = {'input': {'shape': int64_array([10, 40, 50]), 'value': None}, - 'indices': {'shape': int64_array([7, 3, 2]), 'value': None}, - 'updates': {'shape': int64_array([7, 3, 50]), 'value': None}} - -# test data for constant folding -inputs5 = {'input': {'shape': int64_array([8]), 'value': int64_array([1, 2, 3, 4, 5, 6, 7, 8])}, - 'indices': {'shape': int64_array([4, 1]), 'value': int64_array([[4], [3], [1], [7]])}, - 'updates': {'shape': int64_array([4]), 'value': int64_array([9, 10, 11, 12])}} -output5 = int64_array([1, 11, 3, 10, 9, 6, 7, 12]) - -inputs6 = {'input': {'shape': int64_array([4, 4, 4]), 'value': int64_array([[[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], - [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], - [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]], - [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]])}, - 'indices': {'shape': int64_array([2, 1]), 'value': int64_array([[0], [2]])}, - 'updates': {'shape': int64_array([2, 4, 4]), 'value': int64_array([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]]])}} -output6 = int64_array([[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - [[1, 2, 3, 4], [5, 6, 7, 8], [8, 7, 6, 5], [4, 3, 2, 1]], - [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3], [4, 4, 4, 4]], - [[8, 7, 6, 5], [4, 3, 2, 1], [1, 2, 3, 4], [5, 6, 7, 8]]]) - -inputs7 = {'input': {'shape': int64_array([8]), 'value': int64_array([1, 2, 3, 4, 5, 6, 7, 8])}, - 'indices': {'shape': int64_array([1]), 'value': int64_array([4])}, - 'updates': {'shape': int64_array([]), 'value': 9}} -output7 = int64_array([1, 2, 3, 4, 9, 6, 7, 8]) - -inputs8 = {'input': {'shape': int64_array([3]), 'value': int64_array([1, 2, 3])}, - 'indices': {'shape': int64_array([1]), 'value': int64_array([2])}, - 'updates': {'shape': int64_array([1]), 'value': int64_array([9])}} -output8 = int64_array([1, 2, 9]) - -inputs9 = {'input': {'shape': int64_array([1, 5, 5, 1]), 'value': np.zeros([1, 5, 5, 1],dtype=np.int32)}, - 'indices': {'shape': int64_array([1, 2, 2, 1, 4]), - 'value': np.array([[[[[0, 0, 0, 0]], [[0, 0, 1, 0]]], [[[0, 2, 1, 
0]], [[0, 3, 4, 0]]]]])}, - 'updates': {'shape': int64_array([1, 2, 2, 1]), 'value': np.ones([1, 2, 2, 1])}} - -output9 = np.array([[[[1], [1], [0], [0], [0]], # shape [1, 5, 5, 1] - [[0], [0], [0], [0], [0]], - [[0], [1], [0], [0], [0]], - [[0], [0], [0], [0], [1]], - [[0], [0], [0], [0], [0]]]]) - -class TestScatterNDUpdate(unittest.TestCase): - def test_partial_infer1(self): - graph = build_graph(nodes_attributes, edges, inputs1) - scatternd_node = Node(graph, 'scatternd_node') - ScatterNDUpdate.infer(scatternd_node) - - # prepare reference results - ref_output_shape = np.array([10, 40], dtype=np.int32) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer2(self): - graph = build_graph(nodes_attributes, edges, inputs2) - scatternd_node = Node(graph, 'scatternd_node') - ScatterNDUpdate.infer(scatternd_node) - - # prepare reference results - ref_output_shape = np.array([20, 30], dtype=np.int32) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer3(self): - graph = build_graph(nodes_attributes, edges, inputs3) - scatternd_node = Node(graph, 'scatternd_node') - ScatterNDUpdate.infer(scatternd_node) - - # prepare reference results - ref_output_shape = np.array([20, 30, 5], dtype=np.int32) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_partial_infer4(self): - graph = build_graph(nodes_attributes, edges, inputs4) - scatternd_node = Node(graph, 'scatternd_node') - ScatterNDUpdate.infer(scatternd_node) - - # prepare reference results - ref_output_shape = np.array([10, 40, 50], dtype=np.int32) - - # get the result - res_output_shape = graph.node['output']['shape'] - - self.assertTrue(np.array_equal(ref_output_shape, res_output_shape), - 'values do not match expected: {} and given: {}'.format(ref_output_shape, res_output_shape)) - - def test_infer5(self): - graph = build_graph(nodes_attributes, edges, inputs5) - scatternd_node = Node(graph, 'scatternd_node') - ScatterNDUpdate.infer(scatternd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(output5, res_output_value), - 'values do not match expected: {} and given: {}'.format(output5, res_output_value)) - - def test_infer6(self): - graph = build_graph(nodes_attributes, edges, inputs6) - scatternd_node = Node(graph, 'scatternd_node') - ScatterNDUpdate.infer(scatternd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(output6, res_output_value), - 'values do not match expected: {} and given: {}'.format(output6, res_output_value)) - - def test_infer7_scalar(self): - graph = build_graph(nodes_attributes, edges, inputs7) - scatternd_node = Node(graph, 'scatternd_node') - ScatterNDUpdate.infer(scatternd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(output7, res_output_value), - 'values do not match expected: {} and given: {}'.format(output7, 
res_output_value)) - - def test_infer8(self): - graph = build_graph(nodes_attributes, edges, inputs8) - scatternd_node = Node(graph, 'scatternd_node') - ScatterNDUpdate.infer(scatternd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(output8, res_output_value), - 'values do not match expected: {} and given: {}'.format(output8, res_output_value)) - - def test_infer9(self): - graph = build_graph(nodes_attributes, edges, inputs9) - scatternd_node = Node(graph, 'scatternd_node') - ScatterNDUpdate.infer(scatternd_node) - - # get the result - res_output_value = graph.node['output']['value'] - - self.assertTrue(np.array_equal(output9, res_output_value), - 'values do not match expected: {} and given: {}'.format(output8, res_output_value)) diff --git a/tools/mo/unit_tests/mo/ops/select_test.py b/tools/mo/unit_tests/mo/ops/select_test.py deleted file mode 100644 index 63ebf459ba0159..00000000000000 --- a/tools/mo/unit_tests/mo/ops/select_test.py +++ /dev/null @@ -1,330 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.select import Select -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension, shape_array, dynamic_dimension_value -from openvino.tools.mo.front.common.partial_infer.utils import strict_compare_tensors, int64_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.graph import build_graph, valued_const_with_data, result, regular_op_with_empty_data, \ - connect - - -class TestSelect(unittest.TestCase): - - @staticmethod - def build_select_graph_and_infer(condition_value, then_value, else_value, out_value, - condition_shape=None, then_shape=None, else_shape=None, out_shape=None, - auto_broadcast='numpy', fw_format=None): - if then_value is not None: - then_shape = int64_array(then_value.shape) - if else_value is not None: - else_shape = int64_array(else_value.shape) - - nodes = { - **valued_const_with_data('then', then_value, then_shape), - **valued_const_with_data('else', else_value, else_shape), - **valued_const_with_data('condition', condition_value, condition_shape), - **regular_op_with_empty_data('select', {'op': 'Select', 'auto_broadcast': auto_broadcast, 'format': fw_format}), - **result('out'), - } - edges = [ - *connect('condition', '0:select'), - *connect('then', '1:select'), - *connect('else', '2:select'), - *connect('select', 'out'), - ] - graph = build_graph(nodes, edges) - - select_node = Node(graph, 'select') - Select.infer(select_node) - - select_out_node = Node(graph, 'select_d') - - value_desc = 'values' - ref_val = out_value - actual_val = select_out_node['value'] - if out_shape is not None: - value_desc = 'shapes' - ref_val = out_shape - actual_val = select_out_node['shape'] - assert select_out_node['value'] is None, "if 'out_shape' is defined manually 'value' must be None" - - flag = strict_compare_tensors(actual_val, ref_val) - msg = '' if flag else 'reference {} and actual {} {} do not match\n'.format(ref_val, actual_val, value_desc) - return flag, msg - - def test_1(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([5, 6], dtype=bool), - then_value=np.ones([5, 6], dtype=float), - else_value=np.zeros([5, 6], dtype=float), - out_value=np.ones([5, 6], dtype=float)) - self.assertTrue(flag, msg) - - def test_2(self): - flag, msg = 
self.build_select_graph_and_infer(condition_value=np.ones([15, 3, 5], dtype=bool), - then_value=np.ones([15, 3, 5], dtype=float), - else_value=np.zeros([15, 1, 5], dtype=float), - out_value=np.ones([15, 3, 5], dtype=float)) - self.assertTrue(flag, msg) - - def test_select_infer_no_condition(self): - flag, msg = self.build_select_graph_and_infer(condition_value=None, condition_shape=[2], - then_value=None, then_shape=[2], - else_value=None, else_shape=[2], - out_value=None, out_shape=[2]) - self.assertTrue(flag, msg) - - def test_select_infer_condition_true(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.array([True, True], dtype=bool), - then_value=np.array([1, 1], dtype=np.int8), - else_value=np.array([2, 2], dtype=np.int8), - out_value=np.array([1, 1], dtype=np.int8)) - self.assertTrue(flag, msg) - - def test_select_infer_condition_false(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.array([False, False], dtype=bool), - then_value=np.array([1, 1], dtype=np.int8), - else_value=np.array([2, 2], dtype=np.int8), - out_value=np.array([2, 2], dtype=np.int8)) - self.assertTrue(flag, msg) - - def test_select_infer_condition_true_2(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.array([True], dtype=bool), - then_value=np.ones([15, 3, 5], dtype=float), - else_value=np.zeros([15, 1, 5], dtype=float), - out_value=np.ones([15, 3, 5], dtype=float)) - self.assertTrue(flag, msg) - - def test_select_infer_condition_true_then_and_else_are_scalars(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.array([True], dtype=bool), - then_value=np.array(3, dtype=float), - else_value=np.array(1, dtype=float), - out_value=np.array([3], dtype=float)) - self.assertTrue(flag, msg) - - def test_select_infer_condition_true_then_and_else_are_scalars_2(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.array(True, dtype=bool), - then_value=np.array(3, dtype=float), - else_value=np.array(1, dtype=float), - out_value=np.array(3, dtype=float)) - self.assertTrue(flag, msg) - - def test_select_infer_condition_false_then_and_else_are_scalars(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.array([False], dtype=bool), - then_value=np.array(3, dtype=float), - else_value=np.array(1, dtype=float), - out_value=np.array([1], dtype=float)) - self.assertTrue(flag, msg) - - def test_select_infer_condition_false_then_and_else_are_scalars_2(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.array(False, dtype=bool), - then_value=np.array(3, dtype=float), - else_value=np.array(1, dtype=float), - out_value=np.array(1, dtype=float)) - self.assertTrue(flag, msg) - - def test_select_infer_condition_false_2(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.array([False], dtype=bool), - then_value=np.ones([15, 3, 5], dtype=float), - else_value=np.zeros([15, 1, 5], dtype=float), - out_value=np.zeros([15, 3, 5], dtype=float)) - self.assertTrue(flag, msg) - - # if one of the branches is None then np.where shouldn't be used to avoid object dtype in output - # res = np.where(condition, numpy_array_of_int[float]_dtype, None) - # print(res.dtype) => object which is not compatible with other numeric dtypes, will fail further without - # clear explanation, need to catch such cases as soon as possible - def test_select_infer_None_then_branch_1(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.zeros([15, 3, 5], dtype=bool), - 
then_value=None, then_shape=[15, 3, 5], - else_value=np.ones([15, 1, 5], dtype=float), - out_value=np.ones([15, 3, 5], dtype=float)) - self.assertTrue(flag, msg) - - def test_select_infer_None_then_branch_2(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([15, 3, 5], dtype=bool), - then_value=None, then_shape=[15, 3, 5], - else_value=np.ones([15, 1, 5], dtype=float), - out_value=None) - self.assertTrue(flag, msg) - - def test_select_infer_None_else_branch_1(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([15, 3, 5], dtype=bool), - then_value=np.ones([15, 1, 5], dtype=float), - else_value=None, else_shape=[15, 3, 5], - out_value=np.ones([15, 3, 5], dtype=float)) - self.assertTrue(flag, msg) - - def test_select_infer_None_else_branch_2(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.zeros([15, 3, 5], dtype=bool), - then_value=np.ones([15, 1, 5], dtype=float), - else_value=None, else_shape=[15, 3, 5], - out_value=None) - self.assertTrue(flag, msg) - - def test_select_broadcast_1(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 4, 5], dtype=bool), - then_value=np.ones([], dtype=float), - else_value=np.zeros([2, 3, 4, 5], dtype=float), - out_value=np.ones([2, 3, 4, 5], dtype=float)) - self.assertTrue(flag, msg) - - def test_select_broadcast_2(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 4, 1], dtype=bool), - then_value= np.ones([1, 3, 1, 5], dtype=float), - else_value=np.zeros([2, 1, 1, 5], dtype=float), - out_value=np.ones([2, 3, 4, 5], dtype=float)) - self.assertTrue(flag, msg) - - def test_select_broadcast_3(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 1, 1], dtype=bool), - then_value= np.ones([2, 3, 4, 5], dtype=float), - else_value=np.zeros([2, 1, 1, 5], dtype=float), - out_value=np.ones([2, 3, 4, 5], dtype=float)) - self.assertTrue(flag, msg) - - def test_select_broadcast_4(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ones([2, 3, 4, 5], dtype=bool), - then_value= np.ones([5], dtype=float), - else_value=np.zeros([2, 3, 4, 5], dtype=float), - out_value=np.ones([2, 3, 4, 5], dtype=float)) - self.assertTrue(flag, msg) - - # when output shape is broadcasted from condition, then, and else shapes - def test_select_broadcast_with_shape(self): - flag, msg = self.build_select_graph_and_infer(condition_shape=[2, 3, 4, 1], condition_value=None, - then_shape=[1, 3, 1, 5], then_value=None, - else_shape=[2, 1, 1, 5], else_value=None, - out_shape=[2, 3, 4, 5], out_value=None) - self.assertTrue(flag, msg) - - def test_select_infer_assert_shapes(self): - with self.assertRaisesRegex(AssertionError, "must be broadcastable"): - self.build_select_graph_and_infer(condition_value=None, condition_shape=[2, 2], - then_value=None, then_shape=[2, 2], - else_value=None, else_shape=[3, 3], - out_value=None, out_shape=[42, 42]) - - def test_select_infer_assert_condition_shapes_are_compatible(self): - with self.assertRaisesRegex(AssertionError, "must be broadcastable"): - self.build_select_graph_and_infer(condition_value=None, condition_shape=[42, 3], - then_value=None, then_shape=[1, 3], - else_value=None, else_shape=[3, 3], - out_value=None, out_shape=[3, 3]) - - def test_select_infer_masked_1(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ma.array([True, True], mask=[1, 1]), - # condition_value = [dynamic_dimension, dynamic_dimension]) - then_value=None, 
then_shape=[2], - else_value=np.zeros((2, 2), dtype=np.int64), - out_value=None) - self.assertTrue(flag, msg) - - def test_select_infer_masked_2(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ma.array([False, False], mask=[1, 1]), - # condition_value = [dynamic_dimension, dynamic_dimension]) - then_value=None, then_shape=[2], - else_value=np.zeros((2, 2), dtype=np.int64), - out_value=None) - self.assertTrue(flag, msg) - - def test_select_infer_masked_3(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ma.array([True, True], mask=[1, 1]), - # condition_value = [dynamic_dimension, dynamic_dimension]) - then_value=None, then_shape=[2], - else_value=np.zeros((2, 2), dtype=np.int64), - out_value=None) - self.assertTrue(flag, msg) - - def test_select_infer_masked_4(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ma.array([True, False], mask=[0, 1]), - # condition_value = [True, dynamic_dimension]) - then_value=np.ones((2, 2), dtype=np.int64), - else_value=np.zeros((2, 2), dtype=np.int64), - out_value=np.ma.array([[1, 42], [1, 42]], mask=[[0, 1], [0, 1]])) - # out_value = [[1, dynamic_dimension], [1, dynamic_dimension]] - self.assertTrue(flag, msg) - - def test_select_infer_masked_5(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ma.array([False, True], mask=[0, 1]), - # condition_value = [True, dynamic_dimension]) - then_value=np.ones((2, 2), dtype=np.int64), - else_value=np.zeros((2, 2), dtype=np.int64), - out_value=np.ma.array([[0, 42], [0, 42]], mask=[[0, 1], [0, 1]])) - # out_value = [[0, dynamic_dimension], [0, dynamic_dimension]] - self.assertTrue(flag, msg) - - def test_select_infer_masked_6(self): - flag, msg = self.build_select_graph_and_infer(condition_value=np.ma.array([True, False], mask=[1, 0]), - # condition_value = [True, dynamic_dimension]) - then_value=np.ones((2, 2), dtype=np.int64), - else_value=np.zeros((2, 2), dtype=np.int64), - out_value=np.ma.array([[42, 0], [42, 0]], mask=[[1, 0], [1, 0]])) - # out_value = [[dynamic_dimension, 0], [dynamic_dimension, 0]] - self.assertTrue(flag, msg) - - def test_select_infer_no_broadcast_dynamic_then_else_shapes(self): - flag, msg = self.build_select_graph_and_infer(condition_value=None, condition_shape=shape_array([100, 100]), - then_value=None, then_shape=shape_array([100, dynamic_dimension_value]), - else_value=None, else_shape=shape_array([dynamic_dimension_value, 100]), - out_value=None, out_shape=shape_array([100, 100]), - auto_broadcast='none') - self.assertTrue(flag, msg) - - def test_select_infer_no_broadcast_dynamic_then_else_shapes_2(self): - flag, msg = self.build_select_graph_and_infer(condition_value=None, condition_shape=shape_array([100, 100]), - then_value=None, then_shape=shape_array([dynamic_dimension_value, 100]), - else_value=None, else_shape=shape_array([100, dynamic_dimension_value]), - out_value=None, out_shape=shape_array([100, 100]), - auto_broadcast='none') - self.assertTrue(flag, msg) - - def test_select_infer_no_broadcast_dynamic_shapes(self): - flag, msg = self.build_select_graph_and_infer(condition_value=None, condition_shape=shape_array([100, 100]), - then_value=None, then_shape=shape_array([100, dynamic_dimension_value]), - else_value=None, else_shape=shape_array([dynamic_dimension_value, 100]), - out_value=None, out_shape=shape_array([100, 100]), - auto_broadcast='none') - self.assertTrue(flag, msg) - - def test_select_infer_tf_condition(self): - flag, msg = 
self.build_select_graph_and_infer(condition_value=None, condition_shape=shape_array([100]), - then_value=None, then_shape=shape_array([100, 20]), - else_value=None, else_shape=shape_array([100, 20]), - out_value=None, out_shape=shape_array([100, 20]), - auto_broadcast='numpy', fw_format='tf') - self.assertTrue(flag, msg) - - def test_select_infer_tf_condition_dyn(self): - flag, msg = self.build_select_graph_and_infer(condition_value=None, - condition_shape=shape_array([dynamic_dimension_value]), - then_value=None, - then_shape=shape_array([dynamic_dimension_value, 20]), - else_value=None, - else_shape=shape_array([dynamic_dimension_value, 20]), - out_value=None, - out_shape=shape_array([dynamic_dimension_value, 20]), - auto_broadcast='numpy', fw_format='tf') - self.assertTrue(flag, msg) - - def test_select_infer_tf_condition_assert_raises(self): - with self.assertRaisesRegex(AssertionError, "if 'condition' is a 1D tensor then it's size"): - self.build_select_graph_and_infer(condition_value=None, condition_shape=shape_array([42]), - then_value=None, then_shape=shape_array([100, 20]), - else_value=None, else_shape=shape_array([100, 20]), - out_value=None, out_shape=shape_array([100, 20]), - auto_broadcast='numpy', fw_format='tf') - - def test_select_infer_assert_pdpd(self): - with self.assertRaisesRegex(Error, "PDPD broadcasting rule is not implemented yet"): - self.build_select_graph_and_infer(condition_value=None, condition_shape=[2, 2], - then_value=None, then_shape=[2, 2], - else_value=None, else_shape=[3, 3], - out_value=None, out_shape=[42, 42], - auto_broadcast='pdpd') - diff --git a/tools/mo/unit_tests/mo/ops/slice_like_test.py b/tools/mo/unit_tests/mo/ops/slice_like_test.py deleted file mode 100644 index a45bb10830d0d5..00000000000000 --- a/tools/mo/unit_tests/mo/ops/slice_like_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.slice_like import SliceLike -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'input': {'kind': 'op', 'op': 'Const'}, - 'input_data': {'kind': 'data', 'shape': int64_array([3, 4]), 'value': np.arange(1, 13).reshape([3, 4])}, - 'shape_like': {'kind': 'op', 'op': 'Const', 'shape': int64_array([2, 3]), 'value': None}, - 'shape_like_data': {'kind': 'data', 'shape': int64_array([2, 3]), 'value': None}, - 'slice_like': {'kind': 'op', 'op': 'slice_data'}, - 'out_data': {'kind': 'data', 'shape': None, 'value': None} -} - -edges = [ - ('input', 'input_data'), - ('input_data', 'slice_like', {'in': 0}), - ('shape_like', 'shape_like_data'), - ('shape_like_data', 'slice_like', {'in': 1}), - ('slice_like', 'out_data') -] - - -class SliceLikeTest(unittest.TestCase): - - def test_1(self): - graph = build_graph(nodes_attributes, edges, {'slice_like': {'axes': None}}) - slice_like = Node(graph, 'slice_like') - SliceLike.infer(slice_like) - ref_shape = int64_array([2, 3]) - ref_value = np.array([[1, 2, 3], [5, 6, 7]]) - res_shape = graph.node['out_data']['shape'] - res_value = graph.node['out_data']['value'] - self.assertTrue(np.array_equal(res_shape, ref_shape)) - self.assertTrue(np.array_equal(res_value, ref_value)) - - def test_2(self): - graph = build_graph(nodes_attributes, edges, {'slice_like': {'axes': (0, 1)}}) - slice_like = Node(graph, 'slice_like') - SliceLike.infer(slice_like) - 
ref_shape = int64_array([2, 3]) - ref_value = np.array([[1, 2, 3], [5, 6, 7]]) - res_shape = graph.node['out_data']['shape'] - res_value = graph.node['out_data']['value'] - self.assertTrue(np.array_equal(res_shape, ref_shape)) - self.assertTrue(np.array_equal(res_value, ref_value)) - - def test_3(self): - graph = build_graph(nodes_attributes, edges, {'slice_like': {'axes': (0,)}}) - slice_like = Node(graph, 'slice_like') - SliceLike.infer(slice_like) - ref_shape = int64_array([2, 4]) - ref_value = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) - res_shape = graph.node['out_data']['shape'] - res_value = graph.node['out_data']['value'] - self.assertTrue(np.array_equal(res_shape, ref_shape)) - self.assertTrue(np.array_equal(res_value, ref_value)) - - def test_4(self): - graph = build_graph(nodes_attributes, edges, {'slice_like': {'axes': (-1,)}}) - slice_like = Node(graph, 'slice_like') - SliceLike.infer(slice_like) - ref_shape = int64_array([3, 3]) - ref_value = np.array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) - res_shape = graph.node['out_data']['shape'] - res_value = graph.node['out_data']['value'] - self.assertTrue(np.array_equal(res_shape, ref_shape)) - self.assertTrue(np.array_equal(res_value, ref_value)) diff --git a/tools/mo/unit_tests/mo/ops/slice_test.py b/tools/mo/unit_tests/mo/ops/slice_test.py deleted file mode 100644 index 356a18b5f28b2c..00000000000000 --- a/tools/mo/unit_tests/mo/ops/slice_test.py +++ /dev/null @@ -1,205 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value, shape_array, \ - strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.slice import Slice, OvSlice -from unit_tests.utils.graph import build_graph, valued_const_with_data, valued_data, regular_op_with_empty_data, \ - connect, shaped_data, shaped_const_with_data - - -class TestSliceOp(): - @pytest.mark.parametrize("inp_value, inp_shape, starts, ends, axes, steps, expected_value, expected_shape",[ - # standard case - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, 2], [0, 1], [1, 1], - [[5], [3], [6]], [3, 1]), - # negative bounds - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, -2], [0, 1], [1, 1], - [[5], [3], [6]], [3, 1]), - # unusual order of axes - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, -2], [1, 0], [1, 1], - [[2, 3, 5]], [1, 3]), - # when only input_shape is defined without values (one from bottom element is shape) - (None, [4, 5, 6], [1, 2], [4, 3], [0, 1], [1, 1], None, [3, 1, 6]), - # boundary case - (None, [4, 5, 6], [0, 2], [np.iinfo(np.int32).max, 3], [0, 1], [1, 1], None, [4, 1, 6]), - # boundary case - (None, [4, 5, 6], [np.iinfo(np.int32).min, 2], [3, 3], [0, 1], [1, 1], None, [3, 1, 6],), - # 1D input - ([1, 3, 224, 224], [4], [1], [2], [0], [1], [3], [1]), - # 1D input with negative starts - (None, [4], [-1], [1], [0], [-1], None, [2]), - # 1D input with negative ends - (None, [4], [1], [-1], [0], [1], None, [2]), - # with rounding (e.g. take from 1st to 3rd with step 4 should give shape 1 not 0) - (None, [4], [1], [3], [0], [4], None, [1]), - # with rounding and negative steps (e.g. 
take from 1st to 3rd with step 4 should give shape 1 not 0) - (None, [10], [7], [3], [0], [-7], None, [1]), - # reversing the sequence of elements - (None, [10], [-1], [np.iinfo(np.int32).min], [0], [-1], None, [10]), - # dynamic dimensions cases - # starts are non-constant - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], None, [3, 2], [0, 1], [1, 1], None, - [dynamic_dimension_value, dynamic_dimension_value]), - # ends are non-constant - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], None, [0, 1], [1, 1], None, - [dynamic_dimension_value, dynamic_dimension_value]), - # axes are non-constant - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, -2], None, [1, 1], None, - [dynamic_dimension_value, dynamic_dimension_value]), - # steps are non-constant - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, -2], [0, 1], None, None, - [dynamic_dimension_value, dynamic_dimension_value]), - # negative steps and since after normalization starts < ends output shape has 0-size dimension - (None, [20], [1], [-1], [0], [-2], None, [0]), - # since starts == ends output shape has 0-size dimension - (None, [4], [1], [1], [0], [1], None, [0]), - # since starts > ends output shape has 0-size dimension - (None, [4], [2], [1], [0], [1], None, [0]) - ]) - def test_slice_infer(self, inp_value, inp_shape, starts, ends, axes, steps, expected_value, expected_shape): - if inp_value is None: - input_node = shaped_data('data_1', int64_array(inp_shape)) - else: - input_node = valued_data('data_1', int64_array(inp_value)) - if inp_value is not None and inp_shape is not None: - assert np.array_equal(np.array(inp_value).shape, inp_shape) - - def convert_args(val, name=''): - if val is not None: - return valued_const_with_data(name, int64_array(val)) - else: - return shaped_const_with_data(name, [0]) # fake shape - - starts = convert_args(starts, 'starts') - ends = convert_args(ends, 'ends') - axes = convert_args(axes, 'axes') - steps = convert_args(steps, 'steps') - if expected_shape is not None: - expected_shape = shape_array(expected_shape) - - nodes = { - **input_node, - **regular_op_with_empty_data('slice', {'op': 'Slice'}), - **starts, - **ends, - **axes, - **steps, - } - - graph = build_graph(nodes, - [('data_1', 'slice'), - *connect('starts', '1:slice'), - *connect('ends', '2:slice'), - *connect('axes', '3:slice'), - *connect('steps', '4:slice'), - *connect('slice', 'slice_d')]) - - graph.stage = 'middle' - slice_node = Node(graph, 'slice') - - Slice.infer(slice_node) - if expected_value is not None: - assert strict_compare_tensors(slice_node.out_node().value, expected_value) - assert strict_compare_tensors(slice_node.out_node().shape, expected_shape) - - -class TestOvSliceOp(): - @pytest.mark.parametrize("inp_value, inp_shape, starts, ends, axes, steps, expected_value, expected_shape",[ - # standard case - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, 2], [0, 1], [1, 1], - [[5], [3], [6]], [3, 1]), - # negative bounds - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, -2], [0, 1], [1, 1], - [[5], [3], [6]], [3, 1]), - # unusual order of axes - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, -2], [1, 0], [1, 1], - [[2, 3, 5]], [1, 3]), - # when only input_shape is defined without values (one from bottom element is shape) - (None, [4, 5, 6], [1, 2], [4, 3], [0, 1], [1, 1], None, [3, 1, 6]), - # boundary case - (None, [4, 
5, 6], [0, 2], [np.iinfo(np.int32).max, 3], [0, 1], [1, 1], None, [4, 1, 6]), - # boundary case - (None, [4, 5, 6], [np.iinfo(np.int32).min, 2], [3, 3], [0, 1], [1, 1], None, [3, 1, 6],), - # 1D input - ([1, 3, 224, 224], [4], [1], [2], [0], [1], [3], [1]), - # 1D input with negative starts - (None, [4], [-1], [1], [0], [-1], None, [2]), - # 1D input with negative ends - (None, [4], [1], [-1], [0], [1], None, [2]), - # with rounding (e.g. take from 1st to 3rd with step 4 should give shape 1 not 0) - (None, [4], [1], [3], [0], [4], None, [1]), - # with rounding and negative steps (e.g. take from 1st to 3rd with step 4 should give shape 1 not 0) - (None, [10], [7], [3], [0], [-7], None, [1]), - # reversing the sequence of elements - (None, [10], [-1], [np.iinfo(np.int32).min], [0], [-1], None, [10]), - # dynamic dimensions cases - # starts are non-constant - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], None, [3, 2], [0, 1], [1, 1], None, - [dynamic_dimension_value, dynamic_dimension_value]), - # ends are non-constant - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], None, [0, 1], [1, 1], None, - [dynamic_dimension_value, dynamic_dimension_value]), - # axes are non-constant - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, -2], None, [1, 1], None, - [dynamic_dimension_value, dynamic_dimension_value]), - # steps are non-constant - ([[4, 5, 6, 7], [2, 3, 5, 6], [5, 6, 8, 9], [5, 6, 8, 9]], [4, 4], [0, 1], [3, -2], [0, 1], None, None, - [dynamic_dimension_value, dynamic_dimension_value]), - # negative steps and since after normalization starts < ends output shape has 0-size dimension - (None, [20], [1], [-1], [0], [-2], None, [0]), - # since starts == ends output shape has 0-size dimension - (None, [4], [1], [1], [0], [1], None, [0]), - # since starts > ends output shape has 0-size dimension - (None, [4], [2], [1], [0], [1], None, [0]) - ]) - def test_ov_slice_infer(self, inp_value, inp_shape, starts, ends, axes, steps, expected_value, expected_shape): - if inp_value is None: - input_node = shaped_data('data_1', int64_array(inp_shape)) - else: - input_node = valued_data('data_1', int64_array(inp_value)) - if inp_value is not None and inp_shape is not None: - assert np.array_equal(np.array(inp_value).shape, inp_shape) - - def convert_args(val, name=''): - if val is not None: - return valued_const_with_data(name, int64_array(val)) - else: - return shaped_const_with_data(name, [0]) # fake shape - - starts = convert_args(starts, 'starts') - ends = convert_args(ends, 'ends') - steps = convert_args(steps, 'steps') - axes = convert_args(axes, 'axes') - if expected_shape is not None: - expected_shape = shape_array(expected_shape) - - nodes = { - **input_node, - **regular_op_with_empty_data('slice', {'op': 'OvSlice'}), - **starts, - **ends, - **steps, - **axes, - } - - graph = build_graph(nodes, - [('data_1', 'slice'), - *connect('starts', '1:slice'), - *connect('ends', '2:slice'), - *connect('steps', '3:slice'), - *connect('axes', '4:slice'), - *connect('slice', 'slice_d')]) - - graph.stage = 'middle' - slice_node = Node(graph, 'slice') - - OvSlice.infer(slice_node) - if expected_value is not None: - assert strict_compare_tensors(slice_node.out_node().value, expected_value) - assert strict_compare_tensors(slice_node.out_node().shape, expected_shape) diff --git a/tools/mo/unit_tests/mo/ops/space_to_depth_test.py b/tools/mo/unit_tests/mo/ops/space_to_depth_test.py deleted file mode 100644 index c8725e39ed0e5a..00000000000000 
--- a/tools/mo/unit_tests/mo/ops/space_to_depth_test.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.space_to_depth import SpaceToDepth -from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.graph import build_graph - -nodes = { - 'in_data_node': {'value': None, 'kind': 'data', 'shape': np.array([1, 2048, 1152, 64])}, - 'StD': {'op': 'SpaceToDepth', 'kind': 'op', 'block_size': 2}, - 'out_data_node': {'value': None, 'kind': 'data', 'shape': None} -} - -edges = [ - ('in_data_node', 'StD'), - ('StD', 'out_data_node') -] - - -class TestSpaceToDepthPartialInfer(unittest.TestCase): - def test_tf_space_to_depth_infer_nhwc(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NHWC' - std_node = Node(graph, 'StD') - SpaceToDepth.infer(std_node) - exp_shape = np.array([1, 1024, 576, 256]) - res_shape = graph.node['out_data_node']['shape'] - self.assertTrue(np.array_equal(exp_shape, res_shape)) - - def test_tf_space_to_depth_infer_nchw(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NCHW' - graph.node['in_data_node']['shape'] = np.array([1, 64, 2048, 1152]) - std_node = Node(graph, 'StD') - SpaceToDepth.infer(std_node) - exp_shape = np.array([1, 256, 1024, 576]) - res_shape = graph.node['out_data_node']['shape'] - self.assertTrue(np.array_equal(exp_shape, res_shape)) - - def test_tf_space_to_depth_infer_nchw_dynamic(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NCHW' - graph.node['in_data_node']['shape'] = shape_array([1, 64, dynamic_dimension_value, 1152]) - std_node = Node(graph, 'StD') - SpaceToDepth.infer(std_node) - exp_shape = shape_array([1, 256, dynamic_dimension_value, 576]) - res_shape = graph.node['out_data_node']['shape'] - self.assertTrue(strict_compare_tensors(exp_shape, res_shape)) - - def test_tf_space_to_depth_infer_shape_error(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NHWC' - graph.node['in_data_node']['shape'] = np.array([1024, 576, 256]) - std_node = Node(graph, 'StD') - self.assertRaises(Error, SpaceToDepth.infer, std_node) - - def test_tf_space_to_depth_infer_divisibility_error_1(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NHWC' - graph.node['in_data_node']['shape'] = np.array([1, 1024, 577, 256]) - std_node = Node(graph, 'StD') - self.assertRaises(Error, SpaceToDepth.infer, std_node) - - def test_tf_space_to_depth_infer_divisibility_error_2(self): - graph = build_graph(nodes, edges) - graph.graph['layout'] = 'NCHW' - graph.node['in_data_node']['shape'] = np.array([1, 256, 1024, 577]) - std_node = Node(graph, 'StD') - self.assertRaises(Error, SpaceToDepth.infer, std_node) diff --git a/tools/mo/unit_tests/mo/ops/sparse_fill_empty_rows_test.py b/tools/mo/unit_tests/mo/ops/sparse_fill_empty_rows_test.py deleted file mode 100644 index 2ade627b81942f..00000000000000 --- a/tools/mo/unit_tests/mo/ops/sparse_fill_empty_rows_test.py +++ /dev/null @@ -1,125 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.sparse_fill_empty_rows import SparseFillEmptyRows -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from 
openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'input_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_values': {'shape': None, 'value': None, 'kind': 'data'}, - 'dense_shape': {'shape': None, 'value': None, 'kind': 'data'}, - 'default_value': {'shape': None, 'value': None, 'kind': 'data'}, - 'sparse_fill_empty_rows_node': {'op': 'SparseFillEmptyRows', 'kind': 'op'}, - 'output_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'output_values': {'shape': None, 'value': None, 'kind': 'data'}, - 'empty_row_indicator': {'shape': None, 'value': None, 'kind': 'data'}, - 'result_indices': {'kind': 'op', 'op': 'Result'}, - 'result_values': {'kind': 'op', 'op': 'Result'}, - 'result_empty_row_indicator': {'kind': 'op', 'op': 'Result'}, - } - -# graph 1 -edges1 = [('input_indices', 'sparse_fill_empty_rows_node', {'in': 0}), - ('input_values', 'sparse_fill_empty_rows_node', {'in': 1}), - ('dense_shape', 'sparse_fill_empty_rows_node', {'in': 2}), - ('default_value', 'sparse_fill_empty_rows_node', {'in': 3}), - ('sparse_fill_empty_rows_node', 'output_indices', {'out': 0}), - ('sparse_fill_empty_rows_node', 'output_values', {'out': 1}), - ('sparse_fill_empty_rows_node', 'empty_row_indicator', {'out': 2}), - ('output_indices', 'result_indices', {'out': 0}), - ('output_values', 'result_values', {'out': 0}), - ('empty_row_indicator', 'result_empty_row_indicator', {'out': 0}), - ] - -inputs1 = {'input_indices': {'shape': int64_array([20, 2]), 'value': None}, - 'input_values': {'shape': int64_array([20]), 'value': None}, - 'dense_shape': {'shape': int64_array([2]), 'value': np.array([4, 5])}, - 'default_value': {'shape': int64_array([]), 'value': None}} - - -class TestSparseFillEmptyRows(unittest.TestCase): - def test_partial_infer(self): - graph = build_graph(nodes_attributes, edges1, inputs1) - - sparse_fill_empty_rows_node = Node(graph, 'sparse_fill_empty_rows_node') - SparseFillEmptyRows.infer(sparse_fill_empty_rows_node) - - # prepare reference results - ref_output_indices_shape = int64_array([20, 2]) - ref_output_values_shape = int64_array([20]) - ref_empty_row_indicator_shape = int64_array([4]) - - # get resulted shapes - res_output_indices_shape = graph.node['output_indices']['shape'] - res_output_values_shape = graph.node['output_values']['shape'] - res_empty_row_indicator_shape = graph.node['empty_row_indicator']['shape'] - - self.assertTrue(np.array_equal(ref_output_indices_shape, res_output_indices_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_indices_shape, res_output_indices_shape)) - - self.assertTrue(np.array_equal(ref_output_values_shape, res_output_values_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_values_shape, res_output_values_shape)) - - self.assertTrue(np.array_equal(ref_empty_row_indicator_shape, res_empty_row_indicator_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_empty_row_indicator_shape, res_empty_row_indicator_shape)) - - def test_partial_infer_for_some_out_ports(self): - edges = [('input_indices', 'sparse_fill_empty_rows_node', {'in': 0}), - ('input_values', 'sparse_fill_empty_rows_node', {'in': 1}), - ('dense_shape', 'sparse_fill_empty_rows_node', {'in': 2}), - ('default_value', 'sparse_fill_empty_rows_node', {'in': 3}), - ('sparse_fill_empty_rows_node', 'output_indices', {'out': 0}), - ('sparse_fill_empty_rows_node', 'empty_row_indicator', {'out': 2}), - ('output_indices', 'result_indices', {'out': 
0}), - ('empty_row_indicator', 'result_empty_row_indicator', {'out': 0}), - ] - graph = build_graph(nodes_attributes, edges, inputs1) - - sparse_fill_empty_rows_node = Node(graph, 'sparse_fill_empty_rows_node') - SparseFillEmptyRows.infer(sparse_fill_empty_rows_node) - - # prepare reference results - ref_output_indices_shape = int64_array([20, 2]) - ref_empty_row_indicator_shape = int64_array([4]) - - # get resulted shapes - res_output_indices_shape = graph.node['output_indices']['shape'] - res_empty_row_indicator_shape = graph.node['empty_row_indicator']['shape'] - - self.assertTrue(np.array_equal(ref_output_indices_shape, res_output_indices_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_indices_shape, res_output_indices_shape)) - - self.assertTrue(np.array_equal(ref_empty_row_indicator_shape, res_empty_row_indicator_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_empty_row_indicator_shape, res_empty_row_indicator_shape)) - - def test_incorrect_shape_of_default_value(self): - inputs = {'input_indices': {'shape': int64_array([20, 2]), 'value': None}, - 'input_values': {'shape': int64_array([20]), 'value': None}, - 'dense_shape': {'shape': int64_array([2]), 'value': np.array([4, 5])}, - 'default_value': {'shape': int64_array([3]), 'value': None}} - graph = build_graph(nodes_attributes, edges1, inputs) - sparse_fill_empty_rows_node = Node(graph, 'sparse_fill_empty_rows_node') - self.assertRaises(AssertionError, SparseFillEmptyRows.infer, sparse_fill_empty_rows_node) - - def test_no_value_of_dense_shape(self): - inputs = {'input_indices': {'shape': int64_array([20, 2]), 'value': None}, - 'input_values': {'shape': int64_array([20]), 'value': None}, - 'dense_shape': {'shape': int64_array([2]), 'value': None}, - 'default_value': {'shape': int64_array([]), 'value': None}} - graph = build_graph(nodes_attributes, edges1, inputs) - sparse_fill_empty_rows_node = Node(graph, 'sparse_fill_empty_rows_node') - self.assertRaises(AssertionError, SparseFillEmptyRows.infer, sparse_fill_empty_rows_node) - - def test_incorrect_shape_of_dense_shape(self): - inputs = {'input_indices': {'shape': int64_array([20, 2]), 'value': None}, - 'input_values': {'shape': int64_array([20]), 'value': None}, - 'dense_shape': {'shape': int64_array([2, 2]), 'value': np.array([[4, 5],[1, 2]])}, - 'default_value': {'shape': int64_array([]), 'value': None}} - graph = build_graph(nodes_attributes, edges1, inputs) - sparse_fill_empty_rows_node = Node(graph, 'sparse_fill_empty_rows_node') - self.assertRaises(AssertionError, SparseFillEmptyRows.infer, sparse_fill_empty_rows_node) diff --git a/tools/mo/unit_tests/mo/ops/sparse_reshape_test.py b/tools/mo/unit_tests/mo/ops/sparse_reshape_test.py deleted file mode 100644 index 57f5e92fd31e22..00000000000000 --- a/tools/mo/unit_tests/mo/ops/sparse_reshape_test.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np - -from openvino.tools.mo.ops.sparse_reshape import SparseReshape -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.utils.error import Error -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from unit_tests.utils.graph import valued_const_with_data, result, regular_op_with_empty_data, connect, \ - 
shaped_parameter, build_graph, empty_data - -dyn = dynamic_dimension - - -class TestSparseReshape(UnitTestWithMockedTelemetry): - - def build_and_test_shape_inference(self, input_indices_sparse_shape, input_actual_shape, new_shape, ref_out_shape, - input_indices=None, ref_out_indices=None): - # sparse tensor is stored in COO format - nodes = { - **shaped_parameter('input_indices', shape_array(input_indices_sparse_shape), {'value': input_indices}), - **valued_const_with_data('input_shape', shape_array(input_actual_shape)), - **valued_const_with_data('new_shape', shape_array(new_shape)), - **regular_op_with_empty_data('sparse_reshape_node', {'op': 'SparseReshape', - 'special_zero': True, - 'infer': SparseReshape.infer}), - **empty_data('sparse_reshape_node_d:out_port_1'), - - **result('output_indices'), - **result('output_shape'), - } - - edges = [ - *connect('input_indices', '0:sparse_reshape_node'), - *connect('input_shape', '1:sparse_reshape_node'), - *connect('new_shape', '2:sparse_reshape_node'), - *connect('sparse_reshape_node:0', 'output_indices'), - ('sparse_reshape_node', 'sparse_reshape_node_d:out_port_1', {'out': 1}), - ('sparse_reshape_node_d:out_port_1', 'output_shape', {'in': 0}), - ] - - graph = build_graph(nodes, edges, update_attributes={'input_indices_d': {'value': input_indices}}) - graph.stage = 'middle' - partial_infer(graph) - - node = Node(graph, 'sparse_reshape_node') - output_indices = node.out_port(0).data.get_value() - actual_output_shape = node.out_port(1).data.get_value() - self.assertTrue(strict_compare_tensors(actual_output_shape, ref_out_shape)) - self.assertTrue(strict_compare_tensors(output_indices, ref_out_indices)) - - def test_sparse_shape_1(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - self.build_and_test_shape_inference(input_indices_sparse_shape=[11, 2], - input_actual_shape=[4, 5], - new_shape=[5, -1, 2], - ref_out_shape=[5, 2, 2]) - - def test_sparse_shape_2(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - self.build_and_test_shape_inference(input_indices_sparse_shape=[11, 2], - input_actual_shape=[dyn, 5, 6], - new_shape=[5, -1], - ref_out_shape=[5, dyn]) - - def test_sparse_shape_3(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - self.build_and_test_shape_inference(input_indices_sparse_shape=[11, 2], - input_actual_shape=[5, 3, 8], - new_shape=[4, dyn], - ref_out_shape=[4, 30]) - - def test_sparse_shape_4(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - self.build_and_test_shape_inference(input_indices_sparse_shape=[11, 2], - input_actual_shape=[1, 30], - new_shape=[1, dyn], - ref_out_shape=[1, 30]) - - def test_sparse_shape_5(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - self.build_and_test_shape_inference(input_indices_sparse_shape=[11, 2], - input_actual_shape=[1, 30], - new_shape=[3, dyn, dyn], - ref_out_shape=[3, dyn, dyn]) - - def test_sparse_shape_6(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - self.build_and_test_shape_inference(input_indices_sparse_shape=[11, 2], - input_actual_shape=[1, 30], - new_shape=[dyn, 3, dyn], - ref_out_shape=[dyn, 3, dyn]) - - def test_sparse_shape_7(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - self.build_and_test_shape_inference(input_indices_sparse_shape=[11, 2], - input_actual_shape=[dyn, 30], - new_shape=[dyn, dyn, 33], - ref_out_shape=[dyn, dyn, 33]) - - def test_sparse_shape_8(self): - # ref_output_indices_shape = 
np.array([11, 3], dtype=np.int32) - self.build_and_test_shape_inference(input_indices_sparse_shape=[11, 2], - input_actual_shape=[dyn, 30], - new_shape=[dyn, 3, -1], - ref_out_shape=[dyn, 3, dyn]) - - def test_sparse_shape_9(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - self.build_and_test_shape_inference(input_indices_sparse_shape=[11, 2], - input_actual_shape=[dyn, 30], - new_shape=[1, dyn], - ref_out_shape=[1, dyn]) - - def test_sparse_shape_10(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - sparse_shape = [11, 2] - input_indices_value = np.arange(0, np.prod(sparse_shape)).reshape(sparse_shape) - self.build_and_test_shape_inference(input_indices_sparse_shape=sparse_shape, - input_actual_shape=[1, 30], - new_shape=[1, dyn], - ref_out_shape=[1, 30], - input_indices=input_indices_value, - ref_out_indices=input_indices_value) - - def test_sparse_shape_11(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - sparse_shape = [11, 2] - self.build_and_test_shape_inference(input_indices_sparse_shape=sparse_shape, - input_actual_shape=[1, 30], - new_shape=[1, 15, 2], - ref_out_shape=[1, 15, 2]) - - # negative test with uncompatible shapes - def test_sparse_shape_12(self): - # ref_output_indices_shape = np.array([11, 3], dtype=np.int32) - sparse_shape = [11, 2] - with self.assertRaisesRegex(Error, 'Stopped shape/value propagation'): - self.build_and_test_shape_inference(input_indices_sparse_shape=sparse_shape, - input_actual_shape=[1, 30], - new_shape=[1, 64, 2], - ref_out_shape=[1, 15, 2]) diff --git a/tools/mo/unit_tests/mo/ops/sparse_segment_mean_test.py b/tools/mo/unit_tests/mo/ops/sparse_segment_mean_test.py deleted file mode 100644 index 680d37dfc6f8ac..00000000000000 --- a/tools/mo/unit_tests/mo/ops/sparse_segment_mean_test.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.sparse_segment_mean import SparseSegmentMean -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -# graph 1 -nodes_attributes1 = {'input_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_segment_ids': {'shape': None, 'value': None, 'kind': 'data'}, - 'sparse_segment_mean_node': {'op': 'SparseSegmentMean', 'kind': 'op'}, - 'output_segments': {'shape': None, 'value': None, 'kind': 'data'}, - } - -edges1 = [('input_data', 'sparse_segment_mean_node', {'in': 0}), - ('input_indices', 'sparse_segment_mean_node', {'in': 1}), - ('input_segment_ids', 'sparse_segment_mean_node', {'in': 2}), - ('sparse_segment_mean_node', 'output_segments', {'out': 0})] - -inputs1 = {'input_data': {'shape': int64_array([20, 4, 5]), 'value': None}, - 'input_indices': {'shape': int64_array([40]), 'value': None}, - 'input_segment_ids': {'shape': int64_array([40]), 'value': None}} - -# graph 2 with constant input, mean -nodes_attributes2 = {'input_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_segment_ids': {'shape': None, 'value': None, 'kind': 'data'}, - 'sparse_segment_mean_node': {'op': 'SparseSegmentMean', 'kind': 'op'}, - 'output_segments': {'shape': None, 'value': None, 'kind': 'data'}, - } - -edges2 = [('input_data', 'sparse_segment_mean_node', {'in': 0}), 
- ('input_indices', 'sparse_segment_mean_node', {'in': 1}), - ('input_segment_ids', 'sparse_segment_mean_node', {'in': 2}), - ('sparse_segment_mean_node', 'output_segments', {'out': 0})] - -inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=float)}, - 'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 2, 1, 1, 2], dtype=float)}, - 'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 1, 2, 2], dtype=float)}} - -class TestSparseSegmentMean(unittest.TestCase): - def test_partial_infer(self): - graph = build_graph(nodes_attributes1, edges1, inputs1) - - sparse_segment_mean_node = Node(graph, 'sparse_segment_mean_node') - SparseSegmentMean.infer(sparse_segment_mean_node) - - # prepare reference results - ref_output_segments_shape = int64_array([40, 4, 5]) - - # get resulted shapes - res_output_segments_shape = graph.node['output_segments']['shape'] - - self.assertTrue(np.array_equal(ref_output_segments_shape, res_output_segments_shape), - 'Shapes do not match expected: {} and given: {}'.format(ref_output_segments_shape, res_output_segments_shape)) - - def test_incorrect_shapes(self): - inputs = {'input_data': {'shape': int64_array([20, 4, 5]), 'value': None}, - 'input_indices': {'shape': int64_array([39]), 'value': None}, - 'input_segment_ids': {'shape': int64_array([40]), 'value': None}} - graph = build_graph(nodes_attributes1, edges1, inputs) - sparse_segment_mean_node = Node(graph, 'sparse_segment_mean_node') - self.assertRaises(AssertionError, SparseSegmentMean.infer, sparse_segment_mean_node) - - def test_infer_constant_input_mean(self): - graph = build_graph(nodes_attributes2, edges2, inputs2) - - sparse_segment_mean_node = Node(graph, 'sparse_segment_mean_node') - SparseSegmentMean.infer(sparse_segment_mean_node) - - # prepare reference results - ref_output_segments_shape = int64_array([3, 4]) - ref_output_segments_value = np.array([[3, 4, 5, 6], [-1, -2, -3, -4], [2, 2, 2, 2]], dtype=float) - - # get resulted shapes - res_output_segments_shape = graph.node['output_segments']['shape'] - res_output_segments_value = graph.node['output_segments']['value'] - - self.assertTrue(np.array_equal(ref_output_segments_shape, res_output_segments_shape), - 'Shapes do not match expected: {} and given: {}'.format(ref_output_segments_shape, res_output_segments_shape)) - self.assertTrue(np.array_equal(ref_output_segments_value, res_output_segments_value), - 'Shapes do not match expected: {} and given: {}'.format(ref_output_segments_value, res_output_segments_value)) diff --git a/tools/mo/unit_tests/mo/ops/sparse_segment_sqrtn_test.py b/tools/mo/unit_tests/mo/ops/sparse_segment_sqrtn_test.py deleted file mode 100644 index 5f7cf685ef30eb..00000000000000 --- a/tools/mo/unit_tests/mo/ops/sparse_segment_sqrtn_test.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.sparse_segment_sqrtn import SparseSegmentSqrtN -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -# graph 1 -nodes_attributes1 = {'input_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_segment_ids': {'shape': None, 'value': None, 'kind': 'data'}, - 'sparse_segment_sqrtn_node': {'op': 
'SparseSegmentSqrtN', 'kind': 'op'}, - 'output_segments': {'shape': None, 'value': None, 'kind': 'data'}, - } - -edges1 = [('input_data', 'sparse_segment_sqrtn_node', {'in': 0}), - ('input_indices', 'sparse_segment_sqrtn_node', {'in': 1}), - ('input_segment_ids', 'sparse_segment_sqrtn_node', {'in': 2}), - ('sparse_segment_sqrtn_node', 'output_segments', {'out': 0})] - -inputs1 = {'input_data': {'shape': int64_array([20, 4, 5]), 'value': None}, - 'input_indices': {'shape': int64_array([40]), 'value': None}, - 'input_segment_ids': {'shape': int64_array([40]), 'value': None}} - -# graph 2 with constant input, sqrtn -nodes_attributes2 = {'input_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_segment_ids': {'shape': None, 'value': None, 'kind': 'data'}, - 'sparse_segment_sqrtn_node': {'op': 'SparseSegmentSqrtN', 'kind': 'op'}, - 'output_segments': {'shape': None, 'value': None, 'kind': 'data'}, - } - -edges2 = [('input_data', 'sparse_segment_sqrtn_node', {'in': 0}), - ('input_indices', 'sparse_segment_sqrtn_node', {'in': 1}), - ('input_segment_ids', 'sparse_segment_sqrtn_node', {'in': 2}), - ('sparse_segment_sqrtn_node', 'output_segments', {'out': 0})] - -inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=float)}, - 'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 2, 1, 1, 2], dtype=float)}, - 'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 0, 0, 2], dtype=float)}} - -class TestSparseSegmentSqrtN(unittest.TestCase): - def test_partial_infer(self): - graph = build_graph(nodes_attributes1, edges1, inputs1) - - sparse_segment_sqrtn_node = Node(graph, 'sparse_segment_sqrtn_node') - SparseSegmentSqrtN.infer(sparse_segment_sqrtn_node) - - # prepare reference results - ref_output_segments_shape = int64_array([40, 4, 5]) - - # get resulted shapes - res_output_segments_shape = graph.node['output_segments']['shape'] - - self.assertTrue(np.array_equal(ref_output_segments_shape, res_output_segments_shape), - 'Shapes do not match expected: {} and given: {}'.format(ref_output_segments_shape, res_output_segments_shape)) - - def test_incorrect_shapes(self): - inputs = {'input_data': {'shape': int64_array([20, 4, 5]), 'value': None}, - 'input_indices': {'shape': int64_array([39]), 'value': None}, - 'input_segment_ids': {'shape': int64_array([40]), 'value': None}} - graph = build_graph(nodes_attributes1, edges1, inputs) - sparse_segment_sqrtn_node = Node(graph, 'sparse_segment_sqrtn_node') - self.assertRaises(AssertionError, SparseSegmentSqrtN.infer, sparse_segment_sqrtn_node) - - def test_infer_constant_input_sqrtn(self): - graph = build_graph(nodes_attributes2, edges2, inputs2) - - sparse_segment_sqrtn_node = Node(graph, 'sparse_segment_sqrtn_node') - SparseSegmentSqrtN.infer(sparse_segment_sqrtn_node) - - # prepare reference results - ref_output_segments_shape = int64_array([3, 4]) - ref_output_segments_value = np.array([[2, 2, 2, 2], [0, 0, 0, 0], [5, 6, 7, 8]], dtype=float) - - # get resulted shapes - res_output_segments_shape = graph.node['output_segments']['shape'] - res_output_segments_value = graph.node['output_segments']['value'] - - self.assertTrue(np.array_equal(ref_output_segments_shape, res_output_segments_shape), - 'Shapes do not match expected: {} and given: {}'.format(ref_output_segments_shape, res_output_segments_shape)) - self.assertTrue(np.array_equal(ref_output_segments_value, 
res_output_segments_value), - 'Shapes do not match expected: {} and given: {}'.format(ref_output_segments_value, res_output_segments_value)) diff --git a/tools/mo/unit_tests/mo/ops/sparse_segment_sum_test.py b/tools/mo/unit_tests/mo/ops/sparse_segment_sum_test.py deleted file mode 100644 index b33ca570e17bb3..00000000000000 --- a/tools/mo/unit_tests/mo/ops/sparse_segment_sum_test.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.sparse_segment_sum import SparseSegmentSum -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -# graph 1 -nodes_attributes1 = {'input_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_segment_ids': {'shape': None, 'value': None, 'kind': 'data'}, - 'sparse_segment_sum_node': {'op': 'SparseSegmentSum', 'kind': 'op'}, - 'output_segments': {'shape': None, 'value': None, 'kind': 'data'}, - } - -edges1 = [('input_data', 'sparse_segment_sum_node', {'in': 0}), - ('input_indices', 'sparse_segment_sum_node', {'in': 1}), - ('input_segment_ids', 'sparse_segment_sum_node', {'in': 2}), - ('sparse_segment_sum_node', 'output_segments', {'out': 0})] - -inputs1 = {'input_data': {'shape': int64_array([20, 4, 5]), 'value': None}, - 'input_indices': {'shape': int64_array([40]), 'value': None}, - 'input_segment_ids': {'shape': int64_array([40]), 'value': None}} - -# graph 2 with constant input, sum -nodes_attributes2 = {'input_data': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'input_segment_ids': {'shape': None, 'value': None, 'kind': 'data'}, - 'sparse_segment_sum_node': {'op': 'SparseSegmentSum', 'kind': 'op'}, - 'output_segments': {'shape': None, 'value': None, 'kind': 'data'}, - } - -edges2 = [('input_data', 'sparse_segment_sum_node', {'in': 0}), - ('input_indices', 'sparse_segment_sum_node', {'in': 1}), - ('input_segment_ids', 'sparse_segment_sum_node', {'in': 2}), - ('sparse_segment_sum_node', 'output_segments', {'out': 0})] - -inputs2 = {'input_data': {'shape': int64_array([3, 4]), 'value': np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=float)}, - 'input_indices': {'shape': int64_array([3]), 'value': np.array([0, 1, 2], dtype=float)}, - 'input_segment_ids': {'shape': int64_array([3]), 'value': np.array([0, 0, 1], dtype=float)}} - -class TestSparseSegmentSum(unittest.TestCase): - def test_partial_infer(self): - graph = build_graph(nodes_attributes1, edges1, inputs1) - - sparse_segment_sum_node = Node(graph, 'sparse_segment_sum_node') - SparseSegmentSum.infer(sparse_segment_sum_node) - - # prepare reference results - ref_output_segments_shape = int64_array([40, 4, 5]) - - # get resulted shapes - res_output_segments_shape = graph.node['output_segments']['shape'] - - self.assertTrue(np.array_equal(ref_output_segments_shape, res_output_segments_shape), - 'Shapes do not match expected: {} and given: {}'.format(ref_output_segments_shape, res_output_segments_shape)) - - def test_incorrect_shapes(self): - inputs = {'input_data': {'shape': int64_array([20, 4, 5]), 'value': None}, - 'input_indices': {'shape': int64_array([39]), 'value': None}, - 'input_segment_ids': {'shape': int64_array([40]), 'value': None}} - graph = build_graph(nodes_attributes1, edges1, inputs) 
- sparse_segment_sum_node = Node(graph, 'sparse_segment_sum_node') - self.assertRaises(AssertionError, SparseSegmentSum.infer, sparse_segment_sum_node) - - def test_infer_constant_input_sum(self): - graph = build_graph(nodes_attributes2, edges2, inputs2) - - sparse_segment_sum_node = Node(graph, 'sparse_segment_sum_node') - SparseSegmentSum.infer(sparse_segment_sum_node) - - # prepare reference results - ref_output_segments_shape = int64_array([2, 4]) - ref_output_segments_value = np.array([[0, 0, 0, 0], [5, 6, 7, 8]], dtype=float) - - # get resulted shapes - res_output_segments_shape = graph.node['output_segments']['shape'] - res_output_segments_value = graph.node['output_segments']['value'] - - self.assertTrue(np.array_equal(ref_output_segments_shape, res_output_segments_shape), - 'Shapes do not match expected: {} and given: {}'.format(ref_output_segments_shape, res_output_segments_shape)) - self.assertTrue(np.array_equal(ref_output_segments_value, res_output_segments_value), - 'Shapes do not match expected: {} and given: {}'.format(ref_output_segments_value, res_output_segments_value)) diff --git a/tools/mo/unit_tests/mo/ops/split_test.py b/tools/mo/unit_tests/mo/ops/split_test.py deleted file mode 100644 index 255bec213a4f86..00000000000000 --- a/tools/mo/unit_tests/mo/ops/split_test.py +++ /dev/null @@ -1,414 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np -import pytest - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, \ - dynamic_dimension_value, dynamic_dimension, strict_compare_tensors, mo_array -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.split import AttributedSplit, AttributedVariadicSplit, VariadicSplit -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class TestSplitOp(unittest.TestCase): - nodes = { - 'input': {'kind': 'op'}, - 'split_input_data': {'kind': 'data', 'shape': None, 'value': None}, - 'split_op': {'kind': 'op', 'axis': None, 'num_splits': None, 'op': 'AttributedSplit'}, - 'split_output_0_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output_0': {'kind': 'op'}, - 'split_output_1_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output_1': {'kind': 'op'}, - } - edges = [ - ('input', 'split_input_data'), - ('split_input_data', 'split_op'), - ('split_op', 'split_output_0_data'), - ('split_output_0_data', 'output_0'), - ('split_op', 'split_output_1_data'), - ('split_output_1_data', 'output_1'), - ] - - def test_split_shape_infer(self): - # test configuration - input_shape = [2, 10] - input_value = None - axis = 1 - num_splits = 2 - output_shape = [2, 5] - output_value = [None, None] - - # action - graph = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': int64_array(input_shape), - 'value': input_value}, - 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)}, - } - ) - - split_op = Node(graph, 'split_op') - AttributedSplit.infer(split_op) - - # reference - graph_ref = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': int64_array(input_shape), - 'value': input_value}, - 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)}, - 'split_output_0_data': {'shape': int64_array(output_shape), - 'value': output_value[0]}, - 'split_output_1_data': {'shape': int64_array(output_shape), - 'value': output_value[1]}, - } - ) - - # check - (flag, 
resp) = compare_graphs(graph, graph_ref, 'split_input_data') - self.assertTrue(flag, resp) - - def test_split_dynamic_shape_infer(self): - # test configuration - input_shape = [2, dynamic_dimension_value] - input_value = None - axis = 1 - num_splits = 2 - output_shape = [2, dynamic_dimension_value] - output_value = [None, None] - - # action - graph = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': shape_array(input_shape), - 'value': input_value}, - 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)}, - } - ) - - split_op = Node(graph, 'split_op') - AttributedSplit.infer(split_op) - - # reference - graph_ref = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': shape_array(input_shape), - 'value': input_value}, - 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)}, - 'split_output_0_data': {'shape': shape_array(output_shape), - 'value': output_value[0]}, - 'split_output_1_data': {'shape': shape_array(output_shape), - 'value': output_value[1]}, - } - ) - - # check - (flag, resp) = compare_graphs(graph, graph_ref, 'split_input_data') - self.assertTrue(flag, resp) - self.assertTrue(strict_compare_tensors(Node(graph, 'split_output_0_data').shape, shape_array(output_shape))) - - def test_split_value_infer(self): - # test configuration - input_shape = [2, 10] - input_value = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]] - axis = 1 - num_splits = 2 - output_shape = [2, 5] - output_value = [[[0, 1, 2, 3, 4], [10, 11, 12, 13, 14]], [[5, 6, 7, 8, 9], [15, 16, 17, 18, 19]]] - - # action - graph = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': int64_array(input_shape), - 'value': int64_array(input_value)}, - 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)}, - } - ) - - split_op = Node(graph, 'split_op') - AttributedSplit.infer(split_op) - - # reference - graph_ref = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': int64_array(input_shape), - 'value': int64_array(input_value)}, - 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)}, - 'split_output_0_data': {'shape': int64_array(output_shape), - 'value': int64_array(output_value[0])}, - 'split_output_1_data': {'shape': int64_array(output_shape), - 'value': int64_array(output_value[1])}, - } - ) - - # check - (flag, resp) = compare_graphs(graph, graph_ref, 'split_input_data') - self.assertTrue(flag, resp) - - -class TestAttributedVariadicSplitOp(unittest.TestCase): - nodes = { - 'input': {'kind': 'op'}, - 'split_input_data': {'kind': 'data', 'shape': None, 'value': None}, - 'split_op': {'kind': 'op', 'axis': None, 'split_lengths': None, 'op': 'AttributedVariadicSplit'}, - 'split_output_0_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output_0': {'kind': 'op'}, - 'split_output_1_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output_1': {'kind': 'op'}, - 'split_output_2_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output_2': {'kind': 'op'}, - } - edges = [ - ('input', 'split_input_data'), - ('split_input_data', 'split_op'), - ('split_op', 'split_output_0_data'), - ('split_output_0_data', 'output_0'), - ('split_op', 'split_output_1_data'), - ('split_output_1_data', 'output_1'), - ('split_op', 'split_output_2_data'), - ('split_output_2_data', 'output_2'), - ] - - def test_splitv_zero(self): - graph = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': int64_array([2, 12, 25, 30])}, - 'split_op': 
{'axis': np.array(2), 'split_lengths': np.array([2, 13, 10, 0]), - 'out_ports_count': 4}, - } - ) - node = Node(graph, 'split_op') - for p in range(len(node.out_edges()), node.out_ports_count): - node.add_output_port(p) - - AttributedVariadicSplit.infer(node) - - self.assertTrue(len(node.out_edges()) == 3) - self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10]))) - - def test_splitv_dynamic_input(self): - graph = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': shape_array([2, 12, dynamic_dimension_value, 30])}, - 'split_op': {'axis': np.array(2), 'split_lengths': np.array([2, 13, 10]), - 'out_ports_count': 4}, - } - ) - node = Node(graph, 'split_op') - for p in range(len(node.out_edges()), node.out_ports_count): - node.add_output_port(p) - - AttributedVariadicSplit.infer(node) - - self.assertTrue(len(node.out_edges()) == 3) - self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10]))) - - def test_splitv_zero_not_last(self): - graph = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': int64_array([2, 12, 25, 30])}, - 'split_op': {'axis': np.array(2), 'split_lengths': np.array([2, 13, 0, 10]), - 'out_ports_count': 4}, - } - ) - node = Node(graph, 'split_op') - - # extractor should do it - for p in range(len(node.out_edges()), node.out_ports_count): - node.add_output_port(p) - node.out_port(2).get_connection().set_source(node.out_port(3)) - - AttributedVariadicSplit.infer(node) - - self.assertTrue(node.out_port(3).disconnected()) - self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10]))) - - def test_splitv_2_zero_not_last(self): - graph = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': int64_array([2, 12, 25, 30])}, - 'split_op': {'axis': np.array(2), 'split_lengths': np.array([2, 13, 0, 0, 10]), - 'out_ports_count': 5}, - } - ) - node = Node(graph, 'split_op') - - # extractor should do it - for p in range(len(node.out_edges()), node.out_ports_count): - node.add_output_port(p) - node.out_port(2).get_connection().set_source(node.out_port(4)) - - AttributedVariadicSplit.infer(node) - - self.assertTrue(node.out_port(4).disconnected()) - self.assertTrue(node.out_port(3).disconnected()) - self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10]))) - - -class TestVariadicSplitOp(): - nodes = { - 'input': {'kind': 'op'}, - 'split_input_data': {'kind': 'data', 'shape': None, 'value': None}, - 'split_axis': {'kind': 'op', 'op': 'Const'}, - 'split_axis_data': {'kind': 'data', 'shape': None, 'value': None}, - 'split_lengths': {'kind': 'op', 'op': 'Const'}, - 'split_lengths_data': {'kind': 'data', 'shape': None, 'value': None}, - 'split_op': {'kind': 'op', 'op': 'VariadicSplit'}, - 'split_output_0_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output_0': {'kind': 'op'}, - 'split_output_1_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output_1': {'kind': 'op'}, - 'split_output_2_data': {'kind': 'data', 'shape': None, 'value': None}, - 'output_2': {'kind': 'op'}, - } - edges = [ - ('input', 'split_input_data'), - ('split_input_data', 'split_op'), - ('split_axis', 'split_axis_data'), - ('split_axis_data', 'split_op'), - ('split_lengths', 'split_lengths_data'), - ('split_lengths_data', 'split_op'), - ('split_op', 'split_output_0_data'), - ('split_output_0_data', 'output_0'), - ('split_op', 'split_output_1_data'), - ('split_output_1_data', 'output_1'), - ('split_op', 'split_output_2_data'), - ('split_output_2_data', 'output_2'), - ] - - 
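# --- Editor's note: illustrative sketch, not part of the deleted file --------
# The parametrized tests below exercise VariadicSplit shape inference: each
# output keeps the input shape except along `axis`, which takes the matching
# entry of `split_lengths`, and zero-length entries drop their output port
# (as checked by test_splitv_zero above). A minimal, standalone sketch of that
# rule follows; the helper name `_variadic_split_shapes` is hypothetical and
# only mirrors the behaviour these tests assert, not the MO implementation.
def _variadic_split_shapes(input_shape, axis, split_lengths):
    """Return per-output shapes for a VariadicSplit-like op."""
    shapes = []
    for length in split_lengths:
        if length == 0:
            # zero-length pieces produce no output, mirroring test_splitv_zero
            continue
        shape = list(input_shape)
        shape[axis] = length
        shapes.append(tuple(shape))
    return shapes

# Example: _variadic_split_shapes((2, 12, 25, 30), axis=2, split_lengths=[2, 13, 10])
# -> [(2, 12, 2, 30), (2, 12, 13, 30), (2, 12, 10, 30)]
# -----------------------------------------------------------------------------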
@pytest.mark.parametrize("axis",[int64_array(2), - int64_array([2])]) - def test_variadic_split_axis(self, axis): - lengths = int64_array([2, 13, 10]) - graph = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': int64_array([2, 12, 25, 30])}, - 'split_axis_data': {'value': axis}, - 'split_lengths_data': {'value': lengths}, - 'split_op': {'out_ports_count': 4}, - } - ) - node = Node(graph, 'split_op') - for p in range(len(node.out_edges()), node.out_ports_count): - node.add_output_port(p) - - VariadicSplit.infer(node) - - ont_nodes_count = len(node.out_edges()) - assert ont_nodes_count == 3 - for out in range(ont_nodes_count): - assert np.all(node.out_node(out).shape == int64_array([2, 12, lengths[out], 30])) - - def test_variadic_split_value_inference_with_uint32(self): - axis = int64_array(2) - # because sum of Python int and Numpy np.uint64 gives float64 - - # but np.split accepts only integers and raises error for floats - # therefore needed to explicitly cast np.split arguments into integer - # added this test for that case - lengths = mo_array([2, 13, 10], dtype=np.uint64) - input_shape = mo_array([2, 12, 25, 30]) - input_value = np.zeros(input_shape) - - graph = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': input_shape, 'value': input_value}, - 'split_axis_data': {'value': axis}, - 'split_lengths_data': {'value': lengths}, - 'split_op': {'out_ports_count': 4}, - } - ) - node = Node(graph, 'split_op') - for p in range(len(node.out_edges()), node.out_ports_count): - node.add_output_port(p) - - VariadicSplit.infer(node) - - ont_nodes_count = len(node.out_edges()) - assert ont_nodes_count == 3 - for out in range(ont_nodes_count): - assert np.all(node.out_node(out).shape == int64_array([2, 12, lengths[out], 30])) - - @pytest.mark.parametrize("axis",[int64_array([[2], [2]]), - int64_array([2, 2])]) - def test_negative_variadic_split_axis(self, axis): - lengths = int64_array([2, 13, 10]) - graph = build_graph(self.nodes, self.edges, - { - 'split_input_data': {'shape': int64_array([2, 12, 25, 30])}, - 'split_axis_data': {'value': axis}, - 'split_lengths_data': {'value': lengths}, - 'split_op': {'out_ports_count': 4}, - } - ) - node = Node(graph, 'split_op') - for p in range(len(node.out_edges()), node.out_ports_count): - node.add_output_port(p) - - try: - VariadicSplit.infer(node) - except AssertionError as e: - assert e.args[0] == 'VariadicSplit `axis` should be scalar or tensor with shape [1], '\ - 'but it`s not for node split_op' - - -class TestSplitReverseInfer(unittest.TestCase): - - def test_split_reverse_infer(self): - ref_input_shape = [7, 4, 6] - axis = 2 - num_splits = 2 - output_shape_1 = [dynamic_dimension, 4, 3] - output_shape_2 = [7, dynamic_dimension, 3] - - graph = build_graph(TestSplitOp.nodes, TestSplitOp.edges, - { - 'split_input_data': {'shape': None, - 'value': None}, - 'split_op': {'axis': np.array(axis), 'num_splits': np.array(num_splits)}, - 'split_output_0_data': {'shape': shape_array(output_shape_1), - 'value': None}, - 'split_output_1_data': {'shape': shape_array(output_shape_2), - 'value': None}, - }) - - split_node = Node(graph, 'split_op') - AttributedSplit.reverse_infer(split_node) - actual_input_shape = split_node.in_port(0).data.get_shape() - self.assertTrue(strict_compare_tensors(ref_input_shape, actual_input_shape)) - - -class TestAttributedVariadicSplitReverseInfer(unittest.TestCase): - - def test_splitv_dynamic_input(self): - ref_input_shape = [7, 4, 11] - axis = 2 - num_splits = 2 - output_shape_1 = 
[dynamic_dimension, 4, 3] - output_shape_2 = [7, dynamic_dimension, 3] - output_shape_3 = [7, dynamic_dimension, 5] - - graph = build_graph(TestAttributedVariadicSplitOp.nodes, TestAttributedVariadicSplitOp.edges, - { - 'split_input_data': {'shape': None}, - 'split_op': {'axis': np.array(2), 'split_lengths': np.array([3, 3, 5]), - 'out_ports_count': 2}, - 'split_output_0_data': {'shape': shape_array(output_shape_1), - 'value': None}, - 'split_output_1_data': {'shape': shape_array(output_shape_2), - 'value': None}, - 'split_output_2_data': {'shape': shape_array(output_shape_3), - 'value': None}, - } - ) - node = Node(graph, 'split_op') - for p in range(len(node.out_edges()), node.out_ports_count): - node.add_output_port(p) - - AttributedVariadicSplit.reverse_infer(node) - - actual_input_shape = node.in_port(0).data.get_shape() - self.assertTrue(strict_compare_tensors(ref_input_shape, actual_input_shape)) diff --git a/tools/mo/unit_tests/mo/ops/squeeze_test.py b/tools/mo/unit_tests/mo/ops/squeeze_test.py deleted file mode 100644 index 49b70dce8fdece..00000000000000 --- a/tools/mo/unit_tests/mo/ops/squeeze_test.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.squeeze import Squeeze -from openvino.tools.mo.utils.error import Error -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'data': { - 'kind': 'data', - 'shape': None, - 'value': None, - }, - 'squeeze_dims': { - 'kind': 'op', - 'op': 'Const', - 'value': np.array([]), - 'shape': None, - }, - 'squeeze_dims_data': { - 'kind': 'data', - 'shape': None, - 'value': np.array([]), - }, - 'squeeze': { - 'op': 'Squeeze', - 'kind': 'op', - }, - 'data_out': { - 'kind': 'data', - 'shape': None, - 'value': None, - } -} - - -class TestSqueezeInfer(): - @pytest.mark.parametrize("input_value, input_shape, squeeze_dims, ref_value, ref_shape",[ - (None, shape_array([1, 2, 1, 4]), shape_array([2]), None, [1, 2, 4]), - # allow squeezing dynamic dimensions - (None, shape_array([1, 2, dynamic_dimension_value, 4]), shape_array([2]), None, [1, 2, 4]), - (None, shape_array([1, 2, 1, 4]), shape_array([]), None, [2, 4]), - (None, shape_array([1, dynamic_dimension_value, 1, 4]), shape_array([]), None, - shape_array([dynamic_dimension_value, 4])), - # do not allow squeeze dimensions not equal to 1 - (None, shape_array([1, 2, 1, 4]), shape_array([1]), None, None), - # do not allow squeeze input shape to be None - (None, None, shape_array([1]), None, None), - ]) - def test_squeeze_squeeze_dims(self, input_value, input_shape, squeeze_dims, ref_value, ref_shape): - graph = build_graph(nodes_attributes, - [('data', 'squeeze'), - ('squeeze_dims', 'squeeze_dims_data'), - ('squeeze_dims_data', 'squeeze'), - ('squeeze', 'data_out')], - {'data': {'shape': input_shape, 'value': input_value}, - 'squeeze_dims': {'value': squeeze_dims, 'shape': squeeze_dims.shape}, - 'squeeze_dims_data': {'value': squeeze_dims, 'shape': squeeze_dims.shape}, - }) - node = Node(graph, 'squeeze') - if ref_shape is None: # the test should fail - with pytest.raises(Error): - Squeeze.infer(node) - else: - Squeeze.infer(node) - if ref_value is not None: - assert strict_compare_tensors(node.out_port(0).data.get_value(), ref_value) - assert 
strict_compare_tensors(node.out_port(0).data.get_shape(), ref_shape) diff --git a/tools/mo/unit_tests/mo/ops/strided_slice_test.py b/tools/mo/unit_tests/mo/ops/strided_slice_test.py deleted file mode 100644 index 21d2b867681c2f..00000000000000 --- a/tools/mo/unit_tests/mo/ops/strided_slice_test.py +++ /dev/null @@ -1,1547 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from collections.abc import Iterable - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.strided_slice import StridedSlice -from unit_tests.utils.graph import build_graph, valued_const_with_data, result, regular_op_with_empty_data, \ - shaped_const_with_data, connect - - -class TestStridedSliceInfer(unittest.TestCase): - - def run_test(self, inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask): - if is_shape: - input_node = shaped_const_with_data('input', shape_array(inp)) - else: - input_node = valued_const_with_data('input', shape_array(inp)) - - nodes = { - **input_node, - **regular_op_with_empty_data('sslice', - {'op': 'StridedSlice', 'begin_mask': begin_mask, 'end_mask': end_mask, - 'shrink_axis_mask': shrink_axis_mask, 'ellipsis_mask': ellipsis_mask, - 'new_axis_mask': new_axis_mask}), - **valued_const_with_data('begin', shape_array(begin)), - **valued_const_with_data('end', shape_array(end)), - **valued_const_with_data('strides', shape_array(strides)), - **result('res'), - } - - edges = [ - *connect('input', '0:sslice'), - *connect('begin', '1:sslice'), - *connect('end', '2:sslice'), - *connect('strides', '3:sslice'), - *connect('sslice', 'res') - ] - - graph = build_graph(nodes, edges) - node = Node(graph, 'sslice') - StridedSlice.infer(node) - res = node.out_port(0).data.get_shape() if is_shape else node.out_port(0).data.get_value() - if isinstance(ref_res, Iterable): - self.assertTrue(strict_compare_tensors(res, shape_array(ref_res))) - else: - self.assertEqual(res, ref_res) - - def test_slice_infer_value_1( self, # out = inp[:4:1] - inp=(1, 34, 34, 62), ref_res=(1, 34, 34, 62), is_shape=False, - begin=(0,), end=(4,), strides=(1,), begin_mask=(0,), end_mask=(1,), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_value_2(self, # inp[1:3:1] = [34, 34] - inp=(1, 34, 34, 62), ref_res=(34, 34), is_shape=False, - begin=(1,), end=(3,), strides=(1,), begin_mask=(1,), end_mask=(1,), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,)): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_value_3(self, # inp[np.newaxis, :4:1] = [[1, 34, 34, 62]] - inp=(1, 34, 34, 62), ref_res=((1, 34, 34, 62),), is_shape=False, - begin=(0, 0,), end=(0, 4,), strides=(1, 1), begin_mask=(0, 0), end_mask=(1, 1), - shrink_axis_mask=(0,), new_axis_mask=(1,), ellipsis_mask=(0,)): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_value_4(self, # inp[1] = 34 - inp=(1, 34, 34, 62), ref_res=34, is_shape=False, - begin=(1,), end=(4,), strides=(1,), begin_mask=(1,), end_mask=(1,), - 
shrink_axis_mask=(1,), new_axis_mask=(0,), ellipsis_mask=(0,)): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_value_5(self, # inp[::-1] = [62, 34, 34, 1] - inp=(1, 34, 34, 62), ref_res=(62, 34, 34, 1), is_shape=False, - begin=(0,), end=(4,), strides=(-1,), begin_mask=(0,), end_mask=(0,), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,)): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_value_6(self, # inp[0, 0:4:1] - inp=((1, 34, 34, 62),), ref_res=(1, 34, 34, 62), is_shape=False, - begin=(0, 0), end=(0, 4), strides=(1, 1), begin_mask=(0, 1), end_mask=(0, 1), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0)): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_value_7(self, # inp[:-1:1] = [1, 34, 34], since begin_mask is [0], begin can be of any value - inp=(1, 34, 34, 62), ref_res=(1, 34, 34), is_shape=False, - begin=(0,), end=(-1,), strides=(1,), begin_mask=(0,), end_mask=(1,), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,)): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_value_8( - self, # inp_shape = (1, 2, 4), out = inp[..., :2, None] => out_shape = (1, 2, 2, 1) - inp=(((0, 1, 2, 3), (4, 5, 6, 7)),), ref_res=((((0.,), (1.,)), ((4.,), (5.,))),), is_shape=False, - begin=(0, 0, 0), end=(0, 2, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 1, 0), - shrink_axis_mask=(0, 0, 0), new_axis_mask=(0, 0, 1), ellipsis_mask=(1, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_1( - self, # inp[0:3, 0:1, 0:5] - inp=(10, 10, 10, 10), ref_res=(3, 1, 5, 10), is_shape=True, - begin=(0, 0, 0), end=(3, 1, 5), strides=(1, 1, 1), begin_mask=(1, 1, 1), end_mask=(1, 1, 1), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_2( - self, # inp[0:3, 0:1, 5:0:-1] - inp=(10, 10, 10, 10), ref_res=(3, 1, 5, 10), is_shape=True, - begin=(0, 0, 5), end=(3, 1, 0), strides=(1, 1, -1), begin_mask=(1, 1, 1), end_mask=(1, 1, 1), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,)): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_3( - self, # inp[1:34, 0, :, :2] - inp=(1, 35, 35, 3), ref_res=(1, 35, 2), is_shape=True, - begin=(0, 0, 0, 0), end=(1, 34, 0, 2), strides=(1, 1, 1, 1), begin_mask=(1, 1, 0, 0), end_mask=(1, 0, 0, 1), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 0, 0), ellipsis_mask=(0, 0, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_4( - self, # inp[1:34, :, :, :2] begin mask is (1,) so only one value can be specified - inp=(1, 35, 35, 3), ref_res=(1, 35, 2), is_shape=True, - begin=(0, 0, 0, 0), end=(1, 34, 20, 2), strides=(1, 1, 1, 1), begin_mask=(1, 0, 0, ), 
end_mask=(1, 0, 0, 1), - shrink_axis_mask=(0, 1), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_5( - self, # inp[:, :, :, :] since all begin and end masks are zero - inp=(1, 35, 35, 3), ref_res=(1, 35, 35, 3), is_shape=True, - begin=(1, 10, 10, 0), end=(1, 34, 20, 2), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_6( - self, # inp[0] - inp=(1, 35, 35, 3), ref_res=(35, 35, 3), is_shape=True, - begin=(0,), end=(1,), strides=(1,), begin_mask=(1,), end_mask=(0,), - shrink_axis_mask=(1,), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_7( - self, # inp[0, 20], ends can be of any value - inp=(1, 35, 35, 3), ref_res=(35, 3), is_shape=True, - begin=(0, 20), end=(1, 9999), strides=(1, 1), begin_mask=(0,), end_mask=(0,), - shrink_axis_mask=(1, 1), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_8( - self, # inp[0, 0:34, 20:22, new_axis], both new_axis and shrink_axis are present - inp=(1, 35, 35, 3), ref_res=(34, 2, 1, 3), is_shape=True, - begin=(0, 0, 20, 0), end=(1, 34, 22, 2), strides=(1, 1, 1, 1), begin_mask=(0,), end_mask=(0,), - shrink_axis_mask=(1,), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_9( - self, # inp[:, 0:4, 20, new_axis], both new_axis and shrink_axis are present - inp=(1, 35, 35, 3), ref_res=(1, 4, 1, 3), is_shape=True, - begin=(0, 0, 20, 0), end=(0, 4, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 1, 0, 0), end_mask=(0, 1, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_10( - self, # inp[:, 0:4, new_axis, 20], both new_axis and shrink_axis are present - inp=(1, 35, 35, 3), ref_res=(1, 4, 1, 3), is_shape=True, - begin=(0, 0, 0, 20), end=(0, 4, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 1, 0, 0), end_mask=(0, 1, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_11( - self, # inp[0, :, 0:34, 20:22, new_axis], both new_axis and shrink_axis are present - inp=(1, 3, 35, 35), ref_res=(3, 34, 2, 1), is_shape=True, - begin=(0, 0, 0, 20, 0), end=(1, 0, 34, 22, 0), strides=(1, 1, 1, 1, 1), - begin_mask=(1, 0, 1, 1, 1), end_mask=(1, 0, 1, 1, 1), - shrink_axis_mask=(1,), new_axis_mask=(0, 0, 0, 0, 1), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_12( - 
self, # inp[0, :34, 20, :2] - inp=(1, 35, 35, 3), ref_res=(34, 2), is_shape=True, - begin=(0, 0, 0, 0), end=(1, 34, 20, 2), strides=(1, 1, 1, 1), begin_mask=(0, 1, 1, 1), end_mask=(0, 1, 1, 1), - shrink_axis_mask=(1, 0, 1, 0), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_13( - self, # inp[0, 0, 0], since it's shrink_axis ends can be of any value - inp=(1, 35, 35, 3), ref_res=(3,), is_shape=True, - begin=(0, 0, 0), end=(1, 34444, 20), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(1, 1, 1), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_14( - self, # inp[0, 0, 0], since begin_mask is [0], begin can be of any value - inp=(1, 35, 35, 3), ref_res=(1, 18, 18, 3), is_shape=True, - begin=(0, 0, 0), end=(1, 35, 35), strides=(2, 2, 2), begin_mask=(1, 1, 1), end_mask=(1, 1, 1), - shrink_axis_mask=(0, 0, 0), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - # with ellipsis - def test_slice_infer_shape_15( - self, # inp[..., np.newaxis] - inp=(1, 35, 35), ref_res=(1, 35, 35, 1), is_shape=True, - begin=(101, 0), end=(0, 0), strides=(-1, -1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(1, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_16( - self, # inp_shape = (1, 720, 1080), out = inp[..., :100, None] => out_shape = (1, 720, 100, 1) - inp=(1, 720, 1080), ref_res=(1, 720, 100, 1), is_shape=True, - begin=(0, 0, 0), end=(0, 100, 0), strides=(1, 1, 1), begin_mask=(0, 1, 0), end_mask=(0, 1, 0), - shrink_axis_mask=(0,), new_axis_mask=(0, 0, 1), ellipsis_mask=(1,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_17( - self, # inp_shape = (1, 720, 1080, 3), out = inp[..., :-1] => out_shape = (1, 720, 100, 2) - inp=(1, 720, 1080, 3), ref_res=(1, 720, 1080, 2), is_shape=True, - begin=(0, 0), end=(0, -1), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 1), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_18( - self, # inp_shape = (1, 720, 1080, 3), out = inp[..., -2] => out_shape = (1, 720, 1080) - inp=(1, 720, 1080, 3), ref_res=(1, 720, 1080), is_shape=True, - begin=(0, -2), end=(0, 0), strides=(1, 1), begin_mask=(0, 1), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_shape_19( - self, # inp_shape = (1, 720, 1080, 3), out = input[..., 0:10, 0:3] => out_shape = (1, 720, 10, 3) - inp=(1, 720, 1080, 3), ref_res=(1, 720, 10, 3), is_shape=True, - begin=(0, 0, 0), end=(0, 10, 3), strides=(1, 1, 1), begin_mask=(0, 1, 1), end_mask=(0, 1, 1), - 
shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(1,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_dynamic_shape_1( - self, # inp[0:3, 0:1, 0:5] - inp=(dynamic_dimension_value, 10, 10, 10), ref_res=(dynamic_dimension_value, 1, 5, 10), is_shape=True, - begin=(0, 0, 0), end=(3, 1, 5), strides=(1, 1, 1), begin_mask=(1, 1, 1), end_mask=(1, 1, 1), - shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_dynamic_shape_2( - self, # inp[0:d, 0:1, 0:5] - inp=(10, 10, 10, 10), ref_res=(dynamic_dimension_value, 1, 5, 10), is_shape=True, - begin=(0, 0, 0), end=(dynamic_dimension_value, 1, 5), strides=(1, 1, 1), begin_mask=(1, 1, 1), - end_mask=(1, 1, 1), shrink_axis_mask=(0,), new_axis_mask=(0,), ellipsis_mask=(0,) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_slice_infer_dynamic_shape_3( - self, # inp[1:34, 0, :, :d] - inp=(1, 35, 35, 3), ref_res=(1, 35, dynamic_dimension_value), is_shape=True, - begin=(0, 0, 0, 0), end=(1, 34, 0, dynamic_dimension_value), strides=(1, 1, 1, 1), begin_mask=(1, 1, 0, 0), - end_mask=(1, 0, 0, 1), shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 0, 0), ellipsis_mask=(0, 0, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_0( - self, # inp_shape = (1, 100, 200, 3), out = inp[np.newaxis, ..., 0, :], out_shape=(1, 1, 100, 3) - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_1( - self, # inp_shape = (1, 100, 200, 3), out = inp[..., np.newaxis, 0, :], out_shape=(1, 100, 1, 3) - inp=(1, 100, 200, 3), ref_res=(1, 100, 1, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_2( - self, # inp_shape = (1, 100, 200, 3), out = inp[0, np.newaxis, ..., :], out_shape=(1, 100, 200, 3) - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_3( - self, # inp_shape = (1, 100, 200, 3), out = inp[0, ..., np.newaxis, :], out_shape=(100, 200, 1, 3) - inp=(1, 100, 200, 3), ref_res=(100, 200, 1, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - 
shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_4( - self, # inp_shape = (1, 100, 200, 3), out = inp[np.newaxis, 0, ..., :], out_shape=(1, 100, 200, 3) - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_5( - self, # inp_shape = (1, 100, 200, 3), out = inp[..., 0, np.newaxis, :], out_shape=(1, 100, 1, 3) - inp=(1, 100, 200, 3), ref_res=(1, 100, 1, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_6( - self, # inp_shape = (1, 100, 200, 3), out = inp[np.newaxis, ..., :, 0], out_shape=(1, 1, 100, 200) - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 200), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_7( - self, # inp_shape = (1, 100, 200, 3), out = inp[..., np.newaxis, :, 0], out_shape=(1, 100, 1, 200) - inp=(1, 100, 200, 3), ref_res=(1, 100, 1, 200), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_8( - self, # inp_shape = (1, 100, 200, 3), out = inp[0, np.newaxis, :, ...], out_shape=(1, 100, 200, 3) - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_9( - self, # inp_shape = (1, 100, 200, 3), out = inp[0, ..., :, np.newaxis], out_shape=(100, 200, 3, 1) - inp=(1, 100, 200, 3), ref_res=(100, 200, 3, 1), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 1, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_10( - self, # inp_shape = (1, 100, 200, 3), out = inp[np.newaxis, 0, :, ...], out_shape=(1, 100, 200, 3) - inp=(1, 
100, 200, 3), ref_res=(1, 100, 200, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_11( - self, # inp_shape = (1, 100, 200, 3), out = inp[..., 0, :, np.newaxis], out_shape=(1, 100, 3, 1) - inp=(1, 100, 200, 3), ref_res=(1, 100, 3, 1), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(1, 0, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_12( - self, # inp_shape = (1, 100, 200, 3), out = inp[np.newaxis, :, ..., 0], out_shape=(1, 1, 100, 200) - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 200), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_13( - self, # inp_shape = (1, 100, 200, 3), out = inp[..., :, np.newaxis, 0], out_shape=(1, 100, 200, 1) - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 1), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_14( - self, # inp_shape = (1, 100, 200, 3), out = inp[0, :, np.newaxis, ...], out_shape=(100, 1, 200, 3) - inp=(1, 100, 200, 3), ref_res=(100, 1, 200, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 0, 0, 1) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_15( - self, # inp_shape = (1, 100, 200, 3), out = inp[0, :, ..., np.newaxis], out_shape=(100, 200, 3, 1) - inp=(1, 100, 200, 3), ref_res=(100, 200, 3, 1), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 0, 1, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_16( - self, # inp_shape = (1, 100, 200, 3), out = inp[np.newaxis, :, 0, ...], out_shape=(1, 1, 200, 3) - inp=(1, 100, 200, 3), ref_res=(1, 1, 200, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, 
new_axis_mask, ellipsis_mask) - - def test_strided_slice_17( - self, # inp_shape = (1, 100, 200, 3), out = inp[..., :, 0, np.newaxis], out_shape=(1, 100, 200, 1) - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 1), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(1, 0, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_18( - self, # inp_shape = (1, 100, 200, 3), out = inp[:, np.newaxis, ..., 0], out_shape=(1, 1, 100, 200) - inp=(1, 100, 200, 3), ref_res=(1, 1, 100, 200), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_19( - self, # inp_shape = (1, 100, 200, 3), out = inp[:, ..., np.newaxis, 0], out_shape=(1, 100, 200, 1) - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 1), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_20( - self, # inp_shape = (1, 100, 200, 3), out = inp[:, 0, np.newaxis, ...], out_shape=(1, 1, 200, 3) - inp=(1, 100, 200, 3), ref_res=(1, 1, 200, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 0, 0, 1) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_21( - self, # inp_shape = (1, 100, 200, 3), out = inp[:, 0, ..., np.newaxis], out_shape=(1, 200, 3, 1) - inp=(1, 100, 200, 3), ref_res=(1, 200, 3, 1), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 0, 1, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_22( - self, # inp_shape = (1, 100, 200, 3), out = inp[:, np.newaxis, 0, ...], out_shape=(1, 1, 200, 3) - inp=(1, 100, 200, 3), ref_res=(1, 1, 200, 3), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - def test_strided_slice_23( - self, # inp_shape = (1, 100, 200, 3), out = inp[:, ..., 0, np.newaxis], out_shape=(1, 100, 200, 1) - inp=(1, 100, 200, 3), ref_res=(1, 100, 200, 1), is_shape=True, - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 
0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 1, 0, 0) - ): - self.run_test(inp, is_shape, ref_res, begin, end, strides, - begin_mask, end_mask, shrink_axis_mask, new_axis_mask, ellipsis_mask) - - # automatically generated the whole range of 2d slices over 2d, 3d and 4d input tensors - def test_auto_infer_strided_slice_2d_over_2d_0(self): - """ - inp_shape = (1, 100), out = inp[:, :] => out_shape = (1, 100) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_2d_1(self): - """ - inp_shape = (1, 100), out = inp[:, None] => out_shape = (1, 1, 100) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(1, 1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_2d_2(self): - """ - inp_shape = (1, 100), out = inp[:, 0] => out_shape = (1,) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(1,), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_2d_3(self): - """ - inp_shape = (1, 100), out = inp[..., :] => out_shape = (1, 100) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_auto_infer_strided_slice_2d_over_2d_4(self): - """ - inp_shape = (1, 100), out = inp[..., None] => out_shape = (1, 100, 1) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(1, 100, 1), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(1, 0) - ) - - def test_auto_infer_strided_slice_2d_over_2d_5(self): - """ - inp_shape = (1, 100), out = inp[..., 0] => out_shape = (1,) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(1,), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_auto_infer_strided_slice_2d_over_2d_6(self): - """ - inp_shape = (1, 100), out = inp[None, :] => out_shape = (1, 1, 100) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(1, 1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_2d_7(self): - """ - inp_shape = (1, 100), out = inp[None, None] => out_shape = (1, 1, 1, 100) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(1, 1, 1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 1), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_2d_8(self): - """ - inp_shape = (1, 100), out = inp[None, 0] => out_shape = (1, 100) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def 
test_auto_infer_strided_slice_2d_over_2d_9(self): - """ - inp_shape = (1, 100), out = inp[0, :] => out_shape = (100,) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(100,), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_2d_10(self): - """ - inp_shape = (1, 100), out = inp[0, None] => out_shape = (1, 100) - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_2d_11(self): - """ - inp_shape = (1, 100), out = inp[0, 0] => out_shape = () - """ - self.run_test( - inp=(1, 100), is_shape=True, ref_res=(), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_0(self): - """ - inp_shape = (1, 100, 200), out = inp[:, :] => out_shape = (1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_1(self): - """ - inp_shape = (1, 100, 200), out = inp[:, None] => out_shape = (1, 1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_2(self): - """ - inp_shape = (1, 100, 200), out = inp[:, 0] => out_shape = (1, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_3(self): - """ - inp_shape = (1, 100, 200), out = inp[..., :] => out_shape = (1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_4(self): - """ - inp_shape = (1, 100, 200), out = inp[..., None] => out_shape = (1, 100, 200, 1) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 100, 200, 1), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(1, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_5(self): - """ - inp_shape = (1, 100, 200), out = inp[..., 0] => out_shape = (1, 100) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 100), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_6(self): - """ - inp_shape = (1, 100, 200), out = inp[None, :] => out_shape = (1, 1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 1, 100, 200), - begin=(0, 0), end=(0, 0), 
strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_7(self): - """ - inp_shape = (1, 100, 200), out = inp[None, None] => out_shape = (1, 1, 1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 1, 1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 1), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_8(self): - """ - inp_shape = (1, 100, 200), out = inp[None, 0] => out_shape = (1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_9(self): - """ - inp_shape = (1, 100, 200), out = inp[0, :] => out_shape = (100, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_10(self): - """ - inp_shape = (1, 100, 200), out = inp[0, None] => out_shape = (1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_3d_11(self): - """ - inp_shape = (1, 100, 200), out = inp[0, 0] => out_shape = (200,) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(200,), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_0(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, :] => out_shape = (1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_1(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, None] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_2(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, 0] => out_shape = (1, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_3(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., :] => out_shape = (1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def 
test_auto_infer_strided_slice_2d_over_4d_4(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., None] => out_shape = (1, 100, 200, 3, 1) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3, 1), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(0, 1), ellipsis_mask=(1, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_5(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., 0] => out_shape = (1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(0, 0), ellipsis_mask=(1, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_6(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, :] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_7(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, None] => out_shape = (1, 1, 1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 0), new_axis_mask=(1, 1), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_8(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, 0] => out_shape = (1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(0, 1), new_axis_mask=(1, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_9(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, :] => out_shape = (100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_10(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, None] => out_shape = (1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 0), new_axis_mask=(0, 1), ellipsis_mask=(0, 0) - ) - - def test_auto_infer_strided_slice_2d_over_4d_11(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, 0] => out_shape = (200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(200, 3), - begin=(0, 0), end=(0, 0), strides=(1, 1), begin_mask=(0, 0), end_mask=(0, 0), - shrink_axis_mask=(1, 1), new_axis_mask=(0, 0), ellipsis_mask=(0, 0) - ) - - # automatically generated slices from 3d to 5d d input tensors - # fixed number of ellipsis, newaxis and shrink_axis - def test_auto_infer_strided_slice_3d_over_3d_0(self): - """ - inp_shape = (1, 100, 200), out = inp[None, ..., 0] => out_shape = (1, 1, 100) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 1, 100), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - 
shrink_axis_mask=(0, 0, 1), new_axis_mask=(1, 0, 0), ellipsis_mask=(0, 1, 0) - ) - - def test_auto_infer_strided_slice_3d_over_3d_1(self): - """ - inp_shape = (1, 100, 200), out = inp[..., None, 0] => out_shape = (1, 100, 1) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 100, 1), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(0, 0, 1), new_axis_mask=(0, 1, 0), ellipsis_mask=(1, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_3d_2(self): - """ - inp_shape = (1, 100, 200), out = inp[0, None, ...] => out_shape = (1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 100, 200), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(1, 0, 0), new_axis_mask=(0, 1, 0), ellipsis_mask=(0, 0, 1) - ) - - def test_auto_infer_strided_slice_3d_over_3d_3(self): - """ - inp_shape = (1, 100, 200), out = inp[0, ..., None] => out_shape = (100, 200, 1) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(100, 200, 1), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(1, 0, 0), new_axis_mask=(0, 0, 1), ellipsis_mask=(0, 1, 0) - ) - - def test_auto_infer_strided_slice_3d_over_3d_4(self): - """ - inp_shape = (1, 100, 200), out = inp[None, 0, ...] => out_shape = (1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 100, 200), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(0, 1, 0), new_axis_mask=(1, 0, 0), ellipsis_mask=(0, 0, 1) - ) - - def test_auto_infer_strided_slice_3d_over_3d_5(self): - """ - inp_shape = (1, 100, 200), out = inp[..., 0, None] => out_shape = (1, 100, 1) - """ - self.run_test( - inp=(1, 100, 200), is_shape=True, ref_res=(1, 100, 1), - begin=(0, 0, 0), end=(0, 0, 0), strides=(1, 1, 1), begin_mask=(0, 0, 0), end_mask=(0, 0, 0), - shrink_axis_mask=(0, 1, 0), new_axis_mask=(0, 0, 1), ellipsis_mask=(1, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_4d_0(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, ..., 0, :] => out_shape = (1, 1, 100, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 100, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_4d_1(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., None, 0, :] => out_shape = (1, 100, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_4d_2(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, None, ..., :] => out_shape = (1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_3d_over_4d_3(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, ..., None, :] 
=> out_shape = (100, 200, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(100, 200, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_4d_4(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, 0, ..., :] => out_shape = (1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_3d_over_4d_5(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., 0, None, :] => out_shape = (1, 100, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_5d_0(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, ..., 0, :, :] => out_shape = (1, 1, 100, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 1, 100, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_5d_1(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., None, 0, :, :] => out_shape = (1, 100, 1, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_5d_2(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, None, ..., :, :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_5d_3(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, ..., None, :, :] => out_shape = (100, 200, 1, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(100, 200, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_5d_4(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, 0, ..., :, :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - 
shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_3d_over_5d_5(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., 0, None, :, :] => out_shape = (1, 100, 1, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_0(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, ..., 0, :] => out_shape = (1, 1, 100, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 100, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_1(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., None, 0, :] => out_shape = (1, 100, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_2(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, None, ..., :] => out_shape = (1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_3(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, ..., None, :] => out_shape = (100, 200, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(100, 200, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_4(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, 0, ..., :] => out_shape = (1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_5(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., 0, None, :] => out_shape = (1, 100, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 1, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_6(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, ..., :, 0] => out_shape = (1, 1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 100, 200), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), 
begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_7(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., None, :, 0] => out_shape = (1, 100, 1, 200) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 1, 200), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_8(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, None, :, ...] => out_shape = (1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_auto_infer_strided_slice_4d_over_4d_9(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, ..., :, None] => out_shape = (100, 200, 3, 1) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(100, 200, 3, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_10(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, 0, :, ...] => out_shape = (1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_auto_infer_strided_slice_4d_over_4d_11(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., 0, :, None] => out_shape = (1, 100, 3, 1) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 3, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_12(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, :, ..., 0] => out_shape = (1, 1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 100, 200), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_13(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., :, None, 0] => out_shape = (1, 100, 200, 1) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_14(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, :, None, ...] 
=> out_shape = (100, 1, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(100, 1, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_auto_infer_strided_slice_4d_over_4d_15(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[0, :, ..., None] => out_shape = (100, 200, 3, 1) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(100, 200, 3, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_16(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[None, :, 0, ...] => out_shape = (1, 1, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_auto_infer_strided_slice_4d_over_4d_17(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[..., :, 0, None] => out_shape = (1, 100, 200, 1) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_18(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, None, ..., 0] => out_shape = (1, 1, 100, 200) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 100, 200), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_19(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, ..., None, 0] => out_shape = (1, 100, 200, 1) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_20(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, 0, None, ...] 
=> out_shape = (1, 1, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 1, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_auto_infer_strided_slice_4d_over_4d_21(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, 0, ..., None] => out_shape = (1, 200, 3, 1) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 200, 3, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_4d_22(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, None, 0, ...] => out_shape = (1, 1, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 1, 200, 3), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0), ellipsis_mask=(0, 0, 0, 1) - ) - - def test_auto_infer_strided_slice_4d_over_4d_23(self): - """ - inp_shape = (1, 100, 200, 3), out = inp[:, ..., 0, None] => out_shape = (1, 100, 200, 1) - """ - self.run_test( - inp=(1, 100, 200, 3), is_shape=True, ref_res=(1, 100, 200, 1), - begin=(0, 0, 0, 0), end=(0, 0, 0, 0), strides=(1, 1, 1, 1), begin_mask=(0, 0, 0, 0), end_mask=(0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0), new_axis_mask=(0, 0, 0, 1), ellipsis_mask=(0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_0(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, ..., 0, :, :] => out_shape = (1, 1, 100, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 1, 100, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_1(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., None, 0, :, :] => out_shape = (1, 100, 1, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_2(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, None, ..., :, :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_3(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, ..., None, :, :] => out_shape = (100, 200, 1, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(100, 200, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 
0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_4(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, 0, ..., :, :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_5(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., 0, None, :, :] => out_shape = (1, 100, 1, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 1, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_6(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, ..., :, 0, :] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 1, 100, 200, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_7(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., None, :, 0, :] => out_shape = (1, 100, 1, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 1, 200, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_8(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, None, :, ..., :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_9(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, ..., :, None, :] => out_shape = (100, 200, 10, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(100, 200, 10, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_10(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, 0, :, ..., :] => out_shape = (1, 100, 200, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_11(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = 
inp[..., 0, :, None, :] => out_shape = (1, 100, 10, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 10, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_12(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, :, ..., 0, :] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 1, 100, 200, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_13(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., :, None, 0, :] => out_shape = (1, 100, 200, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 200, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_14(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, :, None, ..., :] => out_shape = (100, 1, 200, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(100, 1, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_15(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[0, :, ..., None, :] => out_shape = (100, 200, 10, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(100, 200, 10, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(1, 0, 0, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_16(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[None, :, 0, ..., :] => out_shape = (1, 1, 200, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 1, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(1, 0, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_17(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[..., :, 0, None, :] => out_shape = (1, 100, 200, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 200, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(1, 0, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_18(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, None, ..., 0, :] => out_shape = (1, 1, 100, 200, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 1, 100, 
200, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_19(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, ..., None, 0, :] => out_shape = (1, 100, 200, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 200, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 0, 1, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_20(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, 0, None, ..., :] => out_shape = (1, 1, 200, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 1, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(0, 0, 1, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_21(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, 0, ..., None, :] => out_shape = (1, 200, 10, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 200, 10, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 1, 0, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(0, 0, 1, 0, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_22(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, None, 0, ..., :] => out_shape = (1, 1, 200, 10, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 1, 200, 10, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(0, 1, 0, 0, 0), ellipsis_mask=(0, 0, 0, 1, 0) - ) - - def test_auto_infer_strided_slice_4d_over_5d_23(self): - """ - inp_shape = (1, 100, 200, 10, 3), out = inp[:, ..., 0, None, :] => out_shape = (1, 100, 200, 1, 3) - """ - self.run_test( - inp=(1, 100, 200, 10, 3), is_shape=True, ref_res=(1, 100, 200, 1, 3), - begin=(0, 0, 0, 0, 0), end=(0, 0, 0, 0, 0), strides=(1, 1, 1, 1, 1), begin_mask=(0, 0, 0, 0, 0), end_mask=(0, 0, 0, 0, 0), - shrink_axis_mask=(0, 0, 1, 0, 0), new_axis_mask=(0, 0, 0, 1, 0), ellipsis_mask=(0, 1, 0, 0, 0) - ) diff --git a/tools/mo/unit_tests/mo/ops/switch_test.py b/tools/mo/unit_tests/mo/ops/switch_test.py deleted file mode 100644 index db05b7508ec9bc..00000000000000 --- a/tools/mo/unit_tests/mo/ops/switch_test.py +++ /dev/null @@ -1,239 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import Mock, call - -import numpy as np - -from openvino.tools.mo.ops.switch import Switch -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph_with_edge_attrs, build_graph_with_attrs - - -class TestSwitch(unittest.TestCase): - def test_switch_infer_with_condition(self): - nodes = [ - ('tensor', {'value': np.zeros((3, 3)), 'kind': 'data', 'executable': True, 'shape': np.array([3, 3])}), - ('pred_id', 
{'value': True, 'kind': 'data', 'executable': True}), - ('switch', {'type': 'Switch', 'kind': 'op', 'op': 'Switch', 'infer': Switch.infer}), - ('switch_data_0', {'value': None, 'kind': 'data', 'executable': True, 'shape': None}), - ('switch_data_1', {'value': None, 'kind': 'data', 'executable': True, 'shape': None}), - ('result_0', {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}), - ('result_1', {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}), - ] - edges = [ - ('tensor', 'switch', {'in': 0}), - ('pred_id', 'switch', {'in': 1}), - ('switch', 'switch_data_0', {'out': 0}), - ('switch', 'switch_data_1', {'out': 1}), - ('switch_data_0', 'result_0'), - ('switch_data_1', 'result_1'), - ] - graph = build_graph_with_attrs(nodes_with_attrs=nodes, edges_with_attrs=edges) - - # We should propagate shapes and values - graph_ref = build_graph_with_attrs(nodes_with_attrs=nodes, - edges_with_attrs=edges, - update_nodes_attributes=[('switch_data_0', {'shape': np.array([3, 3]), - 'value': np.zeros((3, 3))}), - ('switch_data_1', {'shape': np.array([3, 3]), - 'value': np.zeros((3, 3))})]) - - node = Node(graph, 'switch') - node.infer(node) - - (flag, resp) = compare_graphs(graph, graph_ref, 'switch_data_0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_switch_infer_no_condition(self): - nodes = [ - ('tensor', {'value': None, 'kind': 'data', 'executable': True, 'shape': np.array([1, 2, 1])}), - ('pred_id', {'value': None, 'kind': 'data', 'executable': True}), - ('switch', {'type': 'Switch', 'kind': 'op', 'op': 'Switch', 'infer': Switch.infer}), - ('switch_data_0', {'value': None, 'kind': 'data', 'executable': True, 'shape': None}), - ('switch_data_1', {'value': None, 'kind': 'data', 'executable': True, 'shape': None}), - ('result_0', {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}), - ('result_1', {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}), - ] - edges = [ - ('tensor', 'switch', {'in': 0}), - ('pred_id', 'switch', {'in': 1}), - ('switch', 'switch_data_0', {'out': 0}), - ('switch', 'switch_data_1', {'out': 1}), - ('switch_data_0', 'result_0'), - ('switch_data_1', 'result_1'), - ] - graph = build_graph_with_attrs(nodes_with_attrs=nodes, edges_with_attrs=edges) - - # We should propagate only shapes - graph_ref = build_graph_with_attrs(nodes_with_attrs=nodes, - edges_with_attrs=edges, - update_nodes_attributes=[('switch_data_0', {'shape': np.array([1, 2, 1])}), - ('switch_data_1', {'shape': np.array([1, 2, 1])})]) - - node = Node(graph, 'switch') - node.infer(node) - - (flag, resp) = compare_graphs(graph, graph_ref, 'switch_data_0', check_op_attrs=True) - self.assertTrue(flag, resp) - - def test_switch_cf_infer_no_condition(self): - me_mock = Mock() - nodes = { - 'tensor': {'value': True, 'kind': 'data', 'executable': True}, - 'pred_id': {'value': None, 'kind': 'data', 'executable': True}, - 'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch', 'control_flow_infer': Switch.control_flow_infer}, - 'switch_data_0': {'value': None, 'kind': 'data', 'executable': True}, - 'switch_data_1': {'value': None, 'kind': 'data', 'executable': True}, - 'result_0': {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}, - 'result_1': {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}, - } - edges = [ - ('tensor', 'switch', {'in': 0}), - ('pred_id', 'switch', {'in': 1}), - ('switch', 
'switch_data_0', {'out': 0}), - ('switch', 'switch_data_1', {'out': 1}), - ('switch_data_0', 'result_0', {'in': 0}), - ('switch_data_1', 'result_1', {'in': 0}), - ] - graph = build_graph_with_edge_attrs(nodes, edges) - - node = Node(graph, 'switch') - node.control_flow_infer(node, True, me_mock) - # In this case we should mark all ports as executable - me_mock.assert_has_calls([call('switch_data_0', True), call('switch_data_1', True)], any_order=True) - - def test_switch_cf_true_both_ports(self): - me_mock = Mock() - nodes = { - 'tensor': {'value': True, 'kind': 'data', 'executable': True}, - 'pred_id': {'value': np.array(True), 'kind': 'data', 'executable': True}, - 'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch', 'control_flow_infer': Switch.control_flow_infer}, - 'switch_data_0': {'value': None, 'kind': 'data', 'executable': True}, - 'switch_data_1': {'value': None, 'kind': 'data', 'executable': True}, - 'result_0': {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}, - 'result_1': {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}, - } - edges = [ - ('tensor', 'switch', {'in': 0}), - ('pred_id', 'switch', {'in': 1}), - ('switch', 'switch_data_0', {'out': 0}), - ('switch', 'switch_data_1', {'out': 1}), - ('switch_data_0', 'result_0', {'in': 0}), - ('switch_data_1', 'result_1', {'in': 0}), - ] - graph = build_graph_with_edge_attrs(nodes, edges) - node = Node(graph, 'switch') - node.control_flow_infer(node, True, me_mock) - me_mock.assert_has_calls([call('switch_data_0', False), call('switch_data_1', True)], any_order=True) - - def test_switch_cf_false_both_ports(self): - me_mock = Mock() - - nodes = { - 'tensor': {'value': True, 'kind': 'data', 'executable': True}, - 'pred_id': {'value': np.array(False), 'kind': 'data', 'executable': True}, - 'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch', 'control_flow_infer': Switch.control_flow_infer}, - 'switch_data_0': {'value': None, 'kind': 'data', 'executable': True}, - 'switch_data_1': {'value': None, 'kind': 'data', 'executable': True}, - 'result_0': {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}, - 'result_1': {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}, - } - edges = [ - ('tensor', 'switch', {'in': 0}), - ('pred_id', 'switch', {'in': 1}), - ('switch', 'switch_data_0', {'out': 0}), - ('switch', 'switch_data_1', {'out': 1}), - ('switch_data_0', 'result_0', {'in': 0}), - ('switch_data_1', 'result_1', {'in': 0}), - ] - graph = build_graph_with_edge_attrs(nodes, edges) - node = Node(graph, 'switch') - node.control_flow_infer(node, True, me_mock) - me_mock.assert_has_calls([call('switch_data_0', True), call('switch_data_1', False)], any_order=True) - - def test_switch_cf_true_one_exec_port(self): - me_mock = Mock() - - nodes = { - 'tensor': {'value': True, 'kind': 'data', 'executable': True}, - 'pred_id': {'value': np.array(True), 'kind': 'data', 'executable': True}, - 'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch', 'control_flow_infer': Switch.control_flow_infer}, - 'switch_data_1': {'value': None, 'kind': 'data', 'executable': True}, - 'result_1': {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}, - } - edges = [ - ('tensor', 'switch', {'in': 0}), - ('pred_id', 'switch', {'in': 1}), - ('switch', 'switch_data_1', {'out': 1}), - ('switch_data_1', 'result_1', {'in': 0}), - ] - graph = build_graph_with_edge_attrs(nodes, edges) - node = 
Node(graph, 'switch') - node.control_flow_infer(node, True, me_mock) - me_mock.assert_has_calls([call('switch_data_1', True)], any_order=True) - - def test_switch_cf_false_one_exec_port(self): - me_mock = Mock() - - nodes = { - 'tensor': {'value': True, 'kind': 'data', 'executable': True}, - 'pred_id': {'value': np.array(False), 'kind': 'data', 'executable': True}, - 'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch', 'control_flow_infer': Switch.control_flow_infer}, - 'switch_data_0': {'value': None, 'kind': 'data', 'executable': True}, - 'result_0': {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}, - } - edges = [ - ('tensor', 'switch', {'in': 0}), - ('pred_id', 'switch', {'in': 1}), - ('switch', 'switch_data_0', {'out': 0}), - ('switch_data_0', 'result_0', {'in': 0}), - ] - graph = build_graph_with_edge_attrs(nodes, edges) - node = Node(graph, 'switch') - node.control_flow_infer(node, True, me_mock) - me_mock.assert_has_calls([call('switch_data_0', True)], any_order=True) - - def test_switch_cf_true_no_exec(self): - me_mock = Mock() - - nodes = { - 'tensor': {'value': True, 'kind': 'data', 'executable': True}, - 'pred_id': {'value': np.array(True), 'kind': 'data', 'executable': True}, - 'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch', 'control_flow_infer': Switch.control_flow_infer}, - 'switch_data_0': {'value': None, 'kind': 'data', 'executable': True}, - 'result_0': {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}, - } - edges = [ - ('tensor', 'switch', {'in': 0}), - ('pred_id', 'switch', {'in': 1}), - ('switch', 'switch_data_0', {'out': 0}), - ('switch_data_0', 'result_0', {'in': 0}), - ] - graph = build_graph_with_edge_attrs(nodes, edges) - node = Node(graph, 'switch') - node.control_flow_infer(node, True, me_mock) - me_mock.assert_has_calls([call('switch_data_0', False)], any_order=True) - - def test_switch_cf_false_no_exec(self): - me_mock = Mock() - - nodes = { - 'tensor': {'value': True, 'kind': 'data', 'executable': True}, - 'pred_id': {'value': np.array(False), 'kind': 'data', 'executable': True}, - 'switch': {'type': 'Switch', 'kind': 'op', 'op': 'Switch', 'control_flow_infer': Switch.control_flow_infer}, - 'switch_data_1': {'value': None, 'kind': 'data', 'executable': True}, - 'result_1': {'value': None, 'kind': 'op', 'executable': True, 'type': 'Result', 'op': 'Result'}, - } - edges = [ - ('tensor', 'switch', {'in': 0}), - ('pred_id', 'switch', {'in': 1}), - ('switch', 'switch_data_1', {'out': 1}), - ('switch_data_1', 'result_1', {'in': 0}), - ] - graph = build_graph_with_edge_attrs(nodes, edges) - node = Node(graph, 'switch') - node.control_flow_infer(node, True, me_mock) - me_mock.assert_has_calls([call('switch_data_1', False)], any_order=True) diff --git a/tools/mo/unit_tests/mo/ops/tile_test.py b/tools/mo/unit_tests/mo/ops/tile_test.py deleted file mode 100644 index 31833c3ad43800..00000000000000 --- a/tools/mo/unit_tests/mo/ops/tile_test.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.tile import Tile, AttributedTile -from unit_tests.utils.graph import build_graph - -nodes_attributes = { - 'op': {'kind': 'op'}, - 'data': {'value': None, 'shape': np.array([10, 20, 30, 40]), 'kind': 'data'}, - 'const': {'kind': 'op'}, - 'tile_values': {'value': None, 'shape': np.array([4]), 'kind': 'data'}, - 
'tile': {'type': 'AttributedTile', 'kind': 'op'}, - 'tile_out': {'value': None, 'shape': None, 'kind': 'data'}, - -} - -edges = [ - ('op', 'data'), - ('data', 'tile'), - ('const', 'tile_values'), - ('tile_values', 'tile'), - ('tile', 'tile_out'), -] - -attributed_edges = [ - ('op', 'data'), - ('data', 'tile'), - ('tile', 'tile_out'), -] - - -class TestTileInfer(unittest.TestCase): - def test_tile_infer_correct(self): - graph = build_graph(nodes_attributes, edges, - {'tile_values': {'value': np.array([7, 1, 1, 1])}}) - tile_node = Node(graph, 'tile') - Tile.infer(tile_node) - self.assertTrue(np.all(np.array([70, 20, 30, 40]) == graph.node['tile_out']['shape'])) - - def test_tile_infer_correct_2(self): - graph = build_graph(nodes_attributes, edges, - {'tile_values': {'value': np.array([1, 7, 1, 1])}}) - tile_node = Node(graph, 'tile') - Tile.infer(tile_node) - self.assertTrue(np.all(np.array([10, 140, 30, 40]) == graph.node['tile_out']['shape'])) - - def test_tile_infer_correct_2d_tensor(self): - graph = build_graph(nodes_attributes, edges, - {'data': {'shape': np.array([3, 7])}, - 'tile_values': {'value': np.array([5, 1])}}) - tile_node = Node(graph, 'tile') - Tile.infer(tile_node) - self.assertTrue(np.all(np.array([15, 7]) == graph.node['tile_out']['shape'])) - - def test_tile_infer_all_ones(self): - graph = build_graph(nodes_attributes, edges, - {'tile_values': {'value': np.array([1, 1, 1, 1])}}) - tile_node = Node(graph, 'tile') - Tile.infer(tile_node) - self.assertTrue(np.all(np.array([10, 20, 30, 40]) == graph.node['tile_out']['shape'])) - - def test_tile_infer_two_non_one(self): - graph = build_graph(nodes_attributes, edges, - {'tile_values': {'value': np.array([2, 1, 1, 2])}}) - tile_node = Node(graph, 'tile') - Tile.infer(tile_node) - self.assertTrue(np.all(np.array([20, 20, 30, 80]) == graph.node['tile_out']['shape'])) - - def test_tile_infer_three_non_one(self): - graph = build_graph(nodes_attributes, edges, - {'tile_values': {'value': np.array([2, 1, 5, 2])}}) - tile_node = Node(graph, 'tile') - Tile.infer(tile_node) - self.assertTrue(np.all(np.array([20, 20, 150, 80]) == graph.node['tile_out']['shape'])) - - def test_tile_infer_none_input_shape(self): - graph = build_graph(nodes_attributes, edges, - {'data': {'shape': None}, - 'tile_values': {'value': np.array([1, 7, 1, 1])}}) - tile_node = Node(graph, 'tile') - self.assertRaises(AssertionError, Tile.infer, tile_node) - - def test_tile_infer_values_test(self): - input_data = np.arange(-30, 60, 0.25).reshape([2, 4, 3, -1]) - tile_values = np.array([3, 1, 1, 1]) - graph = build_graph(nodes_attributes, edges, - {'data': {'shape': np.array(input_data.shape), 'value': input_data}, - 'tile_values': {'value': tile_values}}) - tile_node = Node(graph, 'tile') - Tile.infer(tile_node) - self.assertTrue(np.all(np.tile(input_data, tile_values) == graph.node['tile_out']['value'])) - - def test_tile_infer_values_const_propagation(self): - """ - Test for constant propagation even if tile with multiple tile indices is not supported - """ - input_data = np.arange(-30, 60, 0.25).reshape([2, 4, 3, -1]) - tile_values = np.array([4, 3, 2, 5]) - graph = build_graph(nodes_attributes, edges, - {'data': {'shape': np.array(input_data.shape), 'value': input_data}, - 'tile_values': {'value': tile_values}}) - tile_node = Node(graph, 'tile') - Tile.infer(tile_node) - self.assertTrue(np.all(np.tile(input_data, tile_values) == graph.node['tile_out']['value'])) - - def test_tile_infer_undefined_tile_values(self): - graph = build_graph(nodes_attributes, edges, - 
{'tile_values': {'value': None}}) - tile_node = Node(graph, 'tile') - self.assertRaises(AssertionError, Tile.infer, tile_node) - - def test_tile_infer_shapes_alignment(self): - graph = build_graph(nodes_attributes, edges, - {'tile_values': {'value': np.array([1, 2, 3]), 'shape': np.array([3])}}) - tile_node = Node(graph, 'tile') - Tile.infer(tile_node) - self.assertTrue(np.all(np.array([10, 20, 60, 120]) == graph.node['tile_out']['shape'])) - - def test_tile_infer_one_input_correct(self): - graph = build_graph(nodes_attributes, attributed_edges, - {'tile': {'axis': 1, 'tiles': 7}}) - tile_node = Node(graph, 'tile') - AttributedTile.infer(tile_node) - self.assertTrue(np.all(np.array([10, 140, 30, 40]) == graph.node['tile_out']['shape'])) - self.assertEqual(tile_node.axis, 1) - self.assertEqual(tile_node.tiles, 7) - - def test_tile_infer_one_input_correct_missing_axis(self): - graph = build_graph(nodes_attributes, attributed_edges, - {'tile': {'tiles': 7}}) - tile_node = Node(graph, 'tile') - self.assertRaises(AssertionError, AttributedTile.infer, tile_node) - - def test_tile_infer_one_input_correct_missing_tiles(self): - graph = build_graph(nodes_attributes, attributed_edges, - {'tile': {'axis': 1}}) - tile_node = Node(graph, 'tile') - self.assertRaises(AssertionError, AttributedTile.infer, tile_node) diff --git a/tools/mo/unit_tests/mo/ops/topk_test.py b/tools/mo/unit_tests/mo/ops/topk_test.py deleted file mode 100644 index 98cd4e834a660b..00000000000000 --- a/tools/mo/unit_tests/mo/ops/topk_test.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.topk import TopK -from unit_tests.utils.graph import build_graph, regular_op_with_shaped_data, valued_const_with_data, result, connect - - -class TestTopKInfer(unittest.TestCase): - def setUp(self): - nodes = { - **regular_op_with_shaped_data('data', [20, 100, 4], {'type': 'Parameter', 'value': None, - '_out_port_data_type': {0: np.float32}}), - **valued_const_with_data('k', int64_array(10)), - **regular_op_with_shaped_data('topk', None, {'op': 'TopK', 'type': 'TopK', 'name': 'topk', 'axis': 1}), - 'topk_d2': {'kind': 'data', 'shape': None, 'value': None}, - **result('output_1'), - **result('output_2'), - } - self.graph = build_graph(nodes, [ - *connect('data', '0:topk'), - *connect('k', '1:topk'), - ('topk', 'topk_d', {'out': 0}), - ('topk', 'topk_d2', {'out': 1}), - ('topk_d', 'output_1'), - ('topk_d2', 'output_2'), - ], nodes_with_edges_only=True) - - nodes2 = { - **regular_op_with_shaped_data('data', [4, 10, 8], {'type': 'Parameter', 'value': None, - '_out_port_data_type': {0: np.float32}}), - **regular_op_with_shaped_data('k', [], {'type': 'Parameter', 'value': None, - '_out_port_data_type': {0: np.int64}}), - **regular_op_with_shaped_data('topk', None, {'op': 'TopK', 'type': 'TopK', 'name': 'topk', 'axis': 2}), - 'topk_d2': {'kind': 'data', 'shape': None, 'value': None}, - **result('output_1'), - **result('output_2'), - } - self.graph2 = build_graph(nodes2, [ - *connect('data', '0:topk'), - *connect('k', '1:topk'), - ('topk', 'topk_d', {'out': 0}), - ('topk', 'topk_d2', {'out': 1}), - ('topk_d', 'output_1'), - ('topk_d2', 'output_2'), - ], nodes_with_edges_only=True) - - def test_topk_infer_opset1(self): - topk_node = Node(self.graph, 'topk') - 
topk_node['version'] = 'opset1' - TopK.infer(topk_node) - TopK.type_infer(topk_node) - - self.assertTrue(np.array_equal(topk_node.out_port(0).data.get_shape(), int64_array([20, 10, 4]))) - self.assertTrue(np.array_equal(topk_node.out_port(1).data.get_shape(), int64_array([20, 10, 4]))) - self.assertTrue(topk_node.out_port(0).get_data_type() == np.float32) - self.assertTrue(topk_node.out_port(1).get_data_type() == np.int32) - - def test_topk_infer_i64_opset3(self): - topk_node = Node(self.graph, 'topk') - topk_node['version'] = 'opset3' - topk_node['index_element_type'] = np.int64 - TopK.infer(topk_node) - TopK.type_infer(topk_node) - - self.assertTrue(np.array_equal(topk_node.out_port(0).data.get_shape(), int64_array([20, 10, 4]))) - self.assertTrue(np.array_equal(topk_node.out_port(1).data.get_shape(), int64_array([20, 10, 4]))) - self.assertTrue(topk_node.out_port(0).get_data_type() == np.float32) - self.assertTrue(topk_node.out_port(1).get_data_type() == np.int64) - - def test_topk_infer_i32_opset3(self): - topk_node = Node(self.graph, 'topk') - topk_node['version'] = 'opset3' - topk_node['index_element_type'] = np.int32 - TopK.infer(topk_node) - TopK.type_infer(topk_node) - - self.assertTrue(np.array_equal(topk_node.out_port(0).data.get_shape(), int64_array([20, 10, 4]))) - self.assertTrue(np.array_equal(topk_node.out_port(1).data.get_shape(), int64_array([20, 10, 4]))) - self.assertTrue(topk_node.out_port(0).get_data_type() == np.float32) - self.assertTrue(topk_node.out_port(1).get_data_type() == np.int32) - - def test_topk_infer_with_dynamic_k(self): - topk_node = Node(self.graph2, 'topk') - topk_node['version'] = 'opset3' - topk_node['index_element_type'] = np.int32 - TopK.infer(topk_node) - TopK.type_infer(topk_node) - - self.assertTrue( - np.array_equal(topk_node.out_port(0).data.get_shape(), shape_array([4, 10, dynamic_dimension_value]))) - self.assertTrue( - np.array_equal(topk_node.out_port(1).data.get_shape(), shape_array([4, 10, dynamic_dimension_value]))) - self.assertTrue(topk_node.out_port(0).get_data_type() == np.float32) - self.assertTrue(topk_node.out_port(1).get_data_type() == np.int32) diff --git a/tools/mo/unit_tests/mo/ops/transpose_test.py b/tools/mo/unit_tests/mo/ops/transpose_test.py deleted file mode 100644 index 544eda98eaf93a..00000000000000 --- a/tools/mo/unit_tests/mo/ops/transpose_test.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import itertools -import unittest -import pytest -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, strict_compare_tensors, \ - dynamic_dimension_value -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.middle.passes.infer import partial_infer -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.ops.transpose import Transpose -from unit_tests.utils.graph import valued_const_with_data, result, regular_op_with_empty_data, connect, \ - build_graph, shaped_parameter - -input_shape = np.array([1, 3, 224, 224]) - - -class TestTransposeOp(): - nodes_attributes = { - 'parameter': { - 'kind': 'op', - 'op': 'Parameter', - 'shape': input_shape - }, - 'data_1': { - 'kind': 'data', - 'shape': input_shape, - 'value': None - }, - 'order_const': { - 'kind': 'op', - 'op': 'Const', - 'shape': np.array([4]) - }, - 'order_data': { - 'kind': 'data', - 'shape': np.array([4]) - }, - 'transpose': { - 'type': 'Transpose', - 'op': 'Transpose', - 'reverse_order': False, - 
'kind': 'op', - }, - 'data_2': { - 'kind': 'data', - 'shape': None, - 'value': None - } - } - - def _create_graph_with_transpose(self, order): - if order is None: - graph = build_graph(self.nodes_attributes, - [('parameter', 'data_1'), - ('data_1', 'transpose'), - ('transpose', 'data_2')]) - else: - graph = build_graph(self.nodes_attributes, - [('parameter', 'data_1'), - ('data_1', 'transpose'), - ('order_const', 'order_data'), - ('order_data', 'transpose'), - ('transpose', 'data_2')], - {'order_data': {'value': order}}) - graph.graph['layout'] = 'NCHW' - return graph - - @pytest.mark.parametrize("order",[list(order) for order in list(itertools.permutations(np.arange(4)))]) - def test_transpose_infer_1(self, order): - graph = self._create_graph_with_transpose(order) - transpose_node = Node(graph, 'transpose') - - Transpose.infer(transpose_node) - - ref = [transpose_node.in_node().shape[i] for i in order] - assert np.array_equal(transpose_node.out_node().shape, np.array(ref)) - - def test_transpose_infer_2(self): - order = None - graph = self._create_graph_with_transpose(order) - transpose_node = Node(graph, 'transpose') - transpose_node['reverse_order'] = True - Transpose.infer(transpose_node) - - ref = np.array([x for x in reversed(transpose_node.in_node().shape)]) - assert np.array_equal(transpose_node.out_node().shape, ref),\ - "Shapes are not the same: {} and {}".format(transpose_node.out_node().shape, ref) - - def test_transpose_infer_neg_1(self): - order = np.array([0, 1, 2, 3]) - graph = self._create_graph_with_transpose(order) - transpose_node = Node(graph, 'transpose') - transpose_node['reverse_order'] = True - with pytest.raises(AssertionError): - Transpose.infer(transpose_node) - - def test_transpose_infer_neg_2(self): - order = None - graph = self._create_graph_with_transpose(order) - transpose_node = Node(graph, 'transpose') - transpose_node['reverse_order'] = False - with pytest.raises(AssertionError): - Transpose.infer(transpose_node) - - -dyn = dynamic_dimension_value - - -class TestTransposeReverseInfer(unittest.TestCase): - @staticmethod - def build_and_test_reverse_inference(order, out_shape, ref_shape): - nodes = { - **shaped_parameter('data', None, {'reverse_infer': Parameter.reverse_infer}), - **valued_const_with_data('order', int64_array(order)), - **regular_op_with_empty_data('transpose', {'op': 'Transpose', - 'infer': Transpose.infer, - 'reverse_infer': Transpose.reverse_infer}), - **result('res'), - } - - edges = [ - *connect('data', '0:transpose'), - *connect('order', '1:transpose'), - *connect('transpose', 'res') - ] - - graph = build_graph(nodes, edges) - graph.stage = 'middle' - Node(graph, 'transpose').out_port(0).data.set_shape(shape_array(out_shape)) - - partial_infer(graph) - actual_shape = Node(graph, 'data').out_port(0).data.get_shape() - assert strict_compare_tensors(actual_shape, shape_array(ref_shape)) - - def test_reverse_infer_1(self): - self.build_and_test_reverse_inference(order=[0, 3, 1, 2], - out_shape=[dyn, dyn, dyn, dyn], - ref_shape=[dyn, dyn, dyn, dyn]) - - def test_reverse_infer_2(self): - self.build_and_test_reverse_inference(order=[0, 3, 1, 2], - out_shape=[44, 32, 77, 1], - ref_shape=[44, 77, 1, 32]) - - def test_reverse_infer_3(self): - self.build_and_test_reverse_inference(order=[0, 2, 3, 1], - out_shape=[44, 32, 77, 1], - ref_shape=[44, 1, 32, 77]) diff --git a/tools/mo/unit_tests/mo/ops/unique_test.py b/tools/mo/unit_tests/mo/ops/unique_test.py deleted file mode 100644 index dd61980375093d..00000000000000 --- 
a/tools/mo/unit_tests/mo/ops/unique_test.py +++ /dev/null @@ -1,259 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -import numpy as np - -from openvino.tools.mo.ops.unique import Unique -from openvino.tools.mo.front.common.partial_infer.utils import int64_array -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -# graph 1 with two outputs: uniques and indices -nodes_attributes = {'input': {'shape': None, 'value': None, 'kind': 'data'}, - 'unique_node': {'op': 'Unique', 'kind': 'op'}, - 'output_uniques': {'shape': None, 'value': None, 'kind': 'data'}, - 'output_indices': {'shape': None, 'value': None, 'kind': 'data'}, - } -edges1 = [('input', 'unique_node', {'in': 0}), - ('unique_node', 'output_uniques', {'out': 0}), - ('unique_node', 'output_indices', {'out': 1})] -inputs1 = {'input': {'shape': int64_array([20]), 'value': None}, - 'unique_node': { - 'sorted': 'false', - 'return_inverse': 'true', - 'return_counts': 'false' - } - } - -# graph 2 with three outputs: uniques, indices and counts -nodes_attributes2 = {'input': {'shape': None, 'value': None, 'kind': 'data'}, - 'unique_node': {'op': 'Unique', 'kind': 'op'}, - 'output_uniques': {'shape': None, 'value': None, 'kind': 'data'}, - 'output_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'output_counts': {'shape': None, 'value': None, 'kind': 'data'} - } -edges2 = [('input', 'unique_node', {'in': 0}), - ('unique_node', 'output_uniques', {'out': 0}), - ('unique_node', 'output_indices', {'out': 1}), - ('unique_node', 'output_counts', {'out': 2})] -inputs2 = {'input': {'shape': int64_array([20]), 'value': None}, - 'unique_node': { - 'sorted': 'false', - 'return_inverse': 'true', - 'return_counts': 'true' - } - } - - -class TestUnique(unittest.TestCase): - # case 1: a graph with two outputs: uniques and indices - def test_partial_infer1(self): - graph = build_graph(nodes_attributes, edges1, inputs1) - - unique_node = Node(graph, 'unique_node') - Unique.infer(unique_node) - - # prepare reference results - ref_output_uniques_shape = int64_array([20]) - ref_output_indices_shape = int64_array([20]) - - # get resulted shapes - res_output_uniques_shape = graph.node['output_uniques']['shape'] - res_output_indices_shape = graph.node['output_indices']['shape'] - - self.assertTrue(np.array_equal(ref_output_uniques_shape, res_output_uniques_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_uniques_shape, res_output_uniques_shape)) - - self.assertTrue(np.array_equal(ref_output_indices_shape, res_output_indices_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_indices_shape, res_output_indices_shape)) - - # case 2: a graph with three outputs: uniques, indices and counts - def test_partial_infer2(self): - graph = build_graph(nodes_attributes2, edges2, inputs2) - - unique_node = Node(graph, 'unique_node') - Unique.infer(unique_node) - - # prepare reference results - ref_output_uniques_shape = int64_array([20]) - ref_output_indices_shape = int64_array([20]) - ref_output_counts_shape = int64_array([20]) - - # get resulted shapes - res_output_uniques_shape = graph.node['output_uniques']['shape'] - res_output_indices_shape = graph.node['output_indices']['shape'] - res_output_counts_shape = graph.node['output_counts']['shape'] - - self.assertTrue(np.array_equal(ref_output_uniques_shape, res_output_uniques_shape), - 'shapes do not match expected: {} and given: 
{}'.format(ref_output_uniques_shape, res_output_uniques_shape)) - - self.assertTrue(np.array_equal(ref_output_indices_shape, res_output_indices_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_indices_shape, res_output_indices_shape)) - - self.assertTrue(np.array_equal(ref_output_counts_shape, res_output_counts_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_counts_shape, res_output_counts_shape)) - - # case 3: a graph with just unique output - def test_partial_infer_just_unique(self): - edges = [('input', 'unique_node', {'in': 0}), - ('unique_node', 'output_uniques', {'out': 0})] - graph = build_graph(nodes_attributes, edges, inputs1) - - unique_node = Node(graph, 'unique_node') - Unique.infer(unique_node) - - # prepare reference results - ref_output_uniques_shape = int64_array([20]) - - # get resulted shapes - res_output_uniques_shape = graph.node['output_uniques']['shape'] - - self.assertTrue(np.array_equal(ref_output_uniques_shape, res_output_uniques_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_uniques_shape, res_output_uniques_shape)) - - # case 4: an invalid graph with 2D input - def test_incorrect_input_shape(self): - inputs = {'input': {'shape': int64_array([20, 2]), 'value': None}} - - graph = build_graph(nodes_attributes, edges1, inputs) - - unique_node = Node(graph, 'unique_node') - self.assertRaises(AssertionError, Unique.infer, unique_node) - - # case 5: an invalid graph with return_counts = false and three outputs - def test_more_output_ports(self): - nodes_attributes1 = {'input': {'shape': None, 'value': None, 'kind': 'data'}, - 'unique_node': {'op': 'Unique', 'kind': 'op'}, - 'output_uniques': {'shape': None, 'value': None, 'kind': 'data'}, - 'output_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'output3': {'shape': None, 'value': None, 'kind': 'data'}, - } - edges = [('input', 'unique_node', {'in': 0}), - ('unique_node', 'output_uniques', {'out': 0}), - ('unique_node', 'output_indices', {'out': 1}), - ('unique_node', 'output3', {'out': 2})] - graph = build_graph(nodes_attributes1, edges, inputs1) - - unique_node = Node(graph, 'unique_node') - self.assertRaises(AssertionError, Unique.infer, unique_node) - - # case 6: an invalid graph without unique output - def test_no_uniques_output(self): - edges = [('input', 'unique_node', {'in': 0}), - ('unique_node', 'output_indices', {'out': 1})] - graph = build_graph(nodes_attributes, edges, inputs1) - - unique_node = Node(graph, 'unique_node') - self.assertRaises(AssertionError, Unique.infer, unique_node) - - # case 7: infer for constant input - # graph with a constant input, three outputs, sorted = 'false' - def test_constant_input(self): - nodes_attributes_ = {'input': {'shape': None, 'value': None, 'kind': 'data'}, - 'unique_node': {'op': 'Unique', 'kind': 'op'}, - 'output_uniques': {'shape': None, 'value': None, 'kind': 'data'}, - 'output_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'output_counts': {'shape': None, 'value': None, 'kind': 'data'} - } - edges_ = [('input', 'unique_node', {'in': 0}), - ('unique_node', 'output_uniques', {'out': 0}), - ('unique_node', 'output_indices', {'out': 1}), - ('unique_node', 'output_counts', {'out': 2})] - inputs_ = {'input': {'shape': int64_array([10]), - 'value': np.array([8.0, 1.0, 2.0, 1.0, 8.0, 5.0, 1.0, 5.0, 0.0, 0.0], dtype=float)}, - 'unique_node': { - 'sorted': 'false', - 'return_inverse': 'true', - 'return_counts': 'true' - } - } - graph = 
build_graph(nodes_attributes_, edges_, inputs_) - unique_node = Node(graph, 'unique_node') - Unique.infer(unique_node) - - # prepare reference results - ref_output_uniques_shape = int64_array([5]) - ref_output_uniques_value = np.array([8.0, 1.0, 2.0, 5.0, 0.0], dtype=float) - ref_output_indices_shape = int64_array([10]) - ref_output_indices_value = np.array([0.0, 1.0, 2.0, 1.0, 0.0, 3.0, 1.0, 3.0, 4.0, 4.0], dtype=float) - ref_output_counts_shape = int64_array([5]) - ref_output_counts_value = np.array([2.0, 3.0, 1.0, 2.0, 2.0], dtype=float) - - # get resulted shapes - res_output_uniques_shape = graph.node['output_uniques']['shape'] - res_output_uniques_value = graph.node['output_uniques']['value'] - res_output_indices_shape = graph.node['output_indices']['shape'] - res_output_indices_value = graph.node['output_indices']['value'] - res_output_counts_shape = graph.node['output_counts']['shape'] - res_output_counts_value = graph.node['output_counts']['value'] - - # verify the results - self.assertTrue(np.array_equal(ref_output_uniques_shape, res_output_uniques_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_uniques_shape, res_output_uniques_shape)) - self.assertTrue(np.array_equal(ref_output_uniques_value, res_output_uniques_value), - 'values do not match expected: {} and given: {}'.format(ref_output_uniques_value, res_output_uniques_value)) - self.assertTrue(np.array_equal(ref_output_indices_shape, res_output_indices_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_indices_shape, res_output_indices_shape)) - self.assertTrue(np.array_equal(ref_output_indices_value, res_output_indices_value), - 'values do not match expected: {} and given: {}'.format(ref_output_indices_value, res_output_indices_value)) - self.assertTrue(np.array_equal(ref_output_counts_shape, res_output_counts_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_counts_shape, res_output_counts_shape)) - self.assertTrue(np.array_equal(ref_output_counts_value, res_output_counts_value), - 'values do not match expected: {} and given: {}'.format(ref_output_counts_value, res_output_counts_value)) - - # case 8: infer for constant input - # graph with a constant input, three outputs, sorted = 'true' - def test_constant_input(self): - nodes_attributes_ = {'input': {'shape': None, 'value': None, 'kind': 'data'}, - 'unique_node': {'op': 'Unique', 'kind': 'op'}, - 'output_uniques': {'shape': None, 'value': None, 'kind': 'data'}, - 'output_indices': {'shape': None, 'value': None, 'kind': 'data'}, - 'output_counts': {'shape': None, 'value': None, 'kind': 'data'} - } - edges_ = [('input', 'unique_node', {'in': 0}), - ('unique_node', 'output_uniques', {'out': 0}), - ('unique_node', 'output_indices', {'out': 1}), - ('unique_node', 'output_counts', {'out': 2})] - inputs_ = {'input': {'shape': int64_array([10]), - 'value': np.array([8.0, 1.0, 2.0, 1.0, 8.0, 5.0, 1.0, 5.0, 0.0, 0.0], dtype=float)}, - 'unique_node': { - 'sorted': 'true', - 'return_inverse': 'true', - 'return_counts': 'true' - } - } - graph = build_graph(nodes_attributes_, edges_, inputs_) - unique_node = Node(graph, 'unique_node') - Unique.infer(unique_node) - - # prepare reference results - ref_output_uniques_shape = int64_array([5]) - ref_output_uniques_value = np.array([0.0, 1.0, 2.0, 5.0, 8.0], dtype=float) - ref_output_indices_shape = int64_array([10]) - ref_output_indices_value = np.array([4.0, 1.0, 2.0, 1.0, 4.0, 3.0, 1.0, 3.0, 0.0, 0.0], dtype=float) - ref_output_counts_shape = int64_array([5]) 
- ref_output_counts_value = np.array([2.0, 3.0, 1.0, 2.0, 2.0], dtype=float) - - # get resulted shapes - res_output_uniques_shape = graph.node['output_uniques']['shape'] - res_output_uniques_value = graph.node['output_uniques']['value'] - res_output_indices_shape = graph.node['output_indices']['shape'] - res_output_indices_value = graph.node['output_indices']['value'] - res_output_counts_shape = graph.node['output_counts']['shape'] - res_output_counts_value = graph.node['output_counts']['value'] - - # verify the results - self.assertTrue(np.array_equal(ref_output_uniques_shape, res_output_uniques_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_uniques_shape, res_output_uniques_shape)) - self.assertTrue(np.array_equal(ref_output_uniques_value, res_output_uniques_value), - 'values do not match expected: {} and given: {}'.format(ref_output_uniques_value, res_output_uniques_value)) - self.assertTrue(np.array_equal(ref_output_indices_shape, res_output_indices_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_indices_shape, res_output_indices_shape)) - self.assertTrue(np.array_equal(ref_output_indices_value, res_output_indices_value), - 'values do not match expected: {} and given: {}'.format(ref_output_indices_value, res_output_indices_value)) - self.assertTrue(np.array_equal(ref_output_counts_shape, res_output_counts_shape), - 'shapes do not match expected: {} and given: {}'.format(ref_output_counts_shape, res_output_counts_shape)) - self.assertTrue(np.array_equal(ref_output_counts_value, res_output_counts_value), - 'values do not match expected: {} and given: {}'.format(ref_output_counts_value, res_output_counts_value)) diff --git a/tools/mo/unit_tests/mo/ops/unsqueeze_test.py b/tools/mo/unit_tests/mo/ops/unsqueeze_test.py deleted file mode 100644 index f21dcbfc3205e1..00000000000000 --- a/tools/mo/unit_tests/mo/ops/unsqueeze_test.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.ops.unsqueeze import Unsqueeze -from openvino.tools.mo.utils.ir_engine.compare_graphs import compare_graphs -from unit_tests.utils.graph import build_graph - - -class TestUnsqueezeOp(): - nodes_attributes = { - 'data_1': { - 'kind': 'data', - 'shape': None, - 'value': None, - }, - 'unsq': { - 'op': 'Unsqueeze', - 'kind': 'op', - }, - 'unsq_dims_const': { - 'op': 'Const', - 'kind': 'op', - }, - 'unsq_dims': { - 'kind': 'data', - }, - 'data_2': { - 'kind': 'data', - 'shape': None, - 'value': None, - } - } - - @pytest.mark.parametrize("input_shape, unsq_dims, output_shape, ref_uns_dims, input_value, output_value", - [(shape_array([1, 3, 64, 64]), int64_array([0, 4]), shape_array([1, 1, 3, 64, 1, 64]), - int64_array([0, 4]), None, None), - (shape_array([2, 3, 64, 64]), int64_array([-1]), shape_array([2, 3, 64, 64, 1]), int64_array([4]), None, - None), - (shape_array([2, 3, dynamic_dimension_value, 64]), int64_array([0]), - shape_array([1, 2, 3, dynamic_dimension_value, 64]), int64_array([0]), None, None), - (shape_array([1, 2]), int64_array([-1]), shape_array([1, 2, 1]), int64_array([2]), - shape_array([5, dynamic_dimension_value]).reshape((1, 2)), - shape_array([5, dynamic_dimension_value]).reshape((1, 2, 1))), - ]) - def test_unsqueeze_infer(self, 
input_shape, unsq_dims, output_shape, ref_uns_dims, input_value, output_value): - graph = build_graph(self.nodes_attributes, - [('data_1', 'unsq'), - ('unsq_dims_const', 'unsq_dims'), - ('unsq_dims', 'unsq'), - ('unsq', 'data_2')], - {'data_1': {'shape': input_shape, 'value': input_value}, - 'unsq_dims': {'value': unsq_dims, 'shape': unsq_dims.shape}, - 'unsq_dims_const': {'value': unsq_dims, 'shape': unsq_dims.shape}, - }) - - graph_ref = build_graph(self.nodes_attributes, - [('data_1', 'unsq'), - ('unsq_dims_const', 'unsq_dims'), - ('unsq_dims', 'unsq'), - ('unsq', 'data_2')], - {'data_1': {'shape': input_shape, 'value': input_value}, - 'unsq_dims': {'value': ref_uns_dims, 'shape': ref_uns_dims.shape}, - 'unsq_dims_const': {'value': ref_uns_dims, 'shape': ref_uns_dims.shape}, - 'data_2': {'shape': output_shape, 'value': output_value}, - }) - - unsqueeze_node = Node(graph, 'unsq') - Unsqueeze.infer(unsqueeze_node) - - (flag, resp) = compare_graphs(graph, graph_ref, 'data_2') - assert flag, resp - assert strict_compare_tensors(Node(graph, 'data_2').shape, Node(graph_ref, 'data_2').shape) - if Node(graph_ref, 'data_2').value is not None: - assert strict_compare_tensors(Node(graph, 'data_2').value, Node(graph_ref, 'data_2').value) diff --git a/tools/mo/unit_tests/mo/ops/upsample_test.py b/tools/mo/unit_tests/mo/ops/upsample_test.py deleted file mode 100644 index c0fc50225ca92f..00000000000000 --- a/tools/mo/unit_tests/mo/ops/upsample_test.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.ops.upsample import UpsampleOp -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from unit_tests.utils.graph import build_graph - -nodes_attributes = {'node_1': {'type': 'Identity', 'kind': 'op'}, - 'upsample': {'type': 'Upsample', 'kind': 'op'}, - 'node_3': {'type': 'Identity', 'kind': 'op'}, - 'op_output': {'kind': 'op', 'op': 'Result'}, - } - - -class TestUpsampleOp(): - @pytest.mark.parametrize("scales, input_shape, expected_shape",[ - (np.array([1., 1., 2., 2.]), shape_array([1, 3, 227, 227]), shape_array([1, 3, 454, 454])), - (np.array([1., 1., 2.5, 1.5]), shape_array([1, 5, 227, 227]), shape_array([1, 5, 567, 340])), - (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, 1023, 713]), shape_array([1, 14, 1329, 499])), - (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, dynamic_dimension_value, 713]), - shape_array([1, 14, dynamic_dimension_value, 499])), - (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, 1023, dynamic_dimension_value]), - shape_array([1, 14, 1329, dynamic_dimension_value])), - ]) - def test_upsample_with_scales_infer(self, scales, input_shape, expected_shape): - graph = build_graph(nodes_attributes, - [('node_1', 'upsample'), - ('upsample', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': input_shape}, - 'upsample': {'mode': 'linear', - 'height_scale': scales[2], - 'width_scale': scales[3]} - }) - - graph.graph['layout'] = 'NCHW' - upsample_node = Node(graph, 'upsample') - UpsampleOp.upsample_infer(upsample_node) - res_shape = graph.node['node_3']['shape'] - assert strict_compare_tensors(expected_shape, res_shape) - - @pytest.mark.parametrize("scales, input_shape, expected_shape",[ - (np.array([1., 1., 2., 2.]), shape_array([1, 3, 227, 227]), shape_array([1, 3, 454, 454])), 
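        # Illustrative note, not from the original file: the expected shapes in these cases
        # are consistent with floor(input_dim * scale) per spatial axis, e.g. 227 * 2.5 = 567.5 -> 567
        # and 227 * 1.5 = 340.5 -> 340 in the next case; dynamic dimensions stay dynamic.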
- (np.array([1., 1., 2.5, 1.5]), shape_array([1, 5, 227, 227]), shape_array([1, 5, 567, 340])), - (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, 1023, 713]), shape_array([1, 14, 1329, 499])), - (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, dynamic_dimension_value, 713]), - shape_array([1, 14, dynamic_dimension_value, 499])), - (np.array([1., 1., 1.3, 0.7]), shape_array([1, 14, 1023, dynamic_dimension_value]), - shape_array([1, 14, 1329, dynamic_dimension_value])), - ]) - def test_upsample_with_second_input_infer(self, scales, input_shape, expected_shape): - nodes_attributes['scales'] = {'kind': 'data', 'value': scales} - graph = build_graph(nodes_attributes, - [('node_1', 'upsample'), - ('scales', 'upsample'), - ('upsample', 'node_3'), - ('node_3', 'op_output') - ], - {'node_3': {'shape': None, 'value': None}, - 'node_1': {'shape': input_shape}, - 'upsample': {'mode': 'linear', - 'height_scale': None, - 'width_scale': None} - }) - - graph.graph['layout'] = 'NCHW' - upsample_node = Node(graph, 'upsample') - UpsampleOp.upsample_infer(upsample_node) - res_shape = graph.node['node_3']['shape'] - assert strict_compare_tensors(expected_shape, res_shape) diff --git a/tools/mo/unit_tests/mo/pipeline/__init__.py b/tools/mo/unit_tests/mo/pipeline/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/pipeline/common_test.py b/tools/mo/unit_tests/mo/pipeline/common_test.py deleted file mode 100644 index 220f409f23a936..00000000000000 --- a/tools/mo/unit_tests/mo/pipeline/common_test.py +++ /dev/null @@ -1,225 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -import pytest -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.pipeline.common import determined_sort, get_fw_tensor_debug_info, get_sorted_outputs -from unit_tests.utils.graph import build_graph_with_edge_attrs - - -class TestTopologicalSort(): - @pytest.mark.parametrize( "edges",[ - [('A', 'Ad', {'out': 0}), - ('Ad', 'B', {'in': 0}), - ('B', 'Bd', {'out': 0}), - ('Bd', 'C', {'in': 0}), - ('C', 'Cd', {'out': 0}), - ('Cd', 'D', {'in': 0}), - ('D', 'Dd', {'out': 0}), - ('Dd', 'E', {'in': 0}), - ('E', 'Ed', {'out': 0}), - ('Ed', 'I', {'in': 0}), - ('Cd', 'F', {'in': 0}), - ('F', 'Fd', {'out': 0}), - ('Fd', 'G', {'in': 0}), - ('G', 'Gd', {'out': 0}), - ('Gd', 'I', {'in': 1}), - ('Cd', 'H', {'in': 0}), - ('H', 'Hd', {'out': 0}), - ('Hd', 'I', {'in': 2}), - ('I', 'Id', {'out': 0}), - ('Id', 'J', {'in': 0}), - ('J', 'Jd', {'out': 0}), - ('Jd', 'K', {'in': 0}), - ('K', 'Kd', {'out': 0})], - - [('A', 'Ad', {'out': 0}), - ('Ad', 'B', {'in': 0}), - ('B', 'Bd', {'out': 0}), - ('Bd', 'C', {'in': 0}), - ('C', 'Cd', {'out': 0}), - ('Cd', 'D', {'in': 0}), - ('D', 'Dd', {'out': 0}), - ('Dd', 'E', {'in': 0}), - ('E', 'Ed', {'out': 0}), - ('Ed', 'I', {'in': 0}), - ('Cd', 'F', {'in': 0}), - ('F', 'Fd', {'out': 0}), - ('Fd', 'G', {'in': 0}), - ('G', 'Gd', {'out': 0}), - ('Gd', 'I', {'in': 1}), - ('Cd', 'H', {'in': 0}), - ('H', 'Hd', {'out': 0}), - ('Hd', 'I', {'in': 2}), - ('I', 'Id', {'out': 0}), - ('Id', 'J', {'in': 0}), - ('J', 'Jd', {'out': 0}), - ('Jd', 'K', {'in': 0}), - ('K', 'Kd', {'out': 0}), - ('Ad', 'E', {'in': 1}), - ('Bd', 'K', {'in': 1}), - ('Hd', 'J', {'in': 1})], - - [('A', 'Ad', {'out': 0}), - ('Ad', 'B', {'in': 0}), - ('B', 'Bd', {'out': 0}), - ('Bd', 'C', {'in': 0}), - ('C', 'Cd', {'out': 0}), - ('Cd', 'D', {'in': 0}), - ('D', 'Dd', {'out': 0}), - ('Dd', 'E', {'in': 0}), - ('E', 'Ed', {'out': 0}), - 
('Ed', 'I', {'in': 0}), - ('Cd', 'F', {'in': 0}), - ('F', 'Fd', {'out': 0}), - ('Fd', 'G', {'in': 0}), - ('G', 'Gd', {'out': 0}), - ('Gd', 'I', {'in': 1}), - ('Cd', 'H', {'in': 0}), - ('H', 'Hd', {'out': 0}), - ('Hd', 'I', {'in': 2}), - ('I', 'Id', {'out': 0}), - ('Id', 'J', {'in': 0}), - ('J', 'Jd', {'out': 0}), - ('Jd', 'K', {'in': 0}), - ('K', 'Kd', {'out': 0}), - ('Ad', 'E', {'in': 1}), - ('Bd', 'K', {'in': 1}), - ('Hd', 'J', {'in': 1}), - ('Dd', 'F', {'in': 1}), - ('Fd', 'H', {'in': 1}), - ('Gd', 'H', {'in': 0})]] - ) - def test_determined_topological_sort(self, edges): - nodes = {'A': {'type': 'Identity', 'kind': 'op'}, - 'B': {'type': 'Identity', 'kind': 'op'}, - 'C': {'type': 'Identity', 'kind': 'op'}, - 'D': {'type': 'Identity', 'kind': 'op'}, - 'E': {'type': 'Identity', 'kind': 'op'}, - 'F': {'type': 'Identity', 'kind': 'op'}, - 'G': {'type': 'Identity', 'kind': 'op'}, - 'H': {'type': 'Identity', 'kind': 'op'}, - 'I': {'type': 'Identity', 'kind': 'op'}, - 'J': {'type': 'Identity', 'kind': 'op'}, - 'K': {'type': 'Identity', 'kind': 'op'}, - 'Ad': {'value': None, 'kind': 'data'}, - 'Bd': {'value': None, 'kind': 'data'}, - 'Cd': {'value': None, 'kind': 'data'}, - 'Dd': {'value': None, 'kind': 'data'}, - 'Ed': {'value': None, 'kind': 'data'}, - 'Fd': {'value': None, 'kind': 'data'}, - 'Gd': {'value': None, 'kind': 'data'}, - 'Hd': {'value': None, 'kind': 'data'}, - 'Id': {'value': None, 'kind': 'data'}, - 'Jd': {'value': None, 'kind': 'data'}, - 'Kd': {'value': None, 'kind': 'data'}, - } - - graph = build_graph_with_edge_attrs(nodes, edges) - outputs = [Node(graph, 'Kd')] - for i in range(100): - op_order, data_order = determined_sort(outputs) - assert op_order == ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'] - assert data_order == ['Ad', 'Bd', 'Cd', 'Dd', 'Ed', 'Fd', 'Gd', 'Hd', 'Id', 'Jd', 'Kd'] - - -class TestGetFWTensorName(unittest.TestCase): - def test_get_fw_tensor_debug_info(self): - nodes = { - 'A': {'type': 'Identity', 'kind': 'op'}, - 'B': {'type': 'Identity', 'kind': 'op'}, - 'C': {'type': 'Identity', 'kind': 'op'}, - 'Ad': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('A', 0)]}, - 'Bd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('B', 0)]}, - 'Cd': {'value': None, 'kind': 'data'}, - } - edges = [ - ('A', 'Ad', {'out': 0}), - ('Ad', 'B', {'in': 0}), - ('B', 'Bd', {'out': 0}), - ('Bd', 'C', {'in': 0}), - ('C', 'Cd', {'out': 0}) - ] - graph = build_graph_with_edge_attrs(nodes, edges) - fw_debug_info = get_fw_tensor_debug_info(Node(graph, 'Cd')) - self.assertEqual(len(fw_debug_info), 1) - self.assertEqual(fw_debug_info[0], ('B', 0)) - - -class TestOutputSort(unittest.TestCase): - def test_get_sorted_outputs(self): - nodes = {'A': {'type': 'Identity', 'kind': 'op'}, - 'B': {'type': 'Identity', 'kind': 'op'}, - 'C': {'type': 'Identity', 'kind': 'op'}, - 'D': {'type': 'Identity', 'kind': 'op'}, - 'E': {'type': 'Identity', 'kind': 'op'}, - 'F': {'type': 'Identity', 'kind': 'op'}, - 'G': {'type': 'Identity', 'kind': 'op'}, - 'H': {'type': 'Identity', 'kind': 'op'}, - 'Ad': {'value': None, 'kind': 'data'}, - 'Bd': {'value': None, 'kind': 'data'}, - 'Cd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('C', 0)]}, - 'Dd': {'value': None, 'kind': 'data'}, - 'Ed': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('E', 0)]}, - 'Fd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('F', 0)]}, - 'Gd': {'value': None, 'kind': 'data'}, - 'Hd': {'value': None, 'kind': 'data'} - } - edges = [ - ('A', 'Ad', {'out': 0}), - ('Ad', 
'B', {'in': 0}), - ('B', 'Bd', {'out': 0}), - ('Bd', 'C', {'in': 0}), - ('C', 'Cd', {'out': 0}), - ('Cd', 'D', {'in': 0}), - ('D', 'Dd', {'out': 0}), - ('Dd', 'E', {'in': 0}), - ('E', 'Ed', {'out': 0}), - ('Cd', 'F', {'in': 0}), - ('F', 'Fd', {'out': 0}), - ('Fd', 'G', {'in': 0}), - ('G', 'Gd', {'out': 0}), - ('Cd', 'H', {'in': 0}), - ('H', 'Hd', {'out': 0}) - ] - graph = build_graph_with_edge_attrs(nodes, edges) - self.assertListEqual([node.id for node in get_sorted_outputs(graph)], ['Hd', 'Ed', 'Gd']) - - def test_get_sorted_outputs_fine_situation(self): - nodes = {'A': {'type': 'Identity', 'kind': 'op'}, - 'B': {'type': 'Identity', 'kind': 'op'}, - 'C': {'type': 'Identity', 'kind': 'op'}, - 'D': {'type': 'Identity', 'kind': 'op'}, - 'E': {'type': 'Identity', 'kind': 'op'}, - 'F': {'type': 'Identity', 'kind': 'op'}, - 'G': {'type': 'Identity', 'kind': 'op'}, - 'H': {'type': 'Identity', 'kind': 'op'}, - 'Ad': {'value': None, 'kind': 'data'}, - 'Bd': {'value': None, 'kind': 'data'}, - 'Cd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('C', 0)]}, - 'Dd': {'value': None, 'kind': 'data'}, - 'Ed': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('E', 0)]}, - 'Fd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('F', 0)]}, - 'Gd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('G', 0)]}, - 'Hd': {'value': None, 'kind': 'data', 'fw_tensor_debug_info': [('H', 0)]} - } - edges = [ - ('A', 'Ad', {'out': 0}), - ('Ad', 'B', {'in': 0}), - ('B', 'Bd', {'out': 0}), - ('Bd', 'C', {'in': 0}), - ('C', 'Cd', {'out': 0}), - ('Cd', 'D', {'in': 0}), - ('D', 'Dd', {'out': 0}), - ('Dd', 'E', {'in': 0}), - ('E', 'Ed', {'out': 0}), - ('Cd', 'F', {'in': 0}), - ('F', 'Fd', {'out': 0}), - ('Fd', 'G', {'in': 0}), - ('G', 'Gd', {'out': 0}), - ('Cd', 'H', {'in': 0}), - ('H', 'Hd', {'out': 0}) - ] - graph = build_graph_with_edge_attrs(nodes, edges) - self.assertListEqual([node.id for node in get_sorted_outputs(graph)], ['Ed', 'Gd', 'Hd']) diff --git a/tools/mo/unit_tests/mo/unit_test_with_mocked_telemetry.py b/tools/mo/unit_tests/mo/unit_test_with_mocked_telemetry.py deleted file mode 100644 index 9cedc52921341a..00000000000000 --- a/tools/mo/unit_tests/mo/unit_test_with_mocked_telemetry.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import Mock - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm - - -class UnitTestWithMockedTelemetry(unittest.TestCase): - def setUp(self): - tm.Telemetry.__init__ = Mock(return_value=None) - tm.Telemetry.send_event = Mock() - tm.Telemetry.start_session = Mock() - tm.Telemetry.end_session = Mock() - tm.Telemetry.force_shutdown = Mock() diff --git a/tools/mo/unit_tests/mo/utils/__init__.py b/tools/mo/unit_tests/mo/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/utils/args_to_string_test.py b/tools/mo/unit_tests/mo/utils/args_to_string_test.py deleted file mode 100644 index 572e8971f1c519..00000000000000 --- a/tools/mo/unit_tests/mo/utils/args_to_string_test.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -from openvino.runtime import Layout, Dimension - -from openvino.tools.mo import LayoutMap -from openvino.tools.mo.utils.cli_parser import mean_scale_value_to_str, \ 
- transform_param_to_str, str_list_to_str, source_target_layout_to_str, layout_param_to_str -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry - - -class TestConvertingConvertArgumentsToString(UnitTestWithMockedTelemetry): - def test_mean_scale_value_to_str(self): - values = [0.5, 1.3, 0.67] - self.assertTrue(mean_scale_value_to_str(values) == "[0.5,1.3,0.67]") - - values = {"input": [0.5, 1.3, 0.67]} - self.assertTrue(mean_scale_value_to_str(values) == "input[0.5,1.3,0.67]") - - values = {"input1": [0.5, 1.3, 0.67], "input2": [4.2, 6.7, 3.15], "input3": [0.757, 4.6, 7.3]} - self.assertTrue(mean_scale_value_to_str(values) == - "input1[0.5,1.3,0.67],input2[4.2,6.7,3.15],input3[0.757,4.6,7.3]") - - self.assertRaises(Exception, mean_scale_value_to_str, **{"value": {("a", "b"): [0.5, 1.3, 0.67]}}) - self.assertRaises(Exception, mean_scale_value_to_str, **{"value": {"name": Dimension(1)}}) - self.assertRaises(Exception, mean_scale_value_to_str, **{"value": Dimension(1)}) - - def test_transform_param_to_str(self): - transform = 'MakeStateful' - self.assertTrue(transform_param_to_str(transform) == "MakeStateful") - - transform1 = ('LowLatency2', {'use_const_initializer': False}) - self.assertTrue(transform_param_to_str(transform1) == - "LowLatency2[use_const_initializer=False]") - - transform2 = ('MakeStateful', {'param_res_names': { - 'input_name_1': 'output_name_1', 'input_name_2': 'output_name_2'}}) - self.assertTrue(transform_param_to_str(transform2) == - "MakeStateful[param_res_names={\'input_name_1\':\'output_name_1\'," - "\'input_name_2\':\'output_name_2\'}]") - - transform = [transform1, transform2] - - self.assertTrue(transform_param_to_str(transform) == "LowLatency2[use_const_initializer=False]," - "MakeStateful[param_res_names={" - "\'input_name_1\':\'output_name_1\'," - "\'input_name_2\':\'output_name_2\'}]") - - self.assertRaises(Exception, transform_param_to_str, **{"value": ('LowLatency2', - {'use_const_initializer': False}, - "param")}) - self.assertRaises(Exception, transform_param_to_str, **{"value": (("a", "b"), {})}) - self.assertRaises(Exception, transform_param_to_str, **{"value": ('LowLatency2', Dimension(1))}) - self.assertRaises(Exception, transform_param_to_str, **{"value": ('LowLatency2', - {('a', 'b'): False})}) - self.assertRaises(Exception, transform_param_to_str, **{"value": Dimension(1)}) - - def test_str_list_to_str(self): - list_str = ["data1", "data2", "data3"] - self.assertTrue(str_list_to_str(list_str) == "data1,data2,data3") - - list_str = "data1" - self.assertTrue(str_list_to_str(list_str) == "data1") - - self.assertRaises(Exception, str_list_to_str, **{"values": [int, 1]}) - self.assertRaises(Exception, str_list_to_str, **{"values": Dimension(1)}) - - def test_source_target_layout_to_str(self): - layout = {"input1": Layout("nhwc"), "input2": Layout("n??"), "input3": "nchw"} - self.assertTrue(source_target_layout_to_str(layout) == "input1([N,H,W,C]),input2([N,?,?]),input3(nchw)") - - self.assertRaises(Exception, source_target_layout_to_str, **{"value": {"op": Dimension(1)}}) - self.assertRaises(Exception, source_target_layout_to_str, **{"value": {("a", "b"): Layout("nhwc")}}) - self.assertRaises(Exception, source_target_layout_to_str, **{"value": Dimension(1)}) - - def test_layout_param_to_str_to_str(self): - layout = {"input1": Layout("nhwc"), "input2": Layout("n??"), "input3": "nchw"} - self.assertTrue(layout_param_to_str(layout) == "input1([N,H,W,C]),input2([N,?,?]),input3(nchw)") - - layout_map1 = 
LayoutMap(source_layout=Layout("n??"), target_layout=None) - layout_map2 = LayoutMap(source_layout=Layout("nhwc"), target_layout=("nchw")) - layout_map3 = LayoutMap(source_layout="abc", target_layout="cab") - - layout = {"input1": layout_map1, "input2": layout_map2, "input3": layout_map3, "input4": Layout("nhwc"), - "input5": "n?"} - - self.assertTrue(layout_param_to_str(layout) == "input1([N,?,?]),input2([N,H,W,C]->nchw)," - "input3(abc->cab),input4([N,H,W,C]),input5(n?)") - - self.assertRaises(Exception, layout_param_to_str, **{"value": {"op": Dimension(1)}}) - self.assertRaises(Exception, layout_param_to_str, **{"value": {("a", "b"): Layout("nhwc")}}) - self.assertRaises(Exception, layout_param_to_str, **{"value": Dimension(1)}) - - layout = ["nhwc", "[n,c]"] - self.assertTrue(layout_param_to_str(layout) == "nhwc,[n,c]") - - layout = ["abc->cab", "..nc"] - self.assertTrue(layout_param_to_str(layout) == "abc->cab,..nc") - - layout_map1 = LayoutMap(source_layout=Layout("n??"), target_layout=None) - layout = [layout_map1, "..nc"] - self.assertTrue(layout_param_to_str(layout) == "[N,?,?],..nc") - - layout_map2 = LayoutMap(source_layout=Layout("nhwc"), target_layout=("nchw")) - layout_map3 = LayoutMap(source_layout="abc", target_layout="cab") - layout = [layout_map2, layout_map3] - self.assertTrue(layout_param_to_str(layout) == "[N,H,W,C]->nchw,abc->cab") diff --git a/tools/mo/unit_tests/mo/utils/broadcasting_test.py b/tools/mo/unit_tests/mo/utils/broadcasting_test.py deleted file mode 100644 index 5432f4a27eea0a..00000000000000 --- a/tools/mo/unit_tests/mo/utils/broadcasting_test.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import pytest - -import numpy as np - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, dynamic_dimension_value, shape_array, strict_compare_tensors -from openvino.tools.mo.utils.broadcasting import uni_directional_broadcasting, uni_directional_shape_broadcasting, \ - bi_directional_shape_broadcasting - - -class TestingBroadcasting(): - @pytest.mark.parametrize("input_shape, target_shape, expected_shape",[([], [20, 30, 10], [20, 30, 10]), - ([1], [20, 30, 10], [20, 30, 10]), - ([1, 1, 10], [20, 30, 10], [20, 30, 10]), - ([20, 1, 10], [20, 30, 10], [20, 30, 10]), - ([20, 30, 10], [20, 30, 10], [20, 30, 10]), - ([20, 30, 10], [5, 7, 20, 30, 10], [5, 7, 20, 30, 10]), - ([1, 2], [20, 3, 10, 2], [20, 3, 10, 2]), - ([1, 1], [1], None), - ([5, 10], [1, 10], None), - ]) - def test_uni_directional_broadcasting(self, input_shape, target_shape, expected_shape): - assert np.array_equal(uni_directional_shape_broadcasting(input_shape, target_shape), expected_shape) - - input_value = np.array(np.random.rand(*input_shape)) - if expected_shape is not None: - expected_value = np.broadcast_to(input_value, int64_array(target_shape)) - assert np.array_equal(uni_directional_broadcasting(input_value, int64_array(target_shape)), - expected_value) - else: - with pytest.raises(Exception,match = '.*cannot be uni-directionally broadcasted.*'): - uni_directional_broadcasting(input_value, int64_array(target_shape)) - - @pytest.mark.parametrize("input_shape, target_shape, expected_shape",[([], [20, 30, 10], [20, 30, 10]), - ([1], [20, 30, 10], [20, 30, 10]), - ([1, 1, 10], [20, 30, 10], [20, 30, 10]), - ([20, 1, 10], [20, 30, 10], [20, 30, 10]), - ([20, 30, 10], [20, 30, 10], [20, 30, 10]), - ([20, 30, 10], [5, 7, 20, 30, 10], [5, 7, 20, 30, 10]), - ([1, 2], [20, 3, 10, 2], [20, 3, 10, 2]), - ([1, 
1], [1], None), - ([5, 10], [1, 10], None), - ([10, 2], shape_array([dynamic_dimension_value, 3, 10, 2]), - shape_array([dynamic_dimension_value, 3, 10, 2])), - (shape_array([10, dynamic_dimension_value]), shape_array([dynamic_dimension_value, 3, 10, 2]), - shape_array([dynamic_dimension_value, 3, 10, 2])), - (shape_array([dynamic_dimension_value, 2]), shape_array([dynamic_dimension_value, 3, 10, 2]), - shape_array([dynamic_dimension_value, 3, 10, 2])), - (shape_array([dynamic_dimension_value]), shape_array([1]), shape_array([1])), - (shape_array([1]), shape_array([dynamic_dimension_value]), shape_array([dynamic_dimension_value])), - (shape_array([dynamic_dimension_value]), shape_array([6]), shape_array([6])), - (shape_array([6]), shape_array([dynamic_dimension_value]), shape_array([6])), - ]) - def test_uni_directional_shape_broadcasting(self, input_shape, target_shape, expected_shape): - result = uni_directional_shape_broadcasting(input_shape, target_shape) - if expected_shape is None: - assert result is None - else: - assert strict_compare_tensors(result, expected_shape) - - @pytest.mark.parametrize("input_shape, target_shape, expected_shape",[([], [20, 30, 10], [20, 30, 10]), - ([1], [20, 30, 10], [20, 30, 10]), - ([1, 1, 10], [20, 30, 10], [20, 30, 10]), - ([20, 1, 10], [20, 30, 10], [20, 30, 10]), - ([20, 30, 10], [20, 30, 10], [20, 30, 10]), - ([20, 30, 10], [5, 7, 20, 30, 10], [5, 7, 20, 30, 10]), - ([1, 2], [20, 3, 10, 2], [20, 3, 10, 2]), - ([3, 2], [3], None), - ([5, 10], [1, 20], None), - ([10, 2], shape_array([dynamic_dimension_value, 3, 1, 2]), - shape_array([dynamic_dimension_value, 3, 10, 2])), - (shape_array([10, dynamic_dimension_value]), shape_array([dynamic_dimension_value, 3, 1, 2]), - shape_array([dynamic_dimension_value, 3, 10, 2])), - (shape_array([dynamic_dimension_value, 2]), shape_array([dynamic_dimension_value, 3, 10, 1]), - shape_array([dynamic_dimension_value, 3, 10, 2])), - (shape_array([dynamic_dimension_value]), shape_array([1]), shape_array([dynamic_dimension_value])), - (shape_array([1]), shape_array([dynamic_dimension_value]), shape_array([dynamic_dimension_value])), - (shape_array([dynamic_dimension_value]), shape_array([6]), shape_array([6])), - (shape_array([6]), shape_array([dynamic_dimension_value]), shape_array([6])), - ]) - def test_bi_directional_shape_broadcasting(self, input_shape, target_shape, expected_shape): - result = bi_directional_shape_broadcasting(input_shape, target_shape) - if expected_shape is None: - assert result is None - else: - assert strict_compare_tensors(result, expected_shape) diff --git a/tools/mo/unit_tests/mo/utils/cli_parser_test.py b/tools/mo/unit_tests/mo/utils/cli_parser_test.py deleted file mode 100644 index 7413845bd7ece7..00000000000000 --- a/tools/mo/unit_tests/mo/utils/cli_parser_test.py +++ /dev/null @@ -1,2071 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import numpy -import os -import shutil -import sys -import tempfile -import unittest -from unittest.mock import patch - -import numpy as np - -from openvino.tools.mo.utils.cli_parser import get_placeholder_shapes, get_tuple_values, get_mean_scale_dictionary, \ - get_model_name, \ - parse_tuple_pairs, check_positive, writable_dir, readable_dirs, \ - readable_file, get_freeze_placeholder_values, parse_transform, check_available_transforms, get_layout_values, get_all_cli_parser, \ - get_mo_convert_params -from openvino.tools.mo.convert_impl import pack_params_to_args_namespace -from 
openvino.tools.mo.utils.error import Error -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from openvino.runtime import PartialShape, Dimension, Layout -from openvino.tools.mo import LayoutMap, InputCutInfo - - -class TestingMeanScaleGetter(UnitTestWithMockedTelemetry): - def test_tuple_parser(self): - tuple_values = "data(1.1,22.22,333.333),info[2.2,33.33,444.444]" - result = parse_tuple_pairs(tuple_values) - exp_res = { - 'data': np.array([1.1, 22.22, 333.333]), - 'info': np.array([2.2, 33.33, 444.444]) - } - for el in exp_res.keys(): - assert np.array_equal(result[el], exp_res[el]) - - def test_tuple_parser_name_digits_only(self): - tuple_values = "0448(1.1,22.22,333.333),0449[2.2,33.33,444.444]" - result = parse_tuple_pairs(tuple_values) - exp_res = { - '0448': np.array([1.1, 22.22, 333.333]), - '0449': np.array([2.2, 33.33, 444.444]) - } - for el in exp_res.keys(): - assert np.array_equal(result[el], exp_res[el]) - - def test_tuple_parser_same_values(self): - tuple_values = "data(1.1,22.22,333.333),info[1.1,22.22,333.333]" - result = parse_tuple_pairs(tuple_values) - exp_res = { - 'data': np.array([1.1, 22.22, 333.333]), - 'info': np.array([1.1, 22.22, 333.333]) - } - for el in exp_res.keys(): - assert np.array_equal(result[el], exp_res[el]) - - def test_tuple_parser_no_inputs(self): - tuple_values = "(1.1,22.22,333.333),[2.2,33.33,444.444]" - result = parse_tuple_pairs(tuple_values) - exp_res = [np.array([1.1, 22.22, 333.333]), - np.array([2.2, 33.33, 444.444])] - for i in range(0, len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_tuple_parser_error_mixed_with_and_without_name(self): - tuple_values = "(1.1,22.22,333.333),data[2.2,33.33,444.444]" - self.assertRaises(Error, parse_tuple_pairs, tuple_values) - - def test_tuple_parser_error_mixed_with_and_without_name_1(self): - tuple_values = "data(1.1,22.22,333.333),[2.2,33.33,444.444]" - self.assertRaises(Error, parse_tuple_pairs, tuple_values) - - def test_tuple_parser_error_mixed_with_and_without_name_digits(self): - tuple_values = "(0.1,22.22,333.333),0448[2.2,33.33,444.444]" - self.assertRaises(Error, parse_tuple_pairs, tuple_values) - - def test_tuple_parser_error_mixed_with_and_without_name_digits_1(self): - tuple_values = "447(1.1,22.22,333.333),[2.2,33.33,444.444]" - self.assertRaises(Error, parse_tuple_pairs, tuple_values) - - def test_mean_scale_no_input(self): - mean_values = "data(1.1,22.22,333.333)" - scale_values = "info[1.1,22.22,333.333]" - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': None - }, - 'info': { - 'mean': None, - 'scale': np.array([1.1, 22.22, 333.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_mean_scale_no_input_diff_len(self): - mean_values = "data(1.1,22.22,333.333),info(2.1,33.22,333.333)" - scale_values = "info[1.1,22.22,333.333]" - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': None - }, - 'info': { - 'mean': np.array([2.1, 33.22, 333.333]), - 'scale': np.array([1.1, 22.22, 333.333]) - } - } - for input in exp_res.keys(): - for key in 
exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_mean_only_input(self): - mean_values = "data(1.1,22.22,333.333)" - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(''), None) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': None - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_scale_only_input(self): - scale_values = "data(1.1,22.22,333.333)" - result = get_mean_scale_dictionary(parse_tuple_pairs(''), parse_tuple_pairs(scale_values), None) - exp_res = { - 'data': { - 'mean': None, - 'scale': np.array([1.1, 22.22, 333.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_scale_only_no_input(self): - scale_values = "(1.1,22.22,333.333)" - mean_values = "" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, None) - exp_res = [ - [ - None, - np.array([1.1, 22.22, 333.333]) - ] - ] - for i in range(len(exp_res)): - for j in range(len(exp_res[i])): - if type(exp_res[i][j]) is np.ndarray: - assert np.array_equal(exp_res[i][j], result[i][j]) - else: - self.assertEqual(exp_res[i][j], result[i][j]) - - def test_scale_only_with_input(self): - scale_values = "(1.1,22.22,333.333)" - mean_values = "" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, 'data') - exp_res = { - 'data': { - 'mean': None, - 'scale': np.array([1.1, 22.22, 333.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_2_scale_only_with_input(self): - scale_values = "(1.1,22.22,333.333),(1.2,22.33,333.444)" - mean_values = "" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, 'data,info') - exp_res = { - 'data': { - 'mean': None, - 'scale': np.array([1.1, 22.22, 333.333]) - }, - 'info': { - 'mean': None, - 'scale': np.array([1.2, 22.33, 333.444]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_2_mean_only_with_input(self): - scale_values = "" - mean_values = "(1.1,22.22,333.333),(1.2,22.33,333.444)" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, 'data,info') - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': None, - }, - 'info': { - 'mean': np.array([1.2, 22.33, 333.444]), - 'scale': None, - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is 
np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_mean_only_with_input(self): - scale_values = "" - mean_values = "(1.1,22.22,333.333)" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, 'data') - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': None - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_mean_scale_diff_no_input(self): - scale_values = "(1.1,22.22,333.333),(1.1,22.22,333.333)" - mean_values = "(2.1,11.22,444.333)" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, None) - exp_res = [ - [ - np.array([2.1, 11.22, 444.333]), # mean - np.array([1.1, 22.22, 333.333]) # scale - ], - [ - None, # mean - np.array([1.1, 22.22, 333.333]) # scale - ] - ] - for i in range(len(exp_res)): - for j in range(len(exp_res[i])): - if type(exp_res[i][j]) is np.ndarray: - assert np.array_equal(exp_res[i][j], result[i][j]) - else: - self.assertEqual(exp_res[i][j], result[i][j]) - - def test_multi_mean_scale_no_input(self): - mean_values = "data(1.1,22.22,333.333),info(2.1,33.22,444.333)" - scale_values = "data[1.1,22.22,333.333],info[2.1,33.22,444.333]" - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': np.array([1.1, 22.22, 333.333]) - }, - 'info': { - 'mean': np.array([2.1, 33.22, 444.333]), - 'scale': np.array([2.1, 33.22, 444.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_multi_mean_scale_input(self): - mean_values = "data(1.1,22.22,333.333),info(2.1,33.22,444.333)" - scale_values = "data[1.1,22.22,333.333],info[2.1,33.22,444.333]" - input_names = 'data,info' - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), input_names) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': np.array([1.1, 22.22, 333.333]) - }, - 'info': { - 'mean': np.array([2.1, 33.22, 444.333]), - 'scale': np.array([2.1, 33.22, 444.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_multi_mean_scale_input_arrays(self): - mean_values = "(1.1,22.22,333.333),(2.1,33.22,444.333)" - scale_values = "[1.1,22.22,333.333],[2.1,33.22,444.333]" - input_names = 'data,info' - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), input_names) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': np.array([1.1, 22.22, 333.333]) - }, - 'info': { - 'mean': np.array([2.1, 33.22, 444.333]), - 'scale': np.array([2.1, 33.22, 444.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if 
type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_multi_mean_scale_arrays_no_input(self): - mean_values = "(1.1,22.22,333.333),(2.1,33.22,444.333)" - scale_values = "[1.1,22.22,333.333],[2.1,33.22,444.333]" - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None) - exp_res = [ - [ - np.array([1.1, 22.22, 333.333]), # mean - np.array([1.1, 22.22, 333.333]) # scale - ], - [ - np.array([2.1, 33.22, 444.333]), # mean - np.array([2.1, 33.22, 444.333]) # scale - ] - ] - for i in range(0, len(exp_res)): - for j in range(0, len(exp_res[i])): - assert np.array_equal(exp_res[i][j], result[i][j]) - - def test_scale_do_not_match_input(self): - scale_values = parse_tuple_pairs("input_not_present(255),input2(255)") - mean_values = parse_tuple_pairs("input1(255),input2(255)") - self.assertRaises(Error, get_mean_scale_dictionary, mean_values, scale_values, "input1,input2") - - def test_mean_do_not_match_input(self): - scale_values = parse_tuple_pairs("input1(255),input2(255)") - mean_values = parse_tuple_pairs("input_not_present(255),input2(255)") - self.assertRaises(Error, get_mean_scale_dictionary, mean_values, scale_values, "input1,input2") - - def test_values_match_input_name(self): - # to be sure that we correctly processes complex names - res_values = parse_tuple_pairs("input255(255),input255.0(255.0),multi-dotted.input.3.(255,128,64)") - exp_res = {'input255': np.array([255.0]), - 'input255.0': np.array([255.0]), - 'multi-dotted.input.3.': np.array([255., 128., 64.])} - self.assertEqual(len(exp_res), len(res_values)) - for i, j in zip(exp_res, res_values): - self.assertEqual(i, j) - assert np.array_equal(exp_res[i], res_values[j]) - - def test_input_without_values(self): - self.assertRaises(Error, parse_tuple_pairs, "input1,input2") - - -class TestSingleTupleParsing(UnitTestWithMockedTelemetry): - def test_get_values_ideal(self): - values = "(1.11, 22.22, 333.333)" - result = get_tuple_values(values) - exp_res = ['1.11, 22.22, 333.333'] - self.assertEqual(exp_res, result) - - def test_get_values_ideal_spaces(self): - values = "(1 , 22 ,333)" - result = get_tuple_values(values) - exp_res = ['1 , 22 ,333'] - self.assertEqual(exp_res, result) - - def test_get_values_ideal_square(self): - values = "[1,22,333]" - result = get_tuple_values(values) - exp_res = ['1,22,333'] - self.assertEqual(exp_res, result) - - def test_get_values_ideal_square_spaces(self): - values = "[1 , 22 ,333]" - result = get_tuple_values(values) - exp_res = ['1 , 22 ,333'] - self.assertEqual(exp_res, result) - - def test_get_neg_values_ideal(self): - values = "(-1,-22,-333)" - result = get_tuple_values(values) - exp_res = ['-1,-22,-333'] - self.assertEqual(exp_res, result) - - def test_get_neg_values_minus(self): - values = "(-1,--22,-3-33)" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_unbalanced(self): - values = "(1,22,333]" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_unbalanced2(self): - values = "[1,22,333)" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_exactly_3(self): - values = "[1,22,333,22]" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_exactly_3_1(self): - values = "[1,22]" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_empty(self): - values = "" - self.assertRaises(Error, 
get_tuple_values, values) - - def test_get_values_empty_tuple(self): - values = () - result = get_tuple_values(values) - exp_res = () - self.assertEqual(exp_res, result) - - -class TestShapesParsing(UnitTestWithMockedTelemetry): - def test_get_shapes_several_inputs_several_shapes(self): - argv_input = "inp1,inp2" - input_shapes = "(1,22,333,123), (-1,45,7,1)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': np.array([1, 22, 333, 123]), 'inp2': np.array([-1, 45, 7, 1])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_shapes_several_inputs_several_shapes2(self): - # shapes specified using --input command line parameter and no values - argv_input = "inp1[1 22 333 123],inp2[-1 45 7 1]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': np.array([1, 22, 333, 123]), 'inp2': np.array([-1, 45, 7, 1])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {} - input_node_names_ref = "inp1,inp2" - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_and_freezing_with_scalar_and_without_shapes_in_input(self): - # shapes and value for freezing specified using --input command line parameter - argv_input = "inp1,inp2->157" - input_list, result_shapes, _ = get_placeholder_shapes(argv_input, None) - ref_shapes = {'inp1': None, 'inp2': None} - self.assertEqual(list(ref_shapes.keys()), list(result_shapes.keys())) - self.assertEqual(input_list, ["inp1","inp2"]) - for i in ref_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_shapes[i]) - - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp2': 157} - - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - for i in placeholder_values_ref.keys(): - self.assertEqual(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_and_freezing_with_scalar(self): - # shapes and value for freezing specified using --input command line parameter - argv_input = "inp1,inp2[]->157" - input_list, result_shapes, _ = get_placeholder_shapes(argv_input, None) - ref_shapes = {'inp1': None, 'inp2': ()} - self.assertEqual(list(ref_shapes.keys()), list(result_shapes.keys())) - for i in ref_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_shapes[i]) - self.assertEqual(input_list, ["inp1","inp2"]) - - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp2': 157} - - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - for i in placeholder_values_ref.keys(): - self.assertEqual(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_shapes3(self): - # shapes and value for freezing specified using --input command line parameter - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[5]->[1.0 1.0 2.0 3.0 5.0]" - 
input_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), - 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} - input_node_names_ref = "inp1,inp2,inp3" - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(input_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_shapes3_comma_sep(self): - # shapes and value for freezing specified using --input command line parameter - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[5]->[1.0, 1.0, 2.0, 3.0,5.0]" - input_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), - 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} - input_node_names_ref = "inp1,inp2,inp3" - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(input_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_shapes4(self): - # shapes specified using --input_shape and values for freezing using --input command line parameter - argv_input = "inp1->[1.0 2.0 3.0],inp2,inp3->[1.0 1.0 2.0 3.0 5.0]" - input_shapes = "(3,1), (3,2,3), (5)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), - 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} - input_node_names_ref = "inp1,inp2,inp3" - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - self.assertEqual(input_node_names_ref, input_node_names_res) - - def test_get_shapes_several_inputs_several_shapes5(self): - # some values for freezing specified using --freeze_placeholder_with_value - argv_input = "inp1->[1.0 2.0 3.0],inp2,inp3->[1.0 1.0 2.0 3.0 5.0]" - input_shapes = "(3,1), (3,2,3), (5)" - argv_freeze_placeholder_with_value = "inp2->[5.0 7.0 3.0],inp4->[100.0 200.0]" - - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': 
np.array([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, - argv_freeze_placeholder_with_value) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), - 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'], ), - 'inp2': np.array(['5.0', '7.0', '3.0']), 'inp4': np.array(['100.0', '200.0'])} - input_node_names_ref = "inp1,inp2,inp3" - self.assertEqual(sorted(list(placeholder_values_res.keys())), sorted(list(placeholder_values_ref.keys()))) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - self.assertEqual(input_node_names_ref, input_node_names_res) - - def test_get_shapes_several_inputs_several_shapes6(self): - # 0D value for freezing specified using --input command line parameter without shape - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3->False" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), 'inp2': PartialShape([3, 2, 3]), 'inp3': None} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': False} - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_shapes7(self): - # 0D shape and value for freezing specified using --input command line parameter - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[]->True" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array(False).shape} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': True} - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_and_data_types1(self): - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3]{i32},inp3[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array([5])} - ref_result_data_types = {'inp2': np.int32, 'inp3': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1","inp2","inp3"]) - for i in 
ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_get_shapes_and_data_types_with_input_ports(self): - argv_input = "1:inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3]{i32},0:inp3[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'1:inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), '0:inp3': np.array([5])} - ref_result_data_types = {'inp2': np.int32, '0:inp3': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["1:inp1","inp2","0:inp3"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_get_shapes_and_data_types_with_output_ports(self): - argv_input = "inp1:1[3 1]->[1.0 2.0 3.0],inp2[3 2 3]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3:4': np.array([5])} - ref_result_data_types = {'inp2': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1","inp2","inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_get_shapes_and_data_types_with_output_ports_comma_sep(self): - argv_input = "inp1:1[3,1]->[1.0,2.0 ,3.0],inp2[3,2, 3]{i32},inp3:4[5]{f32}->[1.0, 1.0,2.0, 3.0,5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3:4': np.array([5])} - ref_result_data_types = {'inp2': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1","inp2","inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_get_shapes_and_data_types_shape_only(self): - argv_input = "placeholder1[3 1],placeholder2,placeholder3" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'placeholder1': np.array([3, 1]), 'placeholder2': None, - 'placeholder3': None} - ref_result_data_types = {} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["placeholder1","placeholder2","placeholder3"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def 
test_get_shapes_and_data_types_shape_with_ports_only(self): - argv_input = "placeholder1:4[3 1],placeholder2,2:placeholder3" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'placeholder1:4': np.array([3, 1]), 'placeholder2': None, - '2:placeholder3': None} - ref_result_data_types = {} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["placeholder1:4","placeholder2","2:placeholder3"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_get_shapes_and_data_types_when_no_freeze_value(self): - argv_input = "placeholder1{i32}[3 1],placeholder2,placeholder3{i32}" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'placeholder1': np.array([3, 1]), 'placeholder2': None, - 'placeholder3': None} - ref_result_data_types = {'placeholder1': np.int32, 'placeholder3': np.int32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["placeholder1","placeholder2","placeholder3"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_wrong_data_types(self): - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3]{abracadabra},inp3[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") - - def test_shapes_specified_using_both_params(self): - # shapes specified using both command line parameter --input and --input_shape - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[5]->[1.0 1.0 2.0 3.0 5.0]" - input_shapes = "(3,1), (3,2,3), (5)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_shape_and_value_shape_mismatch(self): - # size of value tensor does not correspond to specified shape for the third node - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[5 3]->[2.0 3.0 5.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, None) - - def test_wrong_data_for_input_cmd_param(self): - # test that wrongly formatted data specified in --input is handled properly - argv_input = "abc->[1.0" - self.assertRaises(Error, get_freeze_placeholder_values, argv_input, None) - argv_input = "def[2 2]->[1.0 2.0 3.0 4.0],abc->1.0 34]" - self.assertRaises(Error, get_freeze_placeholder_values, argv_input, None) - - def test_get_shapes_several_inputs_several_shapes_not_equal(self): - argv_input = "inp1,inp2,inp3" - input_shapes = "(1,22,333,123), (-1,45,7,1)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_several_shapes_one_input(self): - argv_input = "inp1" - input_shapes = "(1,22,333,123), (-1,45,7,1), (-1,456,7,1)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_one_shape(self): - argv_input = "inp1" - input_shapes = "(1,22,333,123)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': np.array([1, 22, 333, 123])} - 
self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_shapes_no_input_no_shape(self): - argv_input = "" - input_shapes = "" - _, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = None - assert np.array_equal(result, exp_res) - - def test_get_shapes_no_input_one_shape(self): - argv_input = "" - input_shapes = "(12,4,1)" - _, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = np.array([12, 4, 1]) - assert np.array_equal(result, exp_res) - - def test_get_shapes_no_input_one_shape2(self): - argv_input = "" - input_shapes = "[12,4,1]" - _, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = np.array([12, 4, 1]) - assert np.array_equal(result, exp_res) - - - def test_get_shapes_one_input_no_shape(self): - argv_input = "inp1" - input_shapes = "" - input_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': None} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(input_list, ["inp1"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_shapes_one_input_wrong_shape8(self): - argv_input = "inp1" - input_shapes = "[2,4,1)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape9(self): - argv_input = "inp1" - input_shapes = "(2,4,1]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape10(self): - argv_input = "inp1" - input_shapes = "(2,,,4,1]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape2(self): - argv_input = "inp1" - input_shapes = "(2,4,1" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape3(self): - argv_input = "inp1" - input_shapes = "2,4,1" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape4(self): - argv_input = "inp1" - input_shapes = "2;4;1" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape5(self): - argv_input = "inp1" - input_shapes = "2, 4,1" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape6(self): - argv_input = "inp1" - input_shapes = "(2, 4,1),[4,6,8]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape7(self): - argv_input = "inp1" - input_shapes = "[2,4,1],(4,6,8)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_several_shapes(self): - argv_input = "inp1" - input_shapes = "(2,4,1),(4,6,8)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_first_neg_shape1(self): - argv_input = "inp1,inp2" - input_shapes = "(-1,4,1),(4,6,8)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': np.array([-1, 4, 1]), 'inp2': np.array([4, 6, 8])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def 
test_get_shapes_one_input_first_neg_shape_not_one(self): - argv_input = "inp1" - input_shapes = "(-12,4,1),(4,6,8)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_long_dimension_with_invalid_character(self): - # test for regular expression denial of service - argv_input = "inp1,inp2" - input_shapes = "(222222222222222222222222222222222222222222!,4,1),(4,6,8)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_any_neg_shape(self): - argv_input = "inp1, inp2" - input_shapes = "(12,4,1),(4,-6,8)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_several_inputs_several_partial_shapes(self): - argv_input = "inp1,inp2" - input_shapes = "(1,..22,1..100,?), (-1,45..,7,1)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': PartialShape([1, Dimension(0, 22), Dimension(1, 100), -1]), 'inp2': PartialShape([-1, Dimension(45, -1), 7, 1])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_shapes_several_inputs_several_partial_shapes2(self): - # shapes specified using --input command line parameter and no values - argv_input = "inp1[1 ? 50..100 123],inp2[-1 45.. ..7 1]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([1, -1, (50, 100), 123]), 'inp2': PartialShape([-1, Dimension(45,-1), Dimension(0, 7), 1])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {} - input_node_names_ref = "inp1,inp2" - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_partial_shapes3(self): - # shapes and value for freezing specified using --input command line parameter - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3.. ..2 5..10 ? 
-1],inp3[5]->[1.0 1.0 2.0 3.0 5.0]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': (3, 1), 'inp2': PartialShape([Dimension(3, -1), Dimension(0, 2), Dimension(5, 10), -1, -1]), 'inp3': (5,)} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} - input_node_names_ref = "inp1,inp2,inp3" - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_partial_shapes4(self): - # shapes specified using --input_shape and values for freezing using --input command line parameter - argv_input = "inp1->[1.0 2.0 3.0],inp2,inp3->[1.0 1.0 2.0 3.0 5.0]" - input_shapes = "(3,1), (3..,..2,5..10,?,-1), (5)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': (3, 1), 'inp2': PartialShape([Dimension(3, -1), Dimension(0, 2), Dimension(5, 10), -1, -1]), 'inp3': (5,)} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} - input_node_names_ref = "inp1,inp2,inp3" - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - self.assertEqual(input_node_names_ref, input_node_names_res) - - def test_get_shapes_several_inputs_several_partial_shapes5(self): - # some values for freezing specified using --freeze_placeholder_with_value - argv_input = "inp1->[1.0 2.0 3.0],inp2,inp3->[1.0 1.0 2.0 3.0 5.0]" - input_shapes = "(3,1), (3..,..2,5..10,?,-1), (5)" - argv_freeze_placeholder_with_value = "inp2->[5.0 7.0 3.0],inp4->[100.0 200.0]" - - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': PartialShape([3, 1]), 'inp2': PartialShape([(3, np.iinfo(np.int64).max), (0, 2), (5, 10), -1, -1]), 'inp3': PartialShape([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, argv_freeze_placeholder_with_value) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'],), - 'inp2': np.array(['5.0', '7.0', '3.0']), 'inp4': np.array(['100.0', '200.0'])} - input_node_names_ref = "inp1,inp2,inp3" - self.assertEqual(sorted(list(placeholder_values_res.keys())), sorted(list(placeholder_values_ref.keys()))) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - self.assertEqual(input_node_names_ref, 
input_node_names_res) - - def test_get_shapes_several_inputs_several_partial_shapes6(self): - # 0D value for freezing specified using --input command line parameter without shape - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3.. ..2 5..10 ? -1],inp3->False" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), 'inp2': PartialShape([(3, np.iinfo(np.int64).max), (0, 2), (5, 10), -1, -1]), 'inp3': None} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': False} - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_partial_shapes7(self): - # 0D shape and value for freezing specified using --input command line parameter - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3.. ..2 5..10 ? -1],inp3[]->True" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), 'inp2': PartialShape([(3, np.iinfo(np.int64).max), (0, 2), (5, 10), -1, -1]), 'inp3': np.array(False).shape} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': True} - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_and_data_types_partial_shape_with_input_port(self): - argv_input = "inp1:1[3 1]->[1.0 2.0 3.0],0:inp2[3.. ..2 5..10 ? -1]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': PartialShape([3, 1]), '0:inp2': PartialShape([Dimension(3, -1), Dimension(-1, 2), Dimension(5, 10), -1, -1]), 'inp3:4': np.array([5])} - ref_result_data_types = {'0:inp2': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1","0:inp2","inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_get_shapes_and_data_types_partial_shape_with_output_port(self): - argv_input = "inp1:1[3 1]->[1.0 2.0 3.0],inp2:3[3.. ..2 5..10 ? 
-1]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': PartialShape([3, 1]), 'inp2:3': PartialShape([Dimension(3, -1), Dimension(0, 2), Dimension(5, 10), -1, -1]), 'inp3:4': PartialShape([5])} - ref_result_data_types = {'inp2:3': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1","inp2:3","inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_partial_shapes_negative_case(self): - argv_input = "inp1" - input_shapes = "[6754fg..23ed]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_partial_shapes_freeze_dynamic_negative_case1(self): - argv_input = "inp1:1[3 1..10]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") - - def test_partial_shapes_freeze_dynamic_negative_case2(self): - argv_input = "inp1:1[1 2 -1]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") - - def test_partial_shapes_freeze_dynamic_negative_case3(self): - # some values for freezing specified using --freeze_placeholder_with_value - argv_input = "inp1->[1.0 2.0 3.0]" - input_shapes = "[3,1..10]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_several_inputs_several_partial_shapes2_comma_separator(self): - # shapes specified using --input command line parameter and no values - argv_input = "inp1[1,?,50..100,123],inp2[-1,45..,..7,1]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([1, -1, (50, 100), 123]), - 'inp2': PartialShape([-1, Dimension(45, -1), Dimension(0, 7), 1])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {} - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1", "inp2"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_partial_shapes3_comma_separator(self): - # shapes and value for freezing specified using --input command line parameter - argv_input = "inp1[3,1]->[1.0 2.0 3.0],inp2[3..,..2,5..10,?,-1],inp3[5]->[1.0 1.0 2.0 3.0 5.0]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), 'inp2': PartialShape([Dimension(3, -1), Dimension(0, 2), Dimension(5, 10), -1, -1]), - 'inp3': PartialShape([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), - 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - 
self.assertEqual(inputs_list, ["inp1", "inp2", "inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_partial_shapes6_comma_separator(self): - # 0D value for freezing specified using --input command line parameter without shape - argv_input = "inp1[3, 1]->[1.0 2.0 3.0],inp2[3.., ..2, 5..10, ?,-1],inp3->False" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), - 'inp2': PartialShape([(3, np.iinfo(np.int64).max), (0, 2), (5, 10), -1, -1]), 'inp3': None} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': False} - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1", "inp2", "inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_partial_shapes7_comma_separator(self): - # 0D shape and value for freezing specified using --input command line parameter - argv_input = "inp1[3,1]->[1.0 2.0 3.0],inp2[3.., ..2,5..10, ?,-1],inp3[]->True" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), - 'inp2': PartialShape([(3, np.iinfo(np.int64).max), (0, 2), (5, 10), -1, -1]), - 'inp3': np.array(False).shape} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': True} - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1", "inp2", "inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_and_data_types_partial_shape_with_input_port_comma_separator(self): - argv_input = "inp1:1[3,1]->[1.0 2.0 3.0],0:inp2[ 3.. 
,..2, 5..10, ?,-1]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': PartialShape([3, 1]), - '0:inp2': PartialShape([Dimension(3, -1), Dimension(-1, 2), Dimension(5, 10), -1, -1]), - 'inp3:4': np.array([5])} - ref_result_data_types = {'0:inp2': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1", "0:inp2", "inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_get_shapes_and_data_types_partial_shape_with_output_port_comma_separator(self): - argv_input = "inp1:1[3,1]->[1.0 2.0 3.0],inp2:3[3..,..2,5..10,?,-1]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': PartialShape([3, 1]), - 'inp2:3': PartialShape([Dimension(3, -1), Dimension(0, 2), Dimension(5, 10), -1, -1]), - 'inp3:4': PartialShape([5])} - ref_result_data_types = {'inp2:3': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1", "inp2:3", "inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_partial_shapes_freeze_dynamic_negative_case1_comma_separator(self): - argv_input = "inp1:1[3,1..10]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") - - def test_partial_shapes_freeze_dynamic_negative_case2_comma_separator(self): - argv_input = "inp1:1[1,2,-1]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") - - def test_partial_shapes_freeze_dynamic_negative_case3_comma_separator(self): - argv_input = "inp1:1[3,1..10]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") - - def test_partial_shapes_freeze_dynamic_negative_case4_comma_separator(self): - argv_input = "inp1:1[1, 2, -1]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") - - -class TestModelNameParsing(unittest.TestCase): - def test_model_name_ideal(self): - model_name = '/home/models/mymodel.caffemodel' - res = get_model_name(model_name) - exp_res = 'mymodel' - self.assertEqual(exp_res, res) - - def test_model_name_no_name(self): - model_name = '/home/models/.caffemodel' - res = get_model_name(model_name) - exp_res = 'model' - self.assertEqual(exp_res, res) - - def test_model_name_no_ext(self): - model_name = '/home/models/caffemodel' - res = get_model_name(model_name) - exp_res = 'caffemodel' - self.assertEqual(exp_res, res) - - def test_model_name_no_name_no_path(self): - model_name = '.caffemodel' - res = get_model_name(model_name) - exp_res = 'model' - self.assertEqual(exp_res, res) - - @patch("openvino.tools.mo.utils.cli_parser.os") - def test_model_name_win(self, old_os): - old_os.path.basename.return_value = "caffemodel" - old_os.path.splitext.return_value = ("caffemodel", "") - model_name = 
r'\home\models\caffemodel' - res = get_model_name(model_name) - - exp_res = 'caffemodel' - self.assertEqual(exp_res, res) - - def test_model_name_dots(self): - model_name = r'/home/models/squeezenet_v1.1.caffemodel' - res = get_model_name(model_name) - exp_res = 'squeezenet_v1.1' - self.assertEqual(exp_res, res) - - -class PositiveChecker(unittest.TestCase): - def test_positive_checker_batch(self): - res = check_positive('1') - self.assertEqual(res, 1) - - def test_positive_checker_batch_negative(self): - self.assertRaises(argparse.ArgumentTypeError, check_positive, '-1') - - def test_positive_checker_batch_not_int(self): - self.assertRaises(argparse.ArgumentTypeError, check_positive, 'qwe') - - -class PathCheckerFunctions(unittest.TestCase): - READABLE_DIR = tempfile.gettempdir() - WRITABLE_DIR = os.path.join(tempfile.gettempdir(), 'writable_dir') - WRITABLE_NON_EXISTING_DIR = os.path.join(WRITABLE_DIR, 'non_existing_dir') - NOT_WRITABLE_DIR = os.path.join(tempfile.gettempdir(), 'not_writable_dir') - NOT_WRITABLE_SUB_DIR = os.path.join(tempfile.gettempdir(), 'another_not_writable_dir', 'not_existing_dir') - EXISTING_FILE = tempfile.NamedTemporaryFile(mode='r+', delete=False).name - NOT_EXISTING_FILE = '/abcd/efgh/ijkl' - - @classmethod - def setUpClass(cls): - if not os.path.exists(__class__.WRITABLE_DIR): - os.makedirs(__class__.WRITABLE_DIR) - if os.path.exists(__class__.WRITABLE_NON_EXISTING_DIR): - os.removedirs(__class__.WRITABLE_NON_EXISTING_DIR) - - if not os.path.exists(__class__.NOT_WRITABLE_DIR): - os.makedirs(__class__.NOT_WRITABLE_DIR) - os.chmod(__class__.NOT_WRITABLE_DIR, 0) - - if not os.path.exists(os.path.dirname(__class__.NOT_WRITABLE_SUB_DIR)): - os.makedirs(os.path.dirname(__class__.NOT_WRITABLE_SUB_DIR)) - os.chmod(os.path.dirname(__class__.NOT_WRITABLE_SUB_DIR), 0) - if os.path.exists(__class__.NOT_EXISTING_FILE): - os.remove(__class__.NOT_EXISTING_FILE) - - @classmethod - def tearDownClass(cls): - if os.path.exists(__class__.WRITABLE_DIR): - os.removedirs(__class__.WRITABLE_DIR) - if os.path.exists(__class__.NOT_WRITABLE_DIR): - shutil.rmtree(__class__.NOT_WRITABLE_DIR, ignore_errors=True) - if os.path.exists(os.path.dirname(__class__.NOT_WRITABLE_SUB_DIR)): - shutil.rmtree(os.path.dirname(__class__.NOT_WRITABLE_SUB_DIR), ignore_errors=True) - if os.path.exists(__class__.EXISTING_FILE): - os.remove(__class__.EXISTING_FILE) - - def test_single_writable_dir(self): - self.assertEqual(__class__.WRITABLE_DIR, writable_dir(__class__.WRITABLE_DIR)) - - @unittest.skipIf(sys.platform.startswith("win"), "chmod() on Windows does not support not writable dir") - @unittest.skipIf(sys.platform.startswith("lin") and os.geteuid() == 0, "root user does not support not writable dir") - def test_single_non_writable_dir(self): - with self.assertRaises(Error) as cm: - writable_dir(__class__.NOT_WRITABLE_DIR) - - @unittest.skipIf(sys.platform.startswith("win"), "chmod() on Windows does not support not writable dir") - @unittest.skipIf(sys.platform.startswith("lin") and os.geteuid() == 0, "root user does not support not writable dir") - def test_single_non_writable_sub_dir(self): - with self.assertRaises(Error) as cm: - writable_dir(__class__.NOT_WRITABLE_SUB_DIR) - - def test_multiple_writable_dirs(self): - dirs_str = ','.join([__class__.WRITABLE_DIR, __class__.WRITABLE_NON_EXISTING_DIR]) - self.assertEqual(dirs_str, writable_dir(dirs_str)) - - def test_single_writable_non_existing_dir(self): - self.assertEqual(__class__.WRITABLE_NON_EXISTING_DIR, 
writable_dir(__class__.WRITABLE_NON_EXISTING_DIR)) - - def test_readable_dirs(self): - dirs_str = ','.join([__class__.WRITABLE_DIR, __class__.READABLE_DIR]) - self.assertEqual(dirs_str, readable_dirs(dirs_str)) - - def test_not_readable_dirs(self): - dirs_str = ','.join([__class__.WRITABLE_DIR, __class__.WRITABLE_NON_EXISTING_DIR]) - with self.assertRaises(Error) as cm: - readable_dirs(dirs_str) - - def test_readable_file(self): - self.assertEqual(__class__.EXISTING_FILE, readable_file(__class__.EXISTING_FILE)) - - def test_non_readable_file(self): - with self.assertRaises(Error) as cm: - readable_file(__class__.NOT_EXISTING_FILE) - - -class TransformChecker(unittest.TestCase): - def test_empty(self): - self.assertEqual(parse_transform(""), []) - - def test_single_pass(self): - self.assertEqual(parse_transform("LowLatency2"), [("LowLatency2", {})]) - - def test_single_pass_with_args(self): - self.assertEqual(parse_transform("LowLatency2[use_const_initializer=True]"), - [("LowLatency2", {"use_const_initializer": True})]) - - def test_single_pass_with_multiple_args(self): - self.assertEqual(parse_transform("LowLatency2[use_const_initializer=True;dummy_attr=3.14]"), - [("LowLatency2", {"use_const_initializer": True, "dummy_attr": 3.14})]) - - def test_multiple_passes_with_args(self): - self.assertEqual(parse_transform("LowLatency2[use_const_initializer=True],DummyPass[type=ReLU]"), - [("LowLatency2", {"use_const_initializer": True}), - ("DummyPass", {"type": "ReLU"})]) - - def test_multiple_passes_with_args2(self): - self.assertEqual(parse_transform("LowLatency2[use_const_initializer=True,False],DummyPass1," - "DummyPass2[types=ReLU,PReLU;values=1,2,3]"), - [("LowLatency2", {"use_const_initializer": [True, False]}), - ("DummyPass1", {}), - ("DummyPass2", {"types": ["ReLU", "PReLU"], "values": [1, 2, 3]})]) - - def test_multiple_passes_no_args(self): - self.assertEqual(parse_transform("DummyPass,LowLatency22"), - [("DummyPass", {}), ("LowLatency22", {})]) - - def test_single_pass_neg(self): - self.assertRaises(Error, parse_transform, "LowLatency2!") - - def test_multiple_passes_neg(self): - self.assertRaises(Error, parse_transform, "LowLatency2;DummyPass") - - def test_single_pass_with_args_neg1(self): - self.assertRaises(Error, parse_transform, "LowLatency2[=2]") - - def test_single_pass_with_args_neg2(self): - self.assertRaises(Error, parse_transform, "LowLatency2[key=]") - - def test_single_pass_with_args_neg3(self): - self.assertRaises(Error, parse_transform, "LowLatency2[]") - - def test_single_pass_with_args_neg4(self): - self.assertRaises(Error, parse_transform, "LowLatency2[key=value;]") - - def test_single_pass_with_args_neg5(self): - self.assertRaises(Error, parse_transform, "LowLatency2[value]") - - def test_single_pass_with_args_neg6(self): - self.assertRaises(Error, parse_transform, "LowLatency2[key=value") - - @patch("openvino.tools.mo.back.offline_transformations.get_available_transformations") - def test_check_low_latency_is_available(self, available_transformations): - available_transformations.return_value = {"LowLatency2": None} - try: - check_available_transforms([("LowLatency2", "")]) - except Error as e: - self.assertTrue(False, "Exception \"{}\" is unexpected".format(e)) - - @patch("openvino.tools.mo.back.offline_transformations.get_available_transformations") - def test_check_dummy_pass_is_available(self, available_transformations): - available_transformations.return_value = {"LowLatency2": None} - self.assertRaises(Error, check_available_transforms, [("DummyPass", 
"")]) - - -class TestLayoutParsing(unittest.TestCase): - def test_get_layout_1(self): - argv_layout = "name1([n,h,w,c]),name2([n,h,w,c]->[n,c,h,w])" - result = get_layout_values(argv_layout) - exp_res = {'name1': {'source_layout': '[n,h,w,c]', 'target_layout': None}, - 'name2': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_2(self): - argv_layout = "name1(nhwc),name2(nhwc->nchw)" - result = get_layout_values(argv_layout) - exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None}, - 'name2': {'source_layout': 'nhwc', 'target_layout': 'nchw'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_3(self): - argv_layout = "name1(n...c),name2(n...c->nc...)" - result = get_layout_values(argv_layout) - exp_res = {'name1': {'source_layout': 'n...c', 'target_layout': None}, - 'name2': {'source_layout': 'n...c', 'target_layout': 'nc...'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_4(self): - argv_layout = "nhwc" - result = get_layout_values(argv_layout) - exp_res = {'': {'source_layout': 'nhwc', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_5(self): - argv_layout = "[n,h,w,c]" - result = get_layout_values(argv_layout) - exp_res = {'': {'source_layout': '[n,h,w,c]', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_6(self): - argv_layout = "nhwc->nchw" - result = get_layout_values(argv_layout) - exp_res = {'': {'source_layout': 'nhwc', 'target_layout': 'nchw'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_7(self): - argv_layout = "[n,h,w,c]->[n,c,h,w]" - result = get_layout_values(argv_layout) - exp_res = {'': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_8(self): - argv_layout = "name1-0(n...c),name2-0(n...c->nc...)" - result = get_layout_values(argv_layout) - exp_res = {'name1-0': {'source_layout': 'n...c', 'target_layout': None}, - 'name2-0': {'source_layout': 'n...c', 'target_layout': 'nc...'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_scalar(self): - argv_layout = "name1(nhwc),name2([])" - result = get_layout_values(argv_layout) - exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None}, - 'name2': {'source_layout': '[]', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_1(self): - argv_source_layout = "[n,h,w,c]" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = {'': {'source_layout': '[n,h,w,c]', 'target_layout': None}} - 
self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_2(self): - argv_source_layout = "nhwc" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = {'': {'source_layout': 'nhwc', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_3(self): - argv_source_layout = "name1(nhwc),name2(nchw)" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None}, - 'name2': {'source_layout': 'nchw', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_4(self): - argv_source_layout = "name1([n,h,w,c]),name2([n,c,h,w])" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = {'name1': {'source_layout': '[n,h,w,c]', 'target_layout': None}, - 'name2': {'source_layout': '[n,c,h,w]', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_5(self): - argv_source_layout = "name1(nhwc),name2([n,c,h,w])" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None}, - 'name2': {'source_layout': '[n,c,h,w]', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_6(self): - argv_source_layout = "name1(nhwc),name2[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None}, - 'name2': {'source_layout': '[n,c,h,w]', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_scalar(self): - argv_source_layout = "name1(nhwc),name2([])" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None}, - 'name2': {'source_layout': '[]', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_1(self): - argv_target_layout = "[n,h,w,c]" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = {'': {'source_layout': None, 'target_layout': '[n,h,w,c]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_2(self): - argv_target_layout = "nhwc" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = {'': {'source_layout': None, 'target_layout': 'nhwc'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_3(self): - argv_target_layout = "name1(nhwc),name2(nchw)" - result = 
get_layout_values(argv_target_layout=argv_target_layout) - exp_res = {'name1': {'source_layout': None, 'target_layout': 'nhwc'}, - 'name2': {'source_layout': None, 'target_layout': 'nchw'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_4(self): - argv_target_layout = "name1([n,h,w,c]),name2([n,c,h,w])" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = {'name1': {'source_layout': None, 'target_layout': '[n,h,w,c]'}, - 'name2': {'source_layout': None, 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_5(self): - argv_target_layout = "name1(nhwc),name2([n,c,h,w])" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = {'name1': {'source_layout': None, 'target_layout': 'nhwc'}, - 'name2': {'source_layout': None, 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_6(self): - argv_target_layout = "name1(nhwc),name2[n,c,h,w]" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = {'name1': {'source_layout': None, 'target_layout': 'nhwc'}, - 'name2': {'source_layout': None, 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_scalar(self): - argv_target_layout = "name1(nhwc),name2[]" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = {'name1': {'source_layout': None, 'target_layout': 'nhwc'}, - 'name2': {'source_layout': None, 'target_layout': '[]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_1(self): - argv_source_layout = "[n,h,w,c]" - argv_target_layout = "[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = {'': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_2(self): - argv_source_layout = "nhwc" - argv_target_layout = "nchw" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = {'': {'source_layout': 'nhwc', 'target_layout': 'nchw'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_3(self): - argv_source_layout = "name1(nhwc),name2(nhwc)" - argv_target_layout = "name1(nchw),name2(nchw)" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': 'nchw'}, - 'name2': {'source_layout': 'nhwc', 'target_layout': 'nchw'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_4(self): - 
argv_source_layout = "name1([n,h,w,c]),name2([n,h,w,c])" - argv_target_layout = "name1([n,c,h,w]),name2([n,c,h,w])" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = {'name1': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}, - 'name2': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_5(self): - argv_source_layout = "name1(nhwc),name2[n,h,w,c]" - argv_target_layout = "name1(nchw),name2[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': 'nchw'}, - 'name2': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_6(self): - argv_source_layout = "name1.0:a/b(nhwc),name2\\d\\[n,h,w,c]" - argv_target_layout = "name1.0:a/b(nchw),name2\\d\\[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = {'name1.0:a/b': {'source_layout': 'nhwc', 'target_layout': 'nchw'}, - 'name2\\d\\': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_7(self): - argv_source_layout = "name1-0[n,h,w,c],name2-1(?c??)" - argv_target_layout = "name1-0(nchw),name2-1[?,?,?,c]" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = {'name1-0': {'source_layout': '[n,h,w,c]', 'target_layout': 'nchw'}, - 'name2-1': {'source_layout': '?c??', 'target_layout': '[?,?,?,c]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_scalar(self): - argv_source_layout = "name1(nhwc),name2[]" - argv_target_layout = "name1(nchw),name2[]" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': 'nchw'}, - 'name2': {'source_layout': '[]', 'target_layout': '[]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_raises_if_layout_and_source_layout_provided(self): - argv_layout = "nhwc" - argv_source_layout = "nhwc" - with self.assertRaises(Error): - get_layout_values(argv_layout=argv_layout, argv_source_layout=argv_source_layout) - - def test_get_layout_raises_if_layout_and_target_layout_provided(self): - argv_layout = "nhwc->nchw" - argv_target_layout = "nchw" - with self.assertRaises(Error): - get_layout_values(argv_layout=argv_layout, argv_target_layout=argv_target_layout) - - def test_get_layout_raises_if_layout_with_source_and_target_layout_provided(self): - argv_layout = "nhwc->nchw" - argv_source_layout = "nhwc" - argv_target_layout = "nchw" - with self.assertRaises(Error): - get_layout_values(argv_layout=argv_layout, argv_source_layout=argv_source_layout, - 
argv_target_layout=argv_target_layout) - - def test_get_layout_raises_incorrect_format(self): - argv_layout = "name[n,h,w,c]->nchw" - with self.assertRaises(Error): - res = get_layout_values(argv_layout=argv_layout) - print(res) - - -class TestLayoutParsingEmptyNames(unittest.TestCase): - def test_get_layout_1(self): - argv_layout = "([n,h,w,c]),([n,h,w,c]->[n,c,h,w])" - result = get_layout_values(argv_layout) - exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': None}, - {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_2(self): - argv_layout = "(nhwc),(nhwc->nchw)" - result = get_layout_values(argv_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': 'nhwc', 'target_layout': 'nchw'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_3(self): - argv_layout = "(n...c),(n...c->nc...)" - result = get_layout_values(argv_layout) - exp_res = [{'source_layout': 'n...c', 'target_layout': None}, - {'source_layout': 'n...c', 'target_layout': 'nc...'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_scalar(self): - argv_layout = "(nhwc),([])" - result = get_layout_values(argv_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': '[]', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_3(self): - argv_source_layout = "(nhwc),(nchw)" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': 'nchw', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_4(self): - argv_source_layout = "([n,h,w,c]),([n,c,h,w])" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': None}, - {'source_layout': '[n,c,h,w]', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_5(self): - argv_source_layout = "(nhwc),([n,c,h,w])" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': '[n,c,h,w]', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_6(self): - argv_source_layout = "(nhwc),[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': '[n,c,h,w]', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_scalar(self): - argv_source_layout = "(nhwc),([])" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': '[]', 'target_layout': None}] - 
self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_3(self): - argv_target_layout = "(nhwc),(nchw)" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': None, 'target_layout': 'nhwc'}, - {'source_layout': None, 'target_layout': 'nchw'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_4(self): - argv_target_layout = "([n,h,w,c]),([n,c,h,w])" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': None, 'target_layout': '[n,h,w,c]'}, - {'source_layout': None, 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_5(self): - argv_target_layout = "(nhwc),([n,c,h,w])" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': None, 'target_layout': 'nhwc'}, - {'source_layout': None, 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_6(self): - argv_target_layout = "(nhwc),[n,c,h,w]" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': None, 'target_layout': 'nhwc'}, - {'source_layout': None, 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_scalar(self): - argv_target_layout = "(nhwc),[]" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': None, 'target_layout': 'nhwc'}, - {'source_layout': None, 'target_layout': '[]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_3(self): - argv_source_layout = "(nhwc),(nhwc)" - argv_target_layout = "(nchw),(nchw)" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'}, - {'source_layout': 'nhwc', 'target_layout': 'nchw'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_4(self): - argv_source_layout = "([n,h,w,c]),([n,h,w,c])" - argv_target_layout = "([n,c,h,w]),([n,c,h,w])" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}, - {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_5(self): - argv_source_layout = "(nhwc),[n,h,w,c]" - argv_target_layout = "(nchw),[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'}, - {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def 
test_get_layout_source_target_layout_scalar(self): - argv_source_layout = "(nhwc),[]" - argv_target_layout = "(nchw),[]" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'}, - {'source_layout': '[]', 'target_layout': '[]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - -class TestLayoutParsingEmptyNamesNoBrackets(unittest.TestCase): - def test_get_layout_1(self): - argv_layout = "[n,h,w,c],[n,h,w,c]->[n,c,h,w]" - result = get_layout_values(argv_layout) - exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': None}, - {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_2(self): - argv_layout = "nhwc,nhwc->nchw" - result = get_layout_values(argv_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': 'nhwc', 'target_layout': 'nchw'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_3(self): - argv_layout = "n...c,n...c->nc..." - result = get_layout_values(argv_layout) - exp_res = [{'source_layout': 'n...c', 'target_layout': None}, - {'source_layout': 'n...c', 'target_layout': 'nc...'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_scalar(self): - argv_layout = "nhwc,[]" - result = get_layout_values(argv_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': '[]', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_3(self): - argv_source_layout = "nhwc,nchw" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': 'nchw', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_4(self): - argv_source_layout = "[n,h,w,c],[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': None}, - {'source_layout': '[n,c,h,w]', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_5(self): - argv_source_layout = "nhwc,[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': '[n,c,h,w]', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_6(self): - argv_source_layout = "nhwc,[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': '[n,c,h,w]', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_layout_scalar(self): - argv_source_layout = 
"nhwc,[]" - result = get_layout_values(argv_source_layout=argv_source_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': None}, - {'source_layout': '[]', 'target_layout': None}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_3(self): - argv_target_layout = "nhwc,nchw" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': None, 'target_layout': 'nhwc'}, - {'source_layout': None, 'target_layout': 'nchw'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_4(self): - argv_target_layout = "[n,h,w,c],[n,c,h,w]" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': None, 'target_layout': '[n,h,w,c]'}, - {'source_layout': None, 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_5(self): - argv_target_layout = "nhwc,[n,c,h,w]" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': None, 'target_layout': 'nhwc'}, - {'source_layout': None, 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_6(self): - argv_target_layout = "nhwc,[n,c,h,w]" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': None, 'target_layout': 'nhwc'}, - {'source_layout': None, 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_target_layout_scalar(self): - argv_target_layout = "nhwc,[]" - result = get_layout_values(argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': None, 'target_layout': 'nhwc'}, - {'source_layout': None, 'target_layout': '[]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_3(self): - argv_source_layout = "nhwc,nhwc" - argv_target_layout = "nchw,nchw" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'}, - {'source_layout': 'nhwc', 'target_layout': 'nchw'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_4(self): - argv_source_layout = "[n,h,w,c],[n,h,w,c]" - argv_target_layout = "[n,c,h,w],[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}, - {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_5(self): - argv_source_layout = "nhwc,[n,h,w,c]" - argv_target_layout = "nchw,[n,c,h,w]" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'}, - {'source_layout': 
'[n,h,w,c]', 'target_layout': '[n,c,h,w]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_source_target_layout_scalar(self): - argv_source_layout = "nhwc,[]" - argv_target_layout = "nchw,[]" - result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout) - exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'}, - {'source_layout': '[]', 'target_layout': '[]'}] - self.assertEqual(exp_res, result) - for i in range(len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def wrong_case_1(self): - argv_source_layout = "[n,h,w,c]),[n,h,w,c]" - argv_target_layout = "[n,c,h,w],[n,c,h,w]" - self.assertRaises(get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)) - - def wrong_case_2(self): - argv_source_layout = "[nchv" - self.assertRaises(get_layout_values(argv_source_layout=argv_source_layout)) - - def wrong_case_3(self): - argv_source_layout = "nchv->" - self.assertRaises(get_layout_values(argv_source_layout=argv_source_layout)) - -class TestPackParamsToArgsNamespace(unittest.TestCase): - def test_mo_convert_params(self): - from openvino.frontend import ConversionExtension - args = {'input_model': os.path.dirname(__file__), - 'input_shape': [PartialShape([1,100,100,3]), [2,3]], - 'extensions': ConversionExtension("Ext", lambda x: x), - 'reverse_input_channels': True, - 'scale': 0.5, - 'input': ['name', InputCutInfo("a", [1,2,3], numpy.float32, [5, 6, 7])], - 'batch': 1, - 'output': ["a", "b", "c"], - 'mean_values': [0.5, 0.3], - 'scale_values': {"a": np.array([0.4]), "b": [0.5, 0.6]}, - 'source_layout': Layout("nchw"), - 'layout': {"a": LayoutMap("nchw","nhwc"), "b": "nc"}, - 'transform': ('LowLatency2', {'use_const_initializer': False})} - - cli_parser = get_all_cli_parser() - argv = pack_params_to_args_namespace(args, cli_parser) - - assert argv.input_model == args['input_model'] - assert argv.extensions == [args['extensions']] - assert argv.reverse_input_channels == args['reverse_input_channels'] - assert argv.scale == 0.5 - assert argv.batch == 1 - assert argv.input_shape == [PartialShape([1,100,100,3]), [2,3]] - assert argv.input == ['name', InputCutInfo("a", [1,2,3], numpy.float32, [5, 6, 7])] - assert argv.output == "a,b,c" - assert argv.mean_values == "[0.5,0.3]" - assert argv.scale_values == "a[0.4],b[0.5,0.6]" - assert argv.source_layout == "[N,C,H,W]" - assert argv.layout == "a(nchw->nhwc),b(nc)" - assert argv.transform == "LowLatency2[use_const_initializer=False]" - - for arg, value in vars(argv).items(): - if arg not in args and arg != 'is_python_api_used': - assert value == cli_parser.get_default(arg) - - def test_not_existing_dir(self): - args = {"input_model": "abc"} - cli_parser = get_all_cli_parser() - - with self.assertRaisesRegex(Error, "The \"abc\" is not existing file or directory"): - pack_params_to_args_namespace(args, cli_parser) - - def test_unknown_params(self): - args = {"input_model": os.path.dirname(__file__), - "a": "b"} - cli_parser = get_all_cli_parser() - - with self.assertRaisesRegex(Error, "Unrecognized argument: a"): - pack_params_to_args_namespace(args, cli_parser) - - -class TestConvertModelParamsParsing(unittest.TestCase): - def test_mo_convert_params_parsing(self): - ref_params = { - 'Optional parameters:': {'help', 'framework'}, - 'Framework-agnostic parameters:': {'input_model', 'input_shape', 'scale', 'reverse_input_channels', - 'log_level', 'input', 'output', 
'mean_values', 'scale_values', 'source_layout', - 'target_layout', 'layout', 'compress_to_fp16', 'transform', 'extensions', - 'batch', 'silent', 'version', 'progress', 'stream_output', - 'transformations_config', 'example_input', 'share_weights'}, - 'Caffe*-specific parameters:': {'input_proto', 'caffe_parser_path', 'k', 'disable_omitting_optional', - 'enable_flattening_nested_params'}, - 'TensorFlow*-specific parameters:': {'input_model_is_text', 'input_checkpoint', 'input_meta_graph', - 'saved_model_dir', 'saved_model_tags', - 'tensorflow_custom_operations_config_update', - 'tensorflow_object_detection_api_pipeline_config', - 'tensorboard_logdir', 'tensorflow_custom_layer_libraries'}, - 'Kaldi-specific parameters:': {'counts', 'remove_output_softmax', 'remove_memory'}, - 'PaddlePaddle-specific parameters:': {'example_output'}, - } - - params = get_mo_convert_params() - for group_name in ref_params: - assert group_name in params - assert params[group_name].keys() == ref_params[group_name] - - cli_parser = get_all_cli_parser() - for group_name, params in ref_params.items(): - for param_name in params: - param_name = '--' + param_name - if group_name == 'PaddlePaddle-specific parameters:': - assert param_name not in cli_parser._option_string_actions - else: - assert param_name in cli_parser._option_string_actions - diff --git a/tools/mo/unit_tests/mo/utils/convert_impl_tmp_irs_cleanup_test_actual.py b/tools/mo/unit_tests/mo/utils/convert_impl_tmp_irs_cleanup_test_actual.py deleted file mode 100644 index 49ef37b62344f0..00000000000000 --- a/tools/mo/unit_tests/mo/utils/convert_impl_tmp_irs_cleanup_test_actual.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import unittest -from unittest.mock import patch - -from openvino.tools.mo.convert import convert_model -from openvino.tools.mo.utils.error import Error - - -class TestConvertImplTmpIrsCleanup(unittest.TestCase): - test_model_file = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, - "moc_tf_fe/test_models/mul_with_unknown_rank_y.pbtxt") - - @staticmethod - def are_tmp_files_left(orig_model_name): - for suf in [".xml", ".bin", ".mapping"]: - path_to_file = orig_model_name.replace('.pbtxt', '_tmp' + suf) - if os.path.exists(path_to_file): - return True - return False - - def test_tmp_irs_cleanup_convert_impl_1(self): - with patch("openvino.tools.mo.back.offline_transformations.apply_offline_transformations") as emit_ir_func: - emit_ir_func.side_effect = Error('offline transformations step has failed') - - params = {'input_model': self.test_model_file, 'input_model_is_text': True, 'input': 'x[3],y[1 3]', - 'use_legacy_frontend': True} - self.assertRaisesRegex(Error, 'offline transformations step has failed', convert_model, **params) - self.assertFalse(self.are_tmp_files_left(self.test_model_file)) - - def test_tmp_irs_cleanup_convert_impl_2(self): - with patch("openvino.tools.mo.back.ie_ir_ver_2.emitter.add_net_rt_info") as emit_ir_func: - emit_ir_func.side_effect = Error('emitting tmp IR has failed') - - params = {'input_model': self.test_model_file, 'input_model_is_text': True, 'input': 'x[3],y[1 3]', - 'use_legacy_frontend': True} - self.assertRaisesRegex(Error, 'emitting tmp IR has failed', convert_model, **params) - self.assertFalse(self.are_tmp_files_left(self.test_model_file)) - - def test_tmp_irs_cleanup_convert_impl_3(self): - with patch("openvino.tools.mo.convert_impl.read_model") as emit_ir_func: - emit_ir_func.side_effect = 
Exception('FEM read_model has failed') - - params = {'input_model': self.test_model_file, 'input_model_is_text': True, 'input': 'x[3],y[1 3]', - 'use_legacy_frontend': True} - self.assertRaisesRegex(Error, 'FEM read_model has failed', convert_model, **params) - self.assertFalse(self.are_tmp_files_left(self.test_model_file)) diff --git a/tools/mo/unit_tests/mo/utils/custom_replacement_config_test.py b/tools/mo/unit_tests/mo/utils/custom_replacement_config_test.py deleted file mode 100644 index fddb77599289ff..00000000000000 --- a/tools/mo/unit_tests/mo/utils/custom_replacement_config_test.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -from fnmatch import fnmatch - -from openvino.tools.mo.utils.custom_replacement_config import load_and_validate_json_config -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.utils import get_mo_root_dir -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry - - -def get_json_configs(mo_root_dir): - config_path = os.path.join(mo_root_dir, 'extensions', 'front') - pattern = "*.json" - config_files_list = [] - for path, subdirs, files in os.walk(config_path): - for name in files: - if fnmatch(name, pattern): - config_files_list.append((os.path.join(path, name),)) - return config_files_list - - -class TestSchema(UnitTestWithMockedTelemetry): - base_dir = get_mo_root_dir() - schema_file = os.path.join(base_dir, 'mo', 'utils', 'schema.json') - transformation_configs = get_json_configs(base_dir) - test_json1 = '[{"id": "", "match_kind": "general", "custom_attributes": {}}]' - test_json2 = '[{"id": "someid", "match_kind": "abc", "custom_attributes": {}}]' - - def test_schema_file(self): - for transformation_config in self.transformation_configs: - self.assertTrue(load_and_validate_json_config(transformation_config)) - - def test_schema_id_empty(self): - self.assertRaises(Error, load_and_validate_json_config, self.test_json1) - - def test_schema_match_kind_wrong(self): - self.assertRaises(Error, load_and_validate_json_config, self.test_json2) diff --git a/tools/mo/unit_tests/mo/utils/error_test.py b/tools/mo/unit_tests/mo/utils/error_test.py deleted file mode 100644 index 5a328808ecc924..00000000000000 --- a/tools/mo/unit_tests/mo/utils/error_test.py +++ /dev/null @@ -1,24 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest - -from openvino.tools.mo.utils.error import classify_error_type - - -class TestingErrorClassifier(unittest.TestCase): - def test_no_module(self): - message = "No module named 'openvino._offline_transformations.offline_transformations_api'" - self.assertEqual(classify_error_type(message), message) - - def test_no_module_neg(self): - message = "No module 'openvino'" - self.assertEqual(classify_error_type(message), "undefined") - - def test_cannot_import_name(self): - message = "cannot import name 'IECore' from 'openvino.inference_engine' (unknown location)" - self.assertEqual(classify_error_type(message), "cannot import name 'IECore'") - - def test_cannot_import_name_neg(self): - message = "import name 'IECore' from 'openvino.inference_engine' (unknown location)" - self.assertEqual(classify_error_type(message), "undefined") diff --git a/tools/mo/unit_tests/mo/utils/freeze_placeholder_test.py b/tools/mo/unit_tests/mo/utils/freeze_placeholder_test.py deleted file mode 100644 index af9d89d90b0c2c..00000000000000 --- 
a/tools/mo/unit_tests/mo/utils/freeze_placeholder_test.py +++ /dev/null @@ -1,302 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import os -import pytest -from unittest.mock import patch, Mock - -import numpy as np -import onnx -from onnx.helper import make_graph, make_model, make_tensor_value_info - -from openvino.frontend import ( - FrontEndManager, - FrontEnd, -) # pylint: disable=no-name-in-module,import-error -from openvino.runtime import Core -from openvino.tools.mo.convert_impl import prepare_ir - - -def base_args_config(use_legacy_fe: bool = None, use_new_fe: bool = None): - args = argparse.Namespace() - args.feManager = FrontEndManager() - args.extensions = None - args.use_legacy_frontend = use_legacy_fe - args.use_new_frontend = use_new_fe - args.framework = "onnx" - args.model_name = None - args.input_model = None - args.input_checkpoint = None - args.silent = True - args.transform = [] - args.scale = None - args.output = None - args.input = None - args.input_shape = None - args.batch = None - args.mean_values = None - args.scale_values = None - args.output_dir = os.getcwd() - args.freeze_placeholder_with_value = None - args.transformations_config = None - args.static_shape = None - args.reverse_input_channels = None - args.data_type = None - args.layout = None - args.source_layout = None - args.target_layout = None - return args - - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm - - -def get_test_default_frontends(): - return {"onnx": "new", "tf": "legacy"} - - -class TestMoFreezePlaceholder(): - @classmethod - def setup_method(cls): - tm.Telemetry.__init__ = Mock(return_value=None) - tm.Telemetry.send_event = Mock() - FrontEnd.add_extension = Mock() - - cls.models = {} - add = onnx.helper.make_node("Add", inputs=["in1", "in2"], outputs=["add_out"]) - input_tensors = [ - make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (2, 2)), - make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (2, 2)), - ] - output_tensors = [ - make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (2, 2)), - ] - graph = make_graph([add], "test_graph", input_tensors, output_tensors) - model = make_model( - graph, - producer_name="MO tests", - opset_imports=[onnx.helper.make_opsetid("", 13)], - ) - cls.models["test_model.onnx"] = model - - input_tensors_2 = [ - make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (1, 1, 3)), - make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (1,)), - ] - output_tensors_2 = [ - make_tensor_value_info("mul_out", onnx.TensorProto.FLOAT, (1, 1, 3)), - ] - mul = onnx.helper.make_node("Mul", inputs=["in1", "in2"], outputs=["mul_out"]) - graph_2 = make_graph([mul], "test_graph_2", input_tensors_2, output_tensors_2) - model_2 = make_model( - graph_2, - producer_name="MO tests", - opset_imports=[onnx.helper.make_opsetid("", 13)], - ) - cls.models["test_model_2.onnx"] = model_2 - - input_tensors_3 = [ - make_tensor_value_info("in1", onnx.TensorProto.INT32, (2, 3)), - make_tensor_value_info("in2", onnx.TensorProto.INT32, (3,)), - ] - output_tensors_3 = [ - make_tensor_value_info("mul_out", onnx.TensorProto.INT32, (2, 3)), - ] - mul = onnx.helper.make_node("Mul", inputs=["in1", "in2"], outputs=["mul_out"]) - graph_3 = make_graph([mul], "test_graph_3", input_tensors_3, output_tensors_3) - model_3 = make_model( - graph_3, - producer_name="MO tests", - 
opset_imports=[onnx.helper.make_opsetid("", 13)], - ) - cls.models["test_model_int.onnx"] = model_3 - - for name, model in cls.models.items(): - onnx.save(model, name) - @classmethod - def teardown_method(cls): - for name in cls.models.keys(): - os.remove(name) - - @pytest.mark.parametrize( - "input_freezing_value, use_new_fe, inputs, expected,dtype",[ - ( - "in1[1 4]{f32}->[1.0 2.0 3.0 4.0],in2[1 4]{f32}->[1.0 2.0 3.0 4.0]", - True, - {}, - np.array([2.0, 4.0, 6.0, 8.0]), - np.float32, - ), - ( - "in2{f32}->[0.0 0.0 0.0 0.0]", - True, - {"in1": np.array([[1.0, 2.0], [3.0, 4.0]])}, - np.array([[1.0, 2.0], [3.0, 4.0]]), - np.float32, - ), - ( - "in2{f32}->[1.0 15.0 15.5 1.0]", - True, - {"in1": np.array([[2.0, 4.0], [12.0, 8.0]])}, - np.array([[3.0, 19.0], [27.5, 9.0]]), - np.float32, - ), - ( - "in1[1 4]{i32}->[1 2 3 4],in2[1 4]{i32}->[1 2 3 4]", - True, - {}, - np.array([2.0, 4.0, 6.0, 8.0]), - np.int32, - ), - ], - ) - def test_freeze_placeholder_with_value_onnx_fe(self, input_freezing_value, use_new_fe, inputs, expected, - dtype): - with patch("openvino.tools.mo.convert_impl.get_default_frontends") as default_fe: - default_fe.return_value = get_test_default_frontends() - args = base_args_config(use_new_fe=use_new_fe) - args.input_model = "test_model.onnx" - args.input = input_freezing_value - - _, model = prepare_ir(args) - - ie = Core() - exec_net = ie.compile_model(model, "CPU") - req = exec_net.create_infer_request() - results = req.infer(inputs) - values = list(results.values())[0] - if dtype is not None: - assert values.dtype == dtype - assert np.allclose(values, expected) - - @pytest.mark.parametrize( - "input_freezing_value, use_new_fe, inputs, expected, dtype",[ - ( - "in1{f32}->[1.0 15.0 1.0]", - True, - {"in2": np.array([2])}, - np.array([2.0, 30.0, 2.0]), - np.float32, - ), - ( - "in1{f32}->[7.0 11.0 -1.0],in2{f32}->3.0", - True, - {}, - np.array([21.0, 33.0, -3.0]), - np.float32, - ), - ( - None, - True, - { - "in1": np.array([2.0, 2.0, 2.0]).reshape(1, 1, 3), - "in2": np.array([-1.0]), - }, - np.array([-2.0, -2.0, -2.0]), - np.float32, - ), - ( - "in1[3 1]{f32}->[7.0 11.0 -1.0],in2{f32}->3.0", - True, - {}, - np.array([21.0, 33.0, -3.0]).reshape(3, 1), - np.float32, - ), - ( - "in1[3 1]{f16}->[7.0 11.0 -1.0],in2{f16}->3.0", - True, - {}, - np.array([21.0, 33.0, -3.0]).reshape(3, 1), - np.float16, - ), - ( - "in1[3 1]{i32}->[7 11 -1],in2{i32}->3.0", - True, - {}, - np.array([21, 33, -3]).reshape(3, 1), - np.int32, - ), - ], - ) - def test_freeze_placeholder_with_value_mul(self, input_freezing_value, use_new_fe, inputs, expected, dtype): - with patch("openvino.tools.mo.convert_impl.get_default_frontends") as default_fe: - default_fe.return_value = get_test_default_frontends() - args = base_args_config(use_new_fe=use_new_fe) - args.input_model = "test_model_2.onnx" - args.input = input_freezing_value - - _, model = prepare_ir(args) - - ie = Core() - exec_net = ie.compile_model(model, "CPU") - req = exec_net.create_infer_request() - results = req.infer(inputs) - values = list(results.values())[0] - if dtype is not None: - assert values.dtype == dtype - assert np.allclose(values, expected) - - @pytest.mark.parametrize( - "input_freezing_value, use_new_fe, inputs, expected,dtype",[ - ( - "in1->[1.0 15.0 1.0]", - True, - {"in2": np.array([2])}, - np.array([2.0, 30.0, 2.0]), - np.float32, - ), - ], - ) - def test_value_without_type(self, input_freezing_value, use_new_fe, inputs, expected, - dtype): - with patch("openvino.tools.mo.convert_impl.get_default_frontends") as default_fe: 
- default_fe.return_value = get_test_default_frontends() - args = base_args_config(use_new_fe=use_new_fe) - args.input_model = "test_model_2.onnx" - args.input = input_freezing_value - - _, model = prepare_ir(args) - - ie = Core() - exec_net = ie.compile_model(model, "CPU") - req = exec_net.create_infer_request() - results = req.infer(inputs) - values = list(results.values())[0] - if dtype is not None: - assert values.dtype == dtype - assert np.allclose(values, expected) - - @pytest.mark.parametrize( - "input_freezing_value, use_new_fe, inputs, expected,dtype",[ - ( - "in2->[3 2 5]", - True, - {"in1": np.array([[2, 1, 3], [1, 5, 6]], dtype=np.int32)}, - np.array([[6, 2, 15], [3, 10, 30]], dtype=np.int32), - np.int32, - ), - ], - ) - def test_value_without_type_int32(self, input_freezing_value, use_new_fe, inputs, expected, - dtype): - with patch("openvino.tools.mo.convert_impl.get_default_frontends") as default_fe: - default_fe.return_value = get_test_default_frontends() - args = base_args_config(use_new_fe=use_new_fe) - args.input_model = "test_model_int.onnx" - args.input = input_freezing_value - - _, model = prepare_ir(args) - - ie = Core() - exec_net = ie.compile_model(model, "CPU") - req = exec_net.create_infer_request() - results = req.infer(inputs) - values = list(results.values())[0] - if dtype is not None: - assert values.dtype == dtype - assert np.allclose(values, expected) diff --git a/tools/mo/unit_tests/mo/utils/graph_test.py b/tools/mo/unit_tests/mo/utils/graph_test.py deleted file mode 100644 index dd0d9c77ea13bd..00000000000000 --- a/tools/mo/unit_tests/mo/utils/graph_test.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.graph import bfs_search, is_connected_component, sub_graph_between_nodes, backward_bfs_for_operation -from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from unit_tests.utils.graph import regular_op, result, build_graph_with_edge_attrs - - -class TestGraphUtils(UnitTestWithMockedTelemetry): - def test_simple_dfs(self): - graph = Graph() - graph.add_nodes_from(list(range(1, 5))) - graph.add_edges_from([(1, 2), (1, 3), (3, 4)]) - - visited = set() - order = graph.dfs(1, visited) - self.assertTrue(order == [4, 3, 2, 1] or order == [2, 4, 3, 1]) - - def test_bfs_search_default_start_nodes(self): - """ - Check that BFS automatically determines input nodes and start searching from them. - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 6))) - graph.add_edges_from([(1, 3), (2, 3), (3, 4), (4, 5)]) - - order = bfs_search(graph) - self.assertTrue(order == [1, 2, 3, 4, 5] or order == [2, 1, 3, 4, 5]) - - def test_bfs_search_specific_start_nodes(self): - """ - Check that BFS stars from the user defined nodes and doesn't go in backward edge direction. - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 7))) - graph.add_edges_from([(1, 3), (2, 3), (3, 4), (4, 5), (6, 1)]) - - order = bfs_search(graph, [1]) - self.assertTrue(order == [1, 3, 4, 5]) - - def test_is_connected_component_two_separate_sub_graphs(self): - """ - Check that if there are two separate sub-graphs the function returns False. 
- """ - graph = Graph() - graph.add_nodes_from(list(range(1, 7))) - graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6)]) - self.assertFalse(is_connected_component(graph, list(range(1, 7)))) - self.assertFalse(is_connected_component(graph, [1, 3])) - self.assertFalse(is_connected_component(graph, [6, 4])) - self.assertFalse(is_connected_component(graph, [2, 5])) - - def test_is_connected_component_two_separate_sub_graphs_divided_by_ignored_node(self): - """ - Check that if there are two separate sub-graphs the function connected by an edge going through the ignored node - then the function returns False. - """ - graph = Graph() - node_names = list(range(1, 8)) - graph.add_nodes_from(node_names) - graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6), (1, 7), (7, 4)]) - self.assertFalse(is_connected_component(graph, list(range(1, 7)))) - - def test_is_connected_component_connected(self): - """ - Check that if the sub-graph is connected. - """ - graph = Graph() - node_names = list(range(1, 8)) - graph.add_nodes_from(node_names) - graph.add_edges_from([(1, 2), (2, 3), (4, 5), (5, 6), (1, 7), (7, 4)]) - self.assertTrue(is_connected_component(graph, list(range(1, 8)))) - - def test_is_connected_component_edges_direction_is_ignored(self): - """ - Check that edges direction is ignored when checking for the connectivity. - """ - graph = Graph() - node_names = list(range(1, 5)) - graph.add_nodes_from(node_names) - graph.add_edges_from([(2, 1), (2, 3), (4, 3)]) - self.assertTrue(is_connected_component(graph, node_names)) - self.assertTrue(is_connected_component(graph, [2, 1])) - self.assertTrue(is_connected_component(graph, [4, 2, 3])) - - def test_is_connected_component_edges_direction_is_ignored_not_connected(self): - """ - Check that edges direction is ignored when checking for the connectivity. In this case the graph is not - connected. - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 5))) - graph.add_edges_from([(2, 1), (2, 3), (4, 3)]) - self.assertFalse(is_connected_component(graph, [1, 2, 4])) - self.assertFalse(is_connected_component(graph, [1, 4])) - self.assertFalse(is_connected_component(graph, [2, 4])) - self.assertFalse(is_connected_component(graph, [3, 4, 1])) - - def test_sub_graph_between_nodes_include_incoming_edges_for_internal_nodes(self): - """ - Check that the function adds input nodes for the internal nodes of the graph. For example, we need to add node 5 - and 6 in the case below if we find match from node 1 till node 4. - 6 -> 5 -> - \ - 1 -> 2 -> 3 -> 4 - :return: - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 7))) - graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2), (6, 5)]) - sub_graph_nodes = sub_graph_between_nodes(graph, [1], [4]) - self.assertIsNotNone(sub_graph_nodes) - self.assertListEqual(sorted(sub_graph_nodes), list(range(1, 7))) - - sub_graph_nodes = sub_graph_between_nodes(graph, [1], [2]) - self.assertIsNotNone(sub_graph_nodes) - self.assertListEqual(sorted(sub_graph_nodes), [1, 2, 5, 6]) - - def test_sub_graph_between_nodes_do_not_include_incoming_edges_for_input_nodes(self): - """ - Check that the function doesn't add input nodes for the start nodes of the sub-graph. For example, we do not - need to add node 5 in the case below if we find match from node 1 till node 4. 
- 5-> - \ - 1 -> 2 -> 3 -> 4 - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 6))) - graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2)]) - sub_graph_nodes = sub_graph_between_nodes(graph, [2], [4]) - self.assertIsNotNone(sub_graph_nodes) - self.assertListEqual(sorted(sub_graph_nodes), [2, 3, 4]) - - def test_sub_graph_between_nodes_placeholder_included(self): - """ - Check that the function doesn't allow to add Placeholders to the sub-graph. 5 is the Placeholder op. - 5-> - \ - 1 -> 2 -> 3 -> 4 - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 6))) - graph.node[5]['op'] = 'Parameter' - graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2)]) - self.assertRaises(Error, sub_graph_between_nodes, graph, [1], [4]) - - def test_sub_graph_between_nodes_placeholder_excluded(self): - """ - Check that the function do not check that node is Placeholders for the nodes not included into the sub-graph. - For example, node 5 is Placeholder but it is not included into the sub-graph, so this attribute is ignored. - 5-> - \ - 1 -> 2 -> 3 -> 4 - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 6))) - graph.node[5]['op'] = 'Parameter' - graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2)]) - sub_graph_nodes = sub_graph_between_nodes(graph, [2], [4]) - self.assertIsNotNone(sub_graph_nodes) - self.assertListEqual(sorted(sub_graph_nodes), [2, 3, 4]) - - def test_sub_graph_between_nodes_multiple_inputs(self): - """ - Check that the function works correctly when multiple inputs specified. - 5-> - \ - 1 -> 2 -> 3 -> 4 - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 6))) - graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2)]) - sub_graph_nodes = sub_graph_between_nodes(graph, [2, 5], [4]) - self.assertIsNotNone(sub_graph_nodes) - self.assertListEqual(sorted(sub_graph_nodes), sorted([2, 3, 4, 5])) - - def test_sub_graph_between_nodes_branches_included(self): - """ - Check that the function works correctly for tree like structures. - 1 -> 2 -> 3 -> 4 - \ - 5 -> 6 - / \ - 9 -> -> 7 -> 8 - """ - graph = Graph() - node_names = list(range(1, 10)) - graph.add_nodes_from(node_names) - graph.add_edges_from([(1, 2), (2, 3), (3, 4), (2, 5), (5, 6), (5, 7), (7, 8), (9, 5)]) - self.assertListEqual(sorted(sub_graph_between_nodes(graph, [1], [4])), node_names) - self.assertListEqual(sorted(sub_graph_between_nodes(graph, [1], [6])), node_names) - self.assertListEqual(sorted(sub_graph_between_nodes(graph, [1], [8])), node_names) - # all nodes except 4 because it is a child of end node - self.assertListEqual(sorted(sub_graph_between_nodes(graph, [1], [3])), [n for n in node_names if n != 4]) - # all nodes except 1 because it is a parent node child of start node. The nodes 3 and 4 must be added because - # after merging node 2 into sub-graph the node 2 will be removed and it is not known how to calculate the tensor - # between node 2 and 3. - self.assertListEqual(sorted(sub_graph_between_nodes(graph, [2], [8])), [n for n in node_names if n != 1]) - - def test_sub_graph_between_nodes_control_flow_included(self): - """ - Check that the function works correctly for case when control flow edges must be traversed (edge 5 -> 2). 
- 6 -> 5-> - \ - 1 -> 2 -> 3 -> 4 - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 7))) - graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2, {'control_flow_edge': True}), (6, 5)]) - sub_graph_nodes = sub_graph_between_nodes(graph, [1], [4], include_control_flow=True) - self.assertIsNotNone(sub_graph_nodes) - self.assertListEqual(sorted(sub_graph_nodes), sorted([1, 2, 3, 4, 5, 6])) - - def test_sub_graph_between_nodes_control_flow_not_included(self): - """ - Check that the function works correctly for case when control flow edges should not be traversed (edge 5 -> 2). - 6 -> 5-> - \ - 1 -> 2 -> 3 -> 4 - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 7))) - graph.add_edges_from([(1, 2), (2, 3), (3, 4), (5, 2, {'control_flow_edge': True}), (6, 5)]) - sub_graph_nodes = sub_graph_between_nodes(graph, [1], [4], include_control_flow=False) - self.assertIsNotNone(sub_graph_nodes) - self.assertListEqual(sorted(sub_graph_nodes), sorted([1, 2, 3, 4])) - - def test_sub_graph_between_nodes_control_flow_included_forward(self): - """ - Check that the function works correctly for case when control flow edges should not be traversed (edge 3 -> 5). - 1 -> 2 -> 3 -> 4 - \ - -> 5 -> 6 - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 7))) - graph.add_edges_from([(1, 2), (2, 3), (3, 4), (3, 5, {'control_flow_edge': True}), (5, 6)]) - sub_graph_nodes = sub_graph_between_nodes(graph, [1], [4], include_control_flow=True) - self.assertIsNotNone(sub_graph_nodes) - self.assertListEqual(sorted(sub_graph_nodes), sorted([1, 2, 3, 4, 5, 6])) - - def test_sub_graph_between_nodes_control_flow_not_included_forward(self): - """ - Check that the function works correctly for case when control flow edges should not be traversed (edge 3 -> 5). - 1 -> 2 -> 3 -> 4 - \ - -> 5 -> 6 - """ - graph = Graph() - graph.add_nodes_from(list(range(1, 7))) - graph.add_edges_from([(1, 2), (2, 3), (3, 4), (3, 5, {'control_flow_edge': True}), (5, 6)]) - sub_graph_nodes = sub_graph_between_nodes(graph, [1], [4], include_control_flow=False) - self.assertIsNotNone(sub_graph_nodes) - self.assertListEqual(sorted(sub_graph_nodes), sorted([1, 2, 3, 4])) - - def test_backward_bfs_for_op_no_ops_detected(self): - nodes = {**regular_op('input', {'op': 'Parameter'}), - **regular_op('hsigmoid', {'op': 'HSigmoid'}), - **result('result'), - } - edges = [('input', 'hsigmoid', {'out': 0, 'in': 0}), - ('hsigmoid', 'result', {'out': 0, 'in': 0}), - ] - - graph = build_graph_with_edge_attrs(nodes, edges) - graph.stage = 'front' - - found_nodes = backward_bfs_for_operation(Node(graph, 'result'), ['NonExistingOp']) - self.assertEqual(len(found_nodes), 0) - - def test_backward_bfs_for_op_closest_op_detected(self): - """ - input -> hsigmoid_1 -> hsigmoid_2 -> result - The returned op should be first met HSigmoid which is hsigmoid_2 - """ - nodes = {**regular_op('input', {'op': 'Parameter'}), - **regular_op('hsigmoid_1', {'op': 'HSigmoid'}), - **regular_op('hsigmoid_2', {'op': 'HSigmoid'}), - **result('result'), - } - edges = [('input', 'hsigmoid_1', {'out': 0, 'in': 0}), - ('hsigmoid_1', 'hsigmoid_2', {'out': 0, 'in': 0}), - ('hsigmoid_2', 'result', {'out': 0, 'in': 0}), - ] - - graph = build_graph_with_edge_attrs(nodes, edges) - graph.stage = 'front' - - found_nodes = backward_bfs_for_operation(Node(graph, 'result'), ['HSigmoid']) - self.assertEqual(len(found_nodes), 1) - self.assertEqual(found_nodes[0].id, 'hsigmoid_2') - - def test_backward_bfs_for_op_parallel_branch_op_detected(self): - r""" - input_1 -> hsigmoid_1 -> 
hsigmoid_2 -> - \ - - Concat->result - / - input_2 -> hsigmoid_3 -> hsigmoid_4 -> - The returned op should be first met HSigmoids which are hsigmoid_2 and hsigmoid_4 - """ - nodes = {**regular_op('input_1', {'op': 'Parameter'}), - **regular_op('hsigmoid_1', {'op': 'HSigmoid'}), - **regular_op('hsigmoid_2', {'op': 'HSigmoid'}), - **regular_op('input_2', {'op': 'Parameter'}), - **regular_op('hsigmoid_3', {'op': 'HSigmoid'}), - **regular_op('hsigmoid_4', {'op': 'HSigmoid'}), - **regular_op('concat', {'op': 'Concat'}), - **result('result'), - } - edges = [('input_1', 'hsigmoid_1', {'out': 0, 'in': 0}), - ('hsigmoid_1', 'hsigmoid_2', {'out': 0, 'in': 0}), - ('hsigmoid_2', 'concat', {'out': 0, 'in': 0}), - ('input_2', 'hsigmoid_3', {'out': 0, 'in': 0}), - ('hsigmoid_3', 'hsigmoid_4', {'out': 0, 'in': 0}), - ('hsigmoid_4', 'concat', {'out': 0, 'in': 1}), - ('concat', 'result', {'out': 0, 'in': 0}), - ] - - graph = build_graph_with_edge_attrs(nodes, edges) - graph.stage = 'front' - - found_nodes = backward_bfs_for_operation(Node(graph, 'result'), ['HSigmoid']) - self.assertEqual(len(found_nodes), 2) - self.assertSetEqual({found_nodes[0].id, found_nodes[1].id}, {'hsigmoid_2', 'hsigmoid_4'}) - - def test_backward_bfs_for_op_parallel_branch_stop_op(self): - r""" - input_1 -> hsigmoid_1 -> hsigmoid_2 -> - \ - - Concat->result - / - input_2 -> hsigmoid_3 -> ShapeOf -> - The returned op should be first met HSigmoids which is hsigmoid_2, but not the hsigmoid_3 located after banned - operation of type "ShapeOf" - """ - nodes = {**regular_op('input_1', {'op': 'Parameter'}), - **regular_op('hsigmoid_1', {'op': 'HSigmoid'}), - **regular_op('hsigmoid_2', {'op': 'HSigmoid'}), - **regular_op('input_2', {'op': 'Parameter'}), - **regular_op('hsigmoid_3', {'op': 'HSigmoid'}), - **regular_op('shapeof', {'op': 'ShapeOf'}), - **regular_op('concat', {'op': 'Concat'}), - **result('result'), - } - edges = [('input_1', 'hsigmoid_1', {'out': 0, 'in': 0}), - ('hsigmoid_1', 'hsigmoid_2', {'out': 0, 'in': 0}), - ('hsigmoid_2', 'concat', {'out': 0, 'in': 0}), - ('input_2', 'hsigmoid_3', {'out': 0, 'in': 0}), - ('hsigmoid_3', 'shapeof', {'out': 0, 'in': 0}), - ('shapeof', 'concat', {'out': 0, 'in': 1}), - ('concat', 'result', {'out': 0, 'in': 0}), - ] - - graph = build_graph_with_edge_attrs(nodes, edges) - graph.stage = 'front' - - found_nodes = backward_bfs_for_operation(Node(graph, 'result'), ['HSigmoid'], ['ShapeOf']) - self.assertEqual(len(found_nodes), 1) - self.assertEqual(found_nodes[0].id, 'hsigmoid_2') diff --git a/tools/mo/unit_tests/mo/utils/ir_engine/__init__.py b/tools/mo/unit_tests/mo/utils/ir_engine/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/mo/utils/ir_engine/ir_engine_test.py b/tools/mo/unit_tests/mo/utils/ir_engine/ir_engine_test.py deleted file mode 100644 index 3f98741950bbbe..00000000000000 --- a/tools/mo/unit_tests/mo/utils/ir_engine/ir_engine_test.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import logging as log -import numpy as np -import os -import sys -import unittest -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, strict_compare_tensors -from openvino.tools.mo.graph.graph import Node -from openvino.tools.mo.utils.ir_engine.ir_engine import IREngine -from unittest import mock - -log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.DEBUG, stream=sys.stdout) - - -class TestFunction(unittest.TestCase): - def setUp(self): - 
path, _ = os.path.split(os.path.dirname(__file__)) - self.xml = os.path.join(path, os.pardir, os.pardir, - "utils", "test_data", "mxnet_synthetic_gru_bidirectional_FP16_1_v6.xml") - self.xml_negative = os.path.join(path, os.pardir, os.pardir, - "utils", "test_data", - "mxnet_synthetic_gru_bidirectional_FP16_1_v6_negative.xml") - self.bin = os.path.splitext(self.xml)[0] + '.bin' - self.assertTrue(os.path.exists(self.xml), 'XML file not found: {}'.format(self.xml)) - self.assertTrue(os.path.exists(self.bin), 'BIN file not found: {}'.format(self.bin)) - - self.IR = IREngine(path_to_xml=str(self.xml), path_to_bin=str(self.bin)) - self.IR_ref = IREngine(path_to_xml=str(self.xml), path_to_bin=str(self.bin)) - self.IR_negative = IREngine(path_to_xml=str(self.xml_negative), path_to_bin=str(self.bin)) - - def test_is_float(self): - test_cases = [(4.4, True), ('aaaa', False)] - for test_data, result in test_cases: - test_data = test_data - self.assertEqual(IREngine._IREngine__isfloat(test_data), result, - "Function __isfloat is not working with value: {}".format(test_data)) - log.info( - 'Test for function __is_float passed with value: {}, expected result: {}'.format(test_data, result)) - - # TODO add comparison not for type IREngine - def test_compare(self): - flag, msg = self.IR.compare(self.IR_ref) - self.assertTrue(flag, 'Comparing false, test compare function failed') - log.info('Test for function compare passed') - - def test_compare_negative(self): - # Reference data for test: - reference_msg = 'Current node "2" with type "Const" and reference node "2" with type "Input" have different ' \ - 'attr "type" : Const and Input' - # Check function: - flag, msg = self.IR.compare(self.IR_negative) - self.assertFalse(flag, 'Comparing flag failed, test compare function failed') - self.assertEqual('\n'.join(msg), reference_msg, 'Comparing message failed, test compare negative failed') - - log.info('Test for function compare passed') - - def test_find_input(self): - # Create references for this test: - ref_nodes = [Node(self.IR.graph, '0')] - # Check function: - a = IREngine._IREngine__find_input(self.IR.graph) - self.assertTrue(a == ref_nodes, 'Error') - - def test_get_inputs(self): - # Reference data for test: - ref_input_dict = {'data': shape_array([1, 10, 16])} - # Check function: - inputs_dict = self.IR.get_inputs() - self.assertTrue(strict_compare_tensors(ref_input_dict['data'], inputs_dict['data']), - 'Test on function get_inputs failed') - log.info('Test for function get_inputs passed') - - def test_eq_function(self): - self.assertTrue(self.IR == self.IR_ref, 'Comparing false, test eq function failed') - log.info('Test for function eq passed') - - @unittest.mock.patch('numpy.savez_compressed') - def test_generate_bin_hashes_file(self, numpy_savez): - # Generate bin_hashes file in default directory - self.IR.generate_bin_hashes_file() - numpy_savez.assert_called_once() - log.info('Test for function generate_bin_hashes_file with default folder passed') - - @unittest.mock.patch('numpy.savez_compressed') - def test_generate_bin_hashes_file_custom_directory(self, numpy_savez): - # Generate bin_hashes file in custom directory - directory_for_file = os.path.join(os.path.split(os.path.dirname(__file__))[0], "utils", "test_data", - "bin_hash") - self.IR.generate_bin_hashes_file(path_for_file=directory_for_file) - numpy_savez.assert_called_once() - log.info('Test for function generate_bin_hashes_file with custom folder passed') - - def test_normalize_attr(self): - test_cases = [({'order': '1,0,2'}, 
{'order': [1, 0, 2]}), - ({'order': '1'}, {'order': 1})] - for test_data, reference in test_cases: - result_dict = IREngine._IREngine__normalize_attrs(attrs=test_data) - self.assertTrue(reference == result_dict, 'Test on function normalize_attr failed') - log.info('Test for function normalize_attr passed') - - def test_load_bin_hashes(self): - path_for_file = self.IR.generate_bin_hashes_file() - IR = IREngine(path_to_xml=str(self.xml), path_to_bin=str(path_for_file)) - is_ok = True - # Check for constant nodes - const_nodes = IR.graph.get_op_nodes(type='Const') - for node in const_nodes: - if not node.has_valid('hashes'): - log.error('Constant node {} do not include hashes'.format(node.name)) - is_ok = False - - # Check for TensorIterator Body - ti_nodes = IR.graph.get_op_nodes(type='TensorIterator') - for ti in ti_nodes: - if not ti.has_valid('body'): - log.error("TensorIterator doesn't have body attribute for node: {}".format(ti.name)) - else: - const_ti_nodes = ti.body.graph.get_op_nodes(type='Const') - for node in const_ti_nodes: - if not node.has_valid('hashes'): - log.error('Constant node {} do not include hashes'.format(node.name)) - is_ok = False - - self.assertTrue(is_ok, 'Test for function load_bin_hashes failed') - os.remove(path_for_file) - - def test_isint(self): - test_cases = [ - ("0", True), - ("1", True), - ("-1", True), - ("-", False), - ("+1", True), - ("+", False), - ("1.0", False), - ("-1.0", False), - ("1.5", False), - ("+1.5", False), - ("abracadabra", False)] - for value, result in test_cases: - self.assertEqual(IREngine._IREngine__isint(value), result) diff --git a/tools/mo/unit_tests/mo/utils/mo_fallback_test_actual.py b/tools/mo/unit_tests/mo/utils/mo_fallback_test_actual.py deleted file mode 100644 index fcdf915788c5b2..00000000000000 --- a/tools/mo/unit_tests/mo/utils/mo_fallback_test_actual.py +++ /dev/null @@ -1,303 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import os -import shutil -from unittest.mock import patch, Mock - -import numpy as np -import onnx -import pytest -from onnx.helper import make_graph, make_model, make_tensor_value_info -from openvino.frontend import FrontEndManager, FrontEnd # pylint: disable=no-name-in-module,import-error - -from openvino.tools.mo.convert_impl import prepare_ir -from openvino.tools.mo.utils.error import Error - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm - -try: - import paddle - paddle_imported = True -except ImportError: - paddle_imported = False - - -def base_args_config(use_legacy_fe:bool=None, use_new_fe:bool=None): - args = argparse.Namespace() - args.feManager = FrontEndManager() - args.extensions = None - args.use_legacy_frontend = use_legacy_fe - args.use_new_frontend = use_new_fe - args.framework = 'onnx' - args.model_name = None - args.input_model = None - args.silent = True - args.transform=[] - args.scale = None - args.output=None - args.input=None - args.input_shape=None - args.batch=None - args.mean_values=None - args.scale_values=None - args.output_dir=os.getcwd() - args.freeze_placeholder_with_value = None - args.transformations_config = None - args.static_shape = None - args.reverse_input_channels = None - args.data_type = None - args.layout = None - args.source_layout = None - args.target_layout = None - return args - - -def get_test_default_frontends(): - return { - 'onnx': 'new', - 'tf': 'legacy' - } - - -def 
save_paddle_model(name, exe, feedkeys:list, fetchlist:list, target_dir:str): - model_dir = os.path.join(target_dir, name) - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - paddle.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe) - paddle.fluid.io.save_inference_model(model_dir, feedkeys, fetchlist, exe, model_filename=name+".pdmodel", params_filename=name+".pdiparams") - - -class TestMoFallback(): - def setup_method(self): - tm.Telemetry.__init__ = Mock(return_value=None) - tm.Telemetry.send_event = Mock() - FrontEnd.add_extension = Mock() - - self.models = {} - add = onnx.helper.make_node("Add", inputs=["in1", "in2"], outputs=["add_out"]) - input_tensors = [ - make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (2, 2)), - make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (2, 2)), - ] - output_tensors = [ - make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (1, 2)), - ] - graph = make_graph([add], "test_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="MO tests", - opset_imports=[onnx.helper.make_opsetid("", 13)]) - self.models["test_model.onnx"] = model - - for name, model in self.models.items(): - onnx.save(model, name) - - self.test_config_files = {} - self.test_config_files['fake_config.json'] = '[]' # json format - - self.test_config_files['test_config.json'] = """[ - { - "custom_attributes": { - "test_attribute": true - }, - "id": "TransformationName1", - "match_kind": "scope" - }, - { - "custom_attributes": { - }, - "id": "TransfromationName2", - "match_kind": "scope" - } - ]""" - - self.test_config_files['onnx_fe_ext.so'] = 'binary_content' - self.test_config_files['onnx_fe_ext_2.so'] = 'binary_content' - - for file, content in self.test_config_files.items(): - with open(file, 'w') as f: - f.write(content) - - if paddle_imported: - self.paddle_dir = "paddle_dir" - paddle.enable_static() - if not os.path.exists(self.paddle_dir): - os.mkdir(self.paddle_dir) - x = np.array([-2, 0, 1]).astype('float32') - node_x = paddle.static.data(name='x', shape=x.shape, dtype='float32') - out = paddle.nn.functional.relu(node_x) - - cpu = paddle.static.cpu_places(1) - exe = paddle.static.Executor(cpu[0]) - exe.run(paddle.static.default_startup_program()) - - save_paddle_model("relu", exe, feedkeys=['x'], fetchlist=[out], target_dir=self.paddle_dir) - - def teardown_method(self): - for name in self.models.keys(): - os.remove(name) - for name in self.test_config_files: - os.remove(name) - if paddle_imported: - shutil.rmtree(self.paddle_dir) - - @pytest.mark.parametrize("extension, use_legacy, use_new_fe, conversion_method, fallback_reason", [ - (['dir_to_extension'], None, None, 'mo_legacy', 'extensions'), # fallback - (['dir_to_extension'], None, True, None, None), # exception - (['dir_to_extension'], True, None, 'mo_legacy', None), - ([''], True, None, 'mo_legacy', None), - ([''], None, True, 'onnx_frontend', None), - (None, None, None, 'onnx_frontend', None) - ]) - def test_fallback_if_extension_specified(self, extension, use_legacy, use_new_fe, conversion_method, fallback_reason): - with patch('openvino.tools.mo.convert_impl.get_default_frontends') as default_fe: - default_fe.return_value = get_test_default_frontends() - args = base_args_config(use_legacy, use_new_fe) - args.extensions = extension - args.input_model = "test_model.onnx" - - if conversion_method: - prepare_ir(args) - tm.Telemetry.send_event.assert_any_call('mo', 'conversion_method', conversion_method) - if fallback_reason: - 
tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason', fallback_reason) - else: - with pytest.raises(AssertionError): # not called - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason', fallback_reason) - else: - with pytest.raises(Error): # not supported extensions on new path - prepare_ir(args) - - @pytest.mark.parametrize("use_legacy, use_new_fe, conversion_method", [ - (None, None, 'onnx_frontend'), - (True, None, None), # exception - (None, True, 'onnx_frontend'), - ]) - def test_fallback_if_new_extension_specified(self, use_legacy, use_new_fe, conversion_method): - with patch('openvino.tools.mo.convert_impl.get_default_frontends') as default_fe: - default_fe.return_value = get_test_default_frontends() - args = base_args_config(use_legacy, use_new_fe) - args.extensions = ['onnx_fe_ext.so'] - args.input_model = "test_model.onnx" - - if conversion_method: - prepare_ir(args) - tm.Telemetry.send_event.assert_any_call('mo', 'conversion_method', conversion_method) - else: - with pytest.raises(Error): - prepare_ir(args) - - @pytest.mark.parametrize("use_legacy, use_new_fe, conversion_method", [ - (None, None, 'onnx_frontend'), - (True, None, None), # exception - (None, True, 'onnx_frontend') - ]) - def test_fallback_if_two_new_extension_specified(self, use_legacy, use_new_fe, conversion_method): - with patch('openvino.tools.mo.convert_impl.get_default_frontends') as default_fe: - default_fe.return_value = get_test_default_frontends() - args = base_args_config(use_legacy, use_new_fe) - args.extensions = ['onnx_fe_ext.so', 'onnx_fe_ext_2.so'] - args.input_model = "test_model.onnx" - - if conversion_method: - prepare_ir(args) - tm.Telemetry.send_event.assert_any_call('mo', 'conversion_method', conversion_method) - else: - with pytest.raises(Error): - prepare_ir(args) - - @pytest.mark.parametrize("trans_config, use_legacy, use_new_fe, expected_path, fallback_reason", [ - ('fake_config.json', None, None, 'mo_legacy', 'transformations_config'), # fallback - ('test_config.json', None, None, 'mo_legacy', 'transformations_config'), # fallback - ('fake_config.json', True, None, 'mo_legacy', None), - (None, None, True, 'onnx_frontend', None), - (None, None, None, 'onnx_frontend', None)]) - def test_fallback_if_tranformations_config_specified(self, trans_config, use_legacy, use_new_fe, expected_path, - fallback_reason): - with patch('openvino.tools.mo.convert_impl.get_default_frontends') as default_fe: - default_fe.return_value = get_test_default_frontends() - args = base_args_config(use_legacy, use_new_fe) - args.input_model = "test_model.onnx" - args.transformations_config = trans_config - - with patch('openvino.tools.mo.utils.class_registration.apply_transform'): # skip applying transforms - prepare_ir(args) - - tm.Telemetry.send_event.assert_any_call('mo', 'conversion_method', expected_path) - if fallback_reason: - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason', fallback_reason) - else: - with pytest.raises(AssertionError): # not called - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason', fallback_reason) - - @pytest.mark.parametrize("extension, trans_config, use_new_fe, expected_path, fallback_reason", [ - (['dir_to_extension'], 'fake_config.json', None, 'mo_legacy', 'extensions, transformations_config'), # fallback - (None, 'fake_config.json', None, 'mo_legacy', 'transformations_config'), # fallback - (['dir_to_extension'], None, None, 'mo_legacy', 'extensions'), # fallback - (None, None, True, 'onnx_frontend', None) - ]) - def 
test_fallback_if_both_extension_and_trans_config_specified(self, extension, trans_config, use_new_fe, expected_path, fallback_reason): - with patch('openvino.tools.mo.convert_impl.get_default_frontends') as default_fe: - default_fe.return_value = get_test_default_frontends() - args = base_args_config(use_new_fe=use_new_fe) - args.extensions = extension - args.input_model = "test_model.onnx" - args.transformations_config = trans_config - - prepare_ir(args) - - tm.Telemetry.send_event.assert_any_call('mo', 'conversion_method', expected_path) - if fallback_reason: - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason', fallback_reason) - else: - with pytest.raises(AssertionError): # not called - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason', fallback_reason) - - @pytest.mark.parametrize("trans_config, use_legacy, use_new_fe, expected_path", - [('fake_config.json', None, None, 'mo_legacy'), - ('fake_config.json', True, None, 'mo_legacy'), - (None, None, True, 'onnx_frontend')]) - def test_fallback_if_legacy_set_as_default(self, trans_config, use_legacy, use_new_fe, expected_path): - with patch('openvino.tools.mo.convert_impl.get_default_frontends') as default_fe: - default_fe.return_value = {'onnx': 'legacy', 'tf': 'legacy'} - args = base_args_config(use_legacy, use_new_fe) - args.input_model = "test_model.onnx" - args.transformations_config = trans_config - - prepare_ir(args) - - tm.Telemetry.send_event.assert_any_call('mo', 'conversion_method', expected_path) - with pytest.raises(AssertionError): # not called - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason') - - @pytest.mark.skipif(not paddle_imported, reason="PaddlePaddle is not installed") - @pytest.mark.parametrize("use_new_fe, use_legacy, extension, expected_path", - [(True, None, None, 'paddle_frontend'), - (None, None, None, 'paddle_frontend')]) - def test_no_fallback_if_pdpd(self, use_new_fe, use_legacy, extension, expected_path): - args = base_args_config(use_legacy, use_new_fe) - args.framework = 'paddle' - args.extensions = extension - args.input_model = 'paddle_dir/relu/relu.pdmodel' - - prepare_ir(args) - - tm.Telemetry.send_event.assert_any_call('mo', 'conversion_method', expected_path) - with pytest.raises(AssertionError): # not called - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason') - - @pytest.mark.skipif(not paddle_imported, reason="PaddlePaddle is not installed") - def test_exception_if_old_extensions_used_for_pdpd(self): - args = base_args_config() - args.framework = 'paddle' - args.extensions = ['dir_to_extension'] - args.input_model = 'paddle_dir/relu/relu.pdmodel' - - with pytest.raises(Error) as ex: # not called - prepare_ir(args) - assert str(ex) == 'Legacy transformations configuration is not supported for the new frontend' diff --git a/tools/mo/unit_tests/mo/utils/mo_fallback_test_tf_fe.py b/tools/mo/unit_tests/mo/utils/mo_fallback_test_tf_fe.py deleted file mode 100644 index cb8a3339376254..00000000000000 --- a/tools/mo/unit_tests/mo/utils/mo_fallback_test_tf_fe.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import os -import pytest -import unittest -from openvino.frontend import FrontEndManager, FrontEnd # pylint: disable=no-name-in-module,import-error -from openvino.tools.mo.convert_impl import prepare_ir -from unittest.mock import Mock - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import 
openvino.tools.mo.utils.telemetry_stub as tm - - -def base_args_config(use_legacy_fe: bool = None, use_new_fe: bool = None): - args = argparse.Namespace() - args.feManager = FrontEndManager() - args.extensions = None - args.use_legacy_frontend = use_legacy_fe - args.use_new_frontend = use_new_fe - args.framework = 'tf' - args.model_name = None - args.input_model = None - args.silent = True - args.transform = [] - args.scale = None - args.output = None - args.input = None - args.input_shape = None - args.batch = None - args.mean_values = None - args.scale_values = None - args.output_dir = os.getcwd() - args.freeze_placeholder_with_value = None - args.transformations_config = None - args.static_shape = None - args.reverse_input_channels = None - args.data_type = None - args.layout = None - args.source_layout = None - args.target_layout = None - args.input_model_is_text = False - args.input_checkpoint = None - args.saved_model_dir = None - args.input_meta_graph = None - args.saved_model_tags = None - return args - - -class TestMoFallback(unittest.TestCase): - test_directory = os.path.dirname(os.path.realpath(__file__)) - - def create_fake_json_file(self, output_dir): - json_data = '[]' # json format - json_path = os.path.join(output_dir, 'fake_config.json') - with open(json_path, 'w') as f: - f.write(json_data) - return json_path - - def create_tensorflow_model_pb(self, output_dir): - import tensorflow as tf - try: - import tensorflow.compat.v1 as tf_v1 - except ImportError: - import tensorflow as tf_v1 - - tf_v1.reset_default_graph() - with tf_v1.Session() as sess: - x = tf_v1.placeholder(tf.float32, [2, 3], 'x') - y = tf_v1.placeholder(tf.float32, [2, 3], 'y') - tf.add(x, y, name="add") - tf_v1.global_variables_initializer() - tf.io.write_graph(sess.graph, output_dir, 'model.pb', as_text=False) - return os.path.join(output_dir, 'model.pb') - - def create_tensorflow_saved_model(self, output_dir): - import tensorflow as tf - inputs = tf.keras.Input(shape=(3,)) - x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs) - outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x) - model = tf.keras.Model(inputs=inputs, outputs=outputs) - model_path = os.path.join(output_dir, 'saved_model') - model.save(model_path) - return model_path - - def setUp(self): - tm.Telemetry.__init__ = Mock(return_value=None) - tm.Telemetry.send_event = Mock() - FrontEnd.add_extension = Mock() - - def test_transform_config_fallback_tf_fe_pb(self): - import tempfile - test_cases = [ - # transformation config fallback even for use_new_frontend in case TF FE - # TODO: uncomment this case once TF FE is unbricked and obtains normal name openvino_tensorflow_frontend - # (False, True, 'mo_legacy', 'transformations_config'), - # no fallback since legacy FE is used - (True, False, 'mo_legacy', None), - # no fallback since legacy FE is default for TensorFlow - (False, False, 'mo_legacy', None) - ] - for use_legacy_frontend, use_new_frontend, expected_frontend, fallback_reason in test_cases: - with tempfile.TemporaryDirectory(dir=self.test_directory) as tmp_dir: - args = base_args_config(use_legacy_frontend, use_new_frontend) - model_path = self.create_tensorflow_model_pb(tmp_dir) - args.input_model = model_path - args.framework = 'tf' - args.transformations_config = self.create_fake_json_file(tmp_dir) - - prepare_ir(args) - - tm.Telemetry.send_event.assert_any_call('mo', 'conversion_method', expected_frontend) - if fallback_reason: - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason', fallback_reason) - 
else: - with pytest.raises(AssertionError): # not called - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason', fallback_reason) - - def test_transform_config_fallback_tf_fe_saved_model(self): - import tempfile - test_cases = [ - # transformation config fallback even for use_new_frontend in case TF FE - # TODO: uncomment this case once TF FE is unbricked and obtains normal name openvino_tensorflow_frontend - # (False, True, 'mo_legacy', 'transformations_config'), - # no fallback since legacy FE is used - (True, False, 'mo_legacy', None), - # no fallback since legacy FE is default for TensorFlow - (False, False, 'mo_legacy', None), - ] - for use_legacy_frontend, use_new_frontend, expected_frontend, fallback_reason in test_cases: - with tempfile.TemporaryDirectory(dir=self.test_directory) as tmp_dir: - args = base_args_config(use_legacy_frontend, use_new_frontend) - model_path = self.create_tensorflow_saved_model(tmp_dir) - args.saved_model_dir = model_path - args.framework = 'tf' - args.transformations_config = self.create_fake_json_file(tmp_dir) - - print("args = ", args) - prepare_ir(args) - - tm.Telemetry.send_event.assert_any_call('mo', 'conversion_method', expected_frontend) - if fallback_reason: - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason', fallback_reason) - else: - with pytest.raises(AssertionError): # not called - tm.Telemetry.send_event.assert_any_call('mo', 'fallback_reason', fallback_reason) diff --git a/tools/mo/unit_tests/mo/utils/pipeline_config_test.py b/tools/mo/unit_tests/mo/utils/pipeline_config_test.py deleted file mode 100644 index 271cc7828b2cff..00000000000000 --- a/tools/mo/unit_tests/mo/utils/pipeline_config_test.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest.mock - -from openvino.tools.mo.utils.error import Error -from openvino.tools.mo.utils.pipeline_config import PipelineConfig - -file_content = """model { - faster_rcnn { - num_classes: 90 - image_resizer { - keep_aspect_ratio_resizer { - min_dimension: 600 - max_dimension: 1024 - pad_to_max_dimension: true - } - } - feature_extractor { - type: "faster_rcnn_inception_v2" - first_stage_features_stride: 16 - } - first_stage_anchor_generator { - grid_anchor_generator { - height_stride: 16 - width_stride: 16 - scales: 0.25 - scales: 0.5 - scales: 1.0 - scales: 2.0 - aspect_ratios: 0.5 - aspect_ratios: 1.0 - aspect_ratios: 2.0 - } - } - first_stage_box_predictor_conv_hyperparams { - op: CONV - regularizer { - l2_regularizer { - weight: 0.0 - } - } - initializer { - truncated_normal_initializer { - stddev: 0.00999999977648 - } - } - } - first_stage_nms_score_threshold: 0.0 - first_stage_nms_iou_threshold: 0.699999988079 - first_stage_max_proposals: 100 - first_stage_localization_loss_weight: 2.0 - first_stage_objectness_loss_weight: 1.0 - initial_crop_size: 14 - maxpool_kernel_size: 2 - maxpool_stride: 2 - second_stage_box_predictor { - mask_rcnn_box_predictor { - fc_hyperparams { - op: FC - regularizer { - l2_regularizer { - weight: 0.0 - } - } - initializer { - variance_scaling_initializer { - factor: 1.0 - uniform: true - mode: FAN_AVG - } - } - } - use_dropout: false - dropout_keep_probability: 1.0 - } - } - second_stage_post_processing { - batch_non_max_suppression { - score_threshold: 0.300000011921 - iou_threshold: 0.600000023842 - max_detections_per_class: 100 - max_total_detections: 200 - } - score_converter: SOFTMAX - } - second_stage_localization_loss_weight: 2.0 - 
second_stage_classification_loss_weight: 1.0 - } -} -""" - - -class TestingSimpleProtoParser(unittest.TestCase): - def test_pipeline_config_not_existing_file(self): - self.assertRaises(Error, PipelineConfig, "/abc/def") - - def test_pipeline_config_non_model_file(self): - with unittest.mock.patch('builtins.open', unittest.mock.mock_open(read_data="non_model {}")): - self.assertRaises(Error, PipelineConfig, __file__) - - def test_pipeline_config_existing_file(self): - with unittest.mock.patch('builtins.open', unittest.mock.mock_open(read_data=file_content)): - pipeline_config = PipelineConfig(__file__) - expected_result = {'resizer_min_dimension': 600, - 'first_stage_nms_score_threshold': 0.0, - 'anchor_generator_aspect_ratios': [0.5, 1.0, 2.0], - 'num_classes': 90, - 'anchor_generator_scales': [0.25, 0.5, 1.0, 2.0], - 'first_stage_max_proposals': 100, - 'first_stage_nms_iou_threshold': 0.699999988079, - 'resizer_max_dimension': 1024, - 'initial_crop_size': 14, - 'frcnn_variance_height': 5.0, - 'frcnn_variance_width': 5.0, - 'frcnn_variance_x': 10.0, - 'frcnn_variance_y': 10.0, - 'ssd_anchor_generator_base_anchor_width': 1.0, - 'ssd_anchor_generator_base_anchor_height': 1.0, - 'anchor_generator_height': 256, - 'anchor_generator_width': 256, - 'anchor_generator_height_stride': 16, - 'anchor_generator_width_stride': 16, - 'ssd_anchor_generator_min_scale': 0.2, - 'ssd_anchor_generator_max_scale': 0.95, - 'ssd_anchor_generator_interpolated_scale_aspect_ratio': 1.0, - 'use_matmul_crop_and_resize': False, - 'add_background_class': True, - 'share_box_across_classes': False, - 'pad_to_max_dimension': True, - 'postprocessing_score_threshold': 0.300000011921, - 'postprocessing_score_converter': 'SOFTMAX', - 'postprocessing_iou_threshold': 0.600000023842, - 'postprocessing_max_detections_per_class': 100, - 'postprocessing_max_total_detections': 200, - } - self.assertDictEqual(pipeline_config._model_params, expected_result) diff --git a/tools/mo/unit_tests/mo/utils/simple_proto_parser_test.py b/tools/mo/unit_tests/mo/utils/simple_proto_parser_test.py deleted file mode 100644 index c3964d8a58945a..00000000000000 --- a/tools/mo/unit_tests/mo/utils/simple_proto_parser_test.py +++ /dev/null @@ -1,190 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import sys -import tempfile -import unittest - -from openvino.tools.mo.utils.simple_proto_parser import SimpleProtoParser - -correct_proto_message_1 = 'model { faster_rcnn { num_classes: 90 image_resizer { keep_aspect_ratio_resizer {' \ - ' min_dimension: 600 max_dimension: 1024 }}}}' - -correct_proto_message_2 = ' first_stage_anchor_generator {grid_anchor_generator {height_stride: 16 width_stride:' \ - ' 16 scales: 0.25 scales: 0.5 scales: 1.0 scales: 2.0 aspect_ratios: 0.5 aspect_ratios:' \ - ' 1.0 aspect_ratios: 2.0}}' - -correct_proto_message_3 = ' initializer \n{variance_scaling_initializer \n{\nfactor: 1.0 uniform: true bla: false ' \ - 'mode: FAN_AVG}}' - -correct_proto_message_4 = 'train_input_reader {label_map_path: "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt"' \ - ' tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/ mscoco_train.record" }}' - -correct_proto_message_5 = ' initializer \n # abc \n{variance_scaling_initializer \n{\nfactor: 1.0 \n # sd ' \ - '\nuniform: true bla: false mode: FAN_AVG}}' - -correct_proto_message_6 = ' first_stage_anchor_generator {grid_anchor_generator {height_stride: 16 width_stride:' \ - ' 16 scales: [ 0.25, 0.5, 1.0, 2.0] aspect_ratios: 0.5 
aspect_ratios:' \ - ' 1.0 aspect_ratios: 2.0}}' - -correct_proto_message_7 = ' first_stage_anchor_generator {grid_anchor_generator {height_stride: 16 width_stride:' \ - ' 16 scales: [ 0.25, 0.5, 1.0, 2.0] aspect_ratios: [] }}' - -correct_proto_message_8 = 'model {good_list: [3.0, 5.0, ]}' - -correct_proto_message_9 = ' first_stage_anchor_generator {grid_anchor_generator {height_stride: 16, width_stride:' \ - ' 16 scales: [ 0.25, 0.5, 1.0, 2.0], aspect_ratios: [] }}' - -correct_proto_message_10 = r'train_input_reader {label_map_path: "C:\mscoco_label_map.pbtxt"' \ - ' tf_record_input_reader { input_path: "PATH_TO_BE_CONFIGURED/ mscoco_train.record" }}' - -correct_proto_message_11 = r'model {path: "C:\[{],}" other_value: [1, 2, 3, 4]}' - -incorrect_proto_message_1 = 'model { bad_no_value }' - -incorrect_proto_message_2 = 'model { abc: 3 { }' - -incorrect_proto_message_3 = 'model { too_many_values: 3 4 }' - -incorrect_proto_message_4 = 'model { missing_values: ' - -incorrect_proto_message_5 = 'model { missing_values: aa bb : }' - -incorrect_proto_message_6 = 'model : ' - -incorrect_proto_message_7 = 'model : {bad_list: [3.0, 4, , 4.0]}' - - -class TestingSimpleProtoParser(unittest.TestCase): - def test_correct_proto_reader_from_string_1(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_1) - expected_result = {'model': {'faster_rcnn': {'num_classes': 90, 'image_resizer': { - 'keep_aspect_ratio_resizer': {'min_dimension': 600, 'max_dimension': 1024}}}}} - self.assertDictEqual(result, expected_result) - - def test_correct_proto_reader_from_string_2(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_2) - expected_result = {'first_stage_anchor_generator': { - 'grid_anchor_generator': {'height_stride': 16, 'width_stride': 16, 'scales': [0.25, 0.5, 1.0, 2.0], - 'aspect_ratios': [0.5, 1.0, 2.0]}}} - self.assertDictEqual(result, expected_result) - - def test_correct_proto_reader_from_string_3(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_3) - expected_result = { - 'initializer': { - 'variance_scaling_initializer': {'factor': 1.0, 'uniform': True, 'bla': False, 'mode': 'FAN_AVG'}}} - self.assertDictEqual(result, expected_result) - - def test_correct_proto_reader_from_string_4(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_4) - expected_result = { - 'train_input_reader': {'label_map_path': "PATH_TO_BE_CONFIGURED/mscoco_label_map.pbtxt", - 'tf_record_input_reader': { - 'input_path': "PATH_TO_BE_CONFIGURED/ mscoco_train.record"}}} - self.assertDictEqual(result, expected_result) - - def test_correct_proto_reader_from_string_with_comments(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_5) - expected_result = { - 'initializer': { - 'variance_scaling_initializer': {'factor': 1.0, 'uniform': True, 'bla': False, 'mode': 'FAN_AVG'}}} - self.assertDictEqual(result, expected_result) - - def test_correct_proto_reader_from_string_with_lists(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_6) - expected_result = {'first_stage_anchor_generator': { - 'grid_anchor_generator': {'height_stride': 16, 'width_stride': 16, 'scales': [0.25, 0.5, 1.0, 2.0], - 'aspect_ratios': [0.5, 1.0, 2.0]}}} - self.assertDictEqual(result, expected_result) - - def test_correct_proto_reader_from_string_with_empty_list(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_7) - expected_result = {'first_stage_anchor_generator': { - 
'grid_anchor_generator': {'height_stride': 16, 'width_stride': 16, 'scales': [0.25, 0.5, 1.0, 2.0], - 'aspect_ratios': []}}} - self.assertDictEqual(result, expected_result) - - def test_correct_proto_reader_from_string_with_comma_trailing_list(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_8) - expected_result = {'model': {'good_list': [3.0, 5.0]}} - self.assertDictEqual(result, expected_result) - - def test_correct_proto_reader_from_string_with_redundant_commas(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_9) - expected_result = {'first_stage_anchor_generator': { - 'grid_anchor_generator': {'height_stride': 16, 'width_stride': 16, 'scales': [0.25, 0.5, 1.0, 2.0], - 'aspect_ratios': []}}} - self.assertDictEqual(result, expected_result) - - def test_correct_proto_reader_from_string_with_windows_path(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_10) - expected_result = { - 'train_input_reader': {'label_map_path': r"C:\mscoco_label_map.pbtxt", - 'tf_record_input_reader': { - 'input_path': "PATH_TO_BE_CONFIGURED/ mscoco_train.record"}}} - self.assertDictEqual(result, expected_result) - - def test_correct_proto_reader_from_string_with_special_characters_in_string(self): - result = SimpleProtoParser().parse_from_string(correct_proto_message_11) - expected_result = {'model': {'path': r"C:\[{],}", - 'other_value': [1, 2, 3, 4]}} - self.assertDictEqual(result, expected_result) - - @unittest.skip - def test_incorrect_proto_reader_from_string_1(self): - result = SimpleProtoParser().parse_from_string(incorrect_proto_message_1) - self.assertIsNone(result) - - def test_incorrect_proto_reader_from_string_2(self): - result = SimpleProtoParser().parse_from_string(incorrect_proto_message_2) - self.assertIsNone(result) - - def test_incorrect_proto_reader_from_string_3(self): - result = SimpleProtoParser().parse_from_string(incorrect_proto_message_3) - self.assertIsNone(result) - - def test_incorrect_proto_reader_from_string_4(self): - result = SimpleProtoParser().parse_from_string(incorrect_proto_message_4) - self.assertIsNone(result) - - def test_incorrect_proto_reader_from_string_5(self): - result = SimpleProtoParser().parse_from_string(incorrect_proto_message_5) - self.assertIsNone(result) - - def test_incorrect_proto_reader_from_string_6(self): - result = SimpleProtoParser().parse_from_string(incorrect_proto_message_6) - self.assertIsNone(result) - - def test_incorrect_proto_reader_from_string_7(self): - result = SimpleProtoParser().parse_from_string(incorrect_proto_message_7) - self.assertIsNone(result) - - def test_correct_proto_reader_from_file(self): - file = tempfile.NamedTemporaryFile('wt', delete=False) - file.write(correct_proto_message_1) - file_name = file.name - file.close() - - result = SimpleProtoParser().parse_file(file_name) - expected_result = {'model': {'faster_rcnn': {'num_classes': 90, 'image_resizer': { - 'keep_aspect_ratio_resizer': {'min_dimension': 600, 'max_dimension': 1024}}}}} - self.assertDictEqual(result, expected_result) - os.unlink(file_name) - - @unittest.skip("Temporary disabled since chmod() is temporary not working on Linux. 
(Windows do not support not writable dir at all)") - def test_proto_reader_from_non_readable_file(self): - file = tempfile.NamedTemporaryFile('wt', delete=False) - file.write(correct_proto_message_1) - file_name = file.name - file.close() - os.chmod(file_name, 0000) - - result = SimpleProtoParser().parse_file(file_name) - self.assertIsNone(result) - os.unlink(file_name) - - def test_proto_reader_from_non_existing_file(self): - result = SimpleProtoParser().parse_file('/non/existing/file') - self.assertIsNone(result) diff --git a/tools/mo/unit_tests/mo/utils/summarize_graph_test.py b/tools/mo/unit_tests/mo/utils/summarize_graph_test.py deleted file mode 100644 index d9076d7e42cdb9..00000000000000 --- a/tools/mo/unit_tests/mo/utils/summarize_graph_test.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch, mock_open - -from openvino.tools.mo.front.tf.loader import load_tf_graph_def -from openvino.tools.mo.utils.summarize_graph import summarize_graph - -pbtxt = 'node{name:"Placeholder"op:"Placeholder"attr{key:"dtype"value{type:DT_FLOAT}}attr{key:"shape"value{shape{dim' + \ - '{size:1}dim{size:227}dim{size:227}dim{size:3}}}}}node{name:"Output/Identity"op:"Identity"input:"Placeholder' + \ - '"attr{key:"T"value{type:DT_FLOAT}}}' - - -class TestingSummarizeGraph(unittest.TestCase): - def test_summarize_graph(self): - with patch('openvino.tools.mo.front.tf.loader.open', mock_open(read_data=pbtxt)) as m: - graph_def, _, _, _ = load_tf_graph_def('path', False) - summary = summarize_graph(graph_def) - self.assertEqual(len(summary['outputs']), 1) - self.assertEqual(summary['outputs'][0], 'Output/Identity') - self.assertEqual(len(summary['inputs']), 1) - self.assertEqual('Placeholder' in summary['inputs'], True) - self.assertEqual(str(summary['inputs']['Placeholder']['shape']), '(1,227,227,3)') - self.assertEqual(str(summary['inputs']['Placeholder']['type']), 'float32') diff --git a/tools/mo/unit_tests/mo/utils/telemetry_utils_test.py b/tools/mo/unit_tests/mo/utils/telemetry_utils_test.py deleted file mode 100644 index c2021430eddd17..00000000000000 --- a/tools/mo/unit_tests/mo/utils/telemetry_utils_test.py +++ /dev/null @@ -1,103 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from collections import Counter -from unittest.mock import Mock - -from openvino.tools.mo.front.common.partial_infer.utils import int64_array, shape_array, dynamic_dimension_value -from openvino.tools.mo.graph.graph import Graph, Node -from openvino.tools.mo.utils.telemetry_utils import send_op_names_info, send_shapes_info -from unit_tests.utils.graph import build_graph, regular_op - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm - - -class TestTelemetryUtils(unittest.TestCase): - @staticmethod - def init_telemetry_mocks(): - tm.Telemetry.__init__ = Mock(return_value=None) - tm.Telemetry.send_event = Mock() - - def test_send_op_names_info(self): - graph = Graph() - graph.add_nodes_from(['node1']) - graph.op_names_statistic = Counter(['a', 'a', 'a', 'b', 'b']) - - sub_graph1 = Graph() - sub_graph1.add_nodes_from(['node2']) - sub_graph1.op_names_statistic = Counter(['a', 'c', 'c']) - - sub_graph2 = Graph() - sub_graph2.op_names_statistic = Counter(['a', 'd']) - - node1 = Node(graph, 'node1') - node1['sub_graphs'] = ['sub_graph1'] - 
node1['sub_graph1'] = sub_graph1 - - node2 = Node(sub_graph1, 'node2') - node2['sub_graphs'] = ['sub_graph2'] - node2['sub_graph2'] = sub_graph2 - - self.init_telemetry_mocks() - - send_op_names_info('framework', graph) - tm.Telemetry.send_event.assert_any_call('mo', 'op_count', 'framework_a', 5) - tm.Telemetry.send_event.assert_any_call('mo', 'op_count', 'framework_b', 2) - tm.Telemetry.send_event.assert_any_call('mo', 'op_count', 'framework_c', 2) - tm.Telemetry.send_event.assert_any_call('mo', 'op_count', 'framework_d', 1) - - def test_send_shapes_info(self): - graph = build_graph({**regular_op('placeholder1', {'shape': int64_array([1, 3, 20, 20]), 'type': 'Parameter'}), - **regular_op('placeholder2', {'shape': int64_array([2, 4, 10]), 'type': 'Parameter'}), - **regular_op('mul', {'shape': int64_array([7, 8]), 'type': 'Multiply'})}, []) - - self.init_telemetry_mocks() - - send_shapes_info('framework', graph) - tm.Telemetry.send_event.assert_any_call('mo', 'input_shapes', '{fw:framework,shape:"[ 1 3 20 20],[ 2 4 10]"}') - tm.Telemetry.send_event.assert_any_call('mo', 'partially_defined_shape', - '{partially_defined_shape:0,fw:framework}') - - def test_send_dynamic_shapes_case1(self): - graph = build_graph({**regular_op('placeholder1', {'shape': shape_array([dynamic_dimension_value, 3, 20, 20]), - 'type': 'Parameter'}), - **regular_op('mul', {'shape': int64_array([7, 8]), 'type': 'Multiply'})}, []) - - self.init_telemetry_mocks() - - send_shapes_info('framework', graph) - tm.Telemetry.send_event.assert_any_call('mo', 'input_shapes', '{fw:framework,shape:"[-1 3 20 20]"}') - tm.Telemetry.send_event.assert_any_call('mo', 'partially_defined_shape', - '{partially_defined_shape:1,fw:framework}') - - def test_send_undefined_shapes(self): - graph = build_graph({**regular_op('placeholder1', {'shape': None, - 'type': 'Parameter'}), - **regular_op('mul', {'shape': int64_array([7, 8]), 'type': 'Multiply'})}, []) - - self.init_telemetry_mocks() - - send_shapes_info('framework', graph) - tm.Telemetry.send_event.assert_any_call('mo', 'input_shapes', '{fw:framework,shape:"Undefined"}') - tm.Telemetry.send_event.assert_any_call('mo', 'partially_defined_shape', - '{partially_defined_shape:1,fw:framework}') - - def test_send_dynamic_shapes_case2(self): - graph = build_graph({**regular_op('placeholder1', {'shape': int64_array([2, 3, 20, 20]), 'type': 'Parameter'}), - **regular_op('placeholder2', {'shape': int64_array([7, 4, 10]), 'type': 'Parameter'}), - **regular_op('placeholder3', {'shape': shape_array([5, 4, dynamic_dimension_value]), - 'type': 'Parameter'}), - **regular_op('mul', {'shape': int64_array([7, 8]), 'type': 'Multiply'})}, []) - - self.init_telemetry_mocks() - - send_shapes_info('framework', graph) - tm.Telemetry.send_event.assert_any_call('mo', 'input_shapes', - '{fw:framework,shape:"[ 2 3 20 20],[ 7 4 10],[ 5 4 -1]"}') - tm.Telemetry.send_event.assert_any_call('mo', 'partially_defined_shape', - '{partially_defined_shape:1,fw:framework}') diff --git a/tools/mo/unit_tests/mo/utils/test_mo_model_analysis_actual.py b/tools/mo/unit_tests/mo/utils/test_mo_model_analysis_actual.py deleted file mode 100644 index b8ed4f19b9f462..00000000000000 --- a/tools/mo/unit_tests/mo/utils/test_mo_model_analysis_actual.py +++ /dev/null @@ -1,185 +0,0 @@ -# Copyright (C) 2022 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch, Mock -import onnx -from onnx.helper import make_graph, make_model, make_tensor_value_info -import os -from os import environ -import 
json -import argparse -from openvino.tools.mo.convert_impl import prepare_ir -from openvino.frontend import FrontEndManager # pylint: disable=no-name-in-module,import-error - - -try: - import openvino_telemetry as tm - from openvino_telemetry.backend import backend_ga4 -except ImportError: - import openvino.tools.mo.utils.telemetry_stub as tm - - -def base_args_config(): - args = argparse.Namespace() - args.feManager = FrontEndManager() - args.extensions = None - args.use_legacy_frontend = False - args.use_new_frontend = True - args.framework = 'onnx' - args.model_name = None - args.input_model = None - args.input_checkpoint = None - args.silent = True - args.transform=[] - args.scale = None - args.output=None - args.input=None - args.input_shape=None - args.batch=None - args.mean_values=None - args.scale_values=None - args.output_dir=os.getcwd() - args.freeze_placeholder_with_value = None - args.transformations_config = None - args.static_shape = None - args.reverse_input_channels = None - args.data_type = None - args.layout = None - args.source_layout = None - args.target_layout = None - args.frontend_defaults = { - 'onnx': 'legacy', - 'tf': 'legacy' - } - return args - - -class TestMoFallback(unittest.TestCase): - def setUp(self): - environ.update({'MO_ENABLED_TRANSFORMS': 'ANALYSIS_JSON_PRINT'}) - - tm.Telemetry.__init__ = Mock(return_value=None) - tm.Telemetry.send_event = Mock() - - self.models = {} - add = onnx.helper.make_node("Add", inputs=["in1", "in2"], outputs=["add_out"]) - input_tensors = [ - make_tensor_value_info("in1", onnx.TensorProto.FLOAT, (1, 2)), - make_tensor_value_info("in2", onnx.TensorProto.FLOAT, (1, 2)), - ] - output_tensors = [ - make_tensor_value_info("add_out", onnx.TensorProto.FLOAT, (1, 2)), - ] - graph = make_graph([add], "test_graph", input_tensors, output_tensors) - model = make_model(graph, producer_name="MO tests", - opset_imports=[onnx.helper.make_opsetid("", 13)]) - self.models["test_model.onnx"] = model - - input_tensors_2 = [ - make_tensor_value_info("in1", onnx.TensorProto.INT64, (1, 'dyn_dim', 3)), - make_tensor_value_info("in2", onnx.TensorProto.INT64, None), - make_tensor_value_info("in3", onnx.TensorProto.INT64, ()), - ] - output_tensors_2 = [ - make_tensor_value_info("mul_out", onnx.TensorProto.FLOAT, None), - ] - mul = onnx.helper.make_node("Mul", inputs=["add_out", "in3"], outputs=["mul_out"]) - graph_2 = make_graph([add, mul], "test_graph_2", input_tensors_2, output_tensors_2) - model_2 = make_model(graph_2, producer_name="MO tests", - opset_imports=[onnx.helper.make_opsetid("", 13)]) - self.models["test_model_2.onnx"] = model_2 - - split_1 = onnx.helper.make_node("Split", inputs=["add_out"], - outputs=["out1", "out2"], axis=0) - split_2 = onnx.helper.make_node("Split", inputs=["mul_out"], - outputs=["out3", "out4"], axis=0) - output_tensors_3 = [ - make_tensor_value_info("out1", onnx.TensorProto.FLOAT, 'dyn_dim'), - make_tensor_value_info("out2", onnx.TensorProto.FLOAT, 'dyn_dim'), - make_tensor_value_info("out3", onnx.TensorProto.FLOAT, 'dyn_dim'), - make_tensor_value_info("out4", onnx.TensorProto.FLOAT, 'dyn_dim'), - ] - graph_3 = make_graph([add, mul, split_1, split_2], "test_graph_3", input_tensors_2, output_tensors_3) - model_3 = make_model(graph_3, producer_name="MO tests", - opset_imports=[onnx.helper.make_opsetid("", 13)]) - self.models["test_model_3.onnx"] = model_3 - - for name, model in self.models.items(): - onnx.save(model, name) - - def tearDown(self): - del environ['MO_ENABLED_TRANSFORMS'] - for name in self.models.keys(): 
- os.remove(name) - - - @patch('openvino.tools.mo.moc_frontend.analysis.json_model_analysis_print') - def test_model(self, json_print): - args = base_args_config() - args.input_model = "test_model.onnx" - - with patch('sys.exit') as exit_mock: # do not exit execution - prepare_ir(args) - - result = json_print.call_args.args[0] - - assert 'inputs' in result - assert result['inputs'] == json.loads('{"in1": {"shape": [1, 2], "data_type": "float32", "value": "None"}, \ - "in2": {"shape": [1, 2], "data_type": "float32", "value": "None"}}') - - assert 'intermediate' in result - assert result['intermediate'] == json.loads('{"in1": {"shape": [1, 2], "data_type": "float32", "value": "None"}, \ - "in2": {"shape": [1, 2], "data_type": "float32", "value": "None"}, \ - "add_out": {"shape": "None", "data_type": "None", "value": "None"}}') - - - @patch('openvino.tools.mo.moc_frontend.analysis.json_model_analysis_print') - def test_model_with_dyn_shapes(self, json_print): - args = base_args_config() - args.input_model = "test_model_2.onnx" - - with patch('sys.exit') as exit_mock: # do not exit execution - prepare_ir(args) - - result = json_print.call_args.args[0] - - assert 'inputs' in result - print(result['inputs']) - assert result['inputs'] == json.loads('{"in1": {"shape": [1, 0, 3], "data_type": "int64", "value": "None"}, \ - "in2": {"shape": "None", "data_type": "int64", "value": "None"}, \ - "in3": {"shape": [], "data_type": "int64", "value": "None"}}') - - assert 'intermediate' in result - assert result['intermediate'] == json.loads('{"in1": {"shape": [1, 0, 3], "data_type": "int64", "value": "None"}, \ - "in2": {"shape": "None", "data_type": "int64", "value": "None"}, \ - "in3": {"shape": [], "data_type": "int64", "value": "None"}, \ - "mul_out": {"shape": "None", "data_type": "None", "value": "None"}, \ - "add_out": {"shape": "None", "data_type": "None", "value": "None"}}') - - - @patch('openvino.tools.mo.moc_frontend.analysis.json_model_analysis_print') - def test_multi_outputs_model(self, json_print): - args = base_args_config() - args.input_model = "test_model_3.onnx" - - with patch('sys.exit') as exit_mock: # do not exit execution - prepare_ir(args) - - result = json_print.call_args.args[0] - - assert 'inputs' in result - assert result['inputs'] == json.loads('{"in1": {"shape": [1, 0, 3], "data_type": "int64", "value": "None"}, \ - "in2": {"shape": "None", "data_type": "int64", "value": "None"}, \ - "in3": {"shape": [], "data_type": "int64", "value": "None"}}') - - assert 'intermediate' in result - assert result['intermediate'] == json.loads('{"in1": {"shape": [1, 0, 3], "data_type": "int64", "value": "None"}, \ - "in2": {"shape": "None", "data_type": "int64", "value": "None"}, \ - "in3": {"shape": [], "data_type": "int64", "value": "None"}, \ - "mul_out": {"shape": "None", "data_type": "None", "value": "None"}, \ - "add_out": {"shape": "None", "data_type": "None", "value": "None"}, \ - "out1": {"shape": "None", "data_type": "None", "value": "None"}, \ - "out2": {"shape": "None", "data_type": "None", "value": "None"}, \ - "out3": {"shape": "None", "data_type": "None", "value": "None"}, \ - "out4": {"shape": "None", "data_type": "None", "value": "None"}}') diff --git a/tools/mo/unit_tests/mo/utils/utils_test.py b/tools/mo/unit_tests/mo/utils/utils_test.py deleted file mode 100644 index 3b534c502112eb..00000000000000 --- a/tools/mo/unit_tests/mo/utils/utils_test.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import 
unittest - -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, dynamic_dimension_value -from openvino.tools.mo.utils.utils import match_shapes - - -class TestMatchShapes(unittest.TestCase): - def run_match_shapes(self, pattern: list, shape: list): - return match_shapes(shape_array(pattern), shape_array(shape)) - - def test_positive(self): - self.assertTrue(self.run_match_shapes([], [])) - self.assertTrue(self.run_match_shapes([1, 2, 3], [1, 2, 3])) - self.assertTrue(self.run_match_shapes([dynamic_dimension_value, 2, 3], [1, 2, 3])) - self.assertTrue(self.run_match_shapes([1, dynamic_dimension_value, 3], [1, 2, 3])) - self.assertTrue(self.run_match_shapes([dynamic_dimension_value, dynamic_dimension_value, - dynamic_dimension_value], [1, 2, 3])) - self.assertTrue(self.run_match_shapes([dynamic_dimension_value], [2])) - - def test_negative(self): - self.assertFalse(self.run_match_shapes([dynamic_dimension_value], [])) - self.assertFalse(self.run_match_shapes([dynamic_dimension_value], [1, 2, 3])) - self.assertFalse(self.run_match_shapes([dynamic_dimension_value, 2, 3], [1, 3, 3])) - self.assertFalse(self.run_match_shapes([1, dynamic_dimension_value, 3], [2, 2])) - self.assertFalse(self.run_match_shapes([dynamic_dimension_value, dynamic_dimension_value, - dynamic_dimension_value], [2, 3, 4, 5])) diff --git a/tools/mo/unit_tests/mo/utils/version_test.py b/tools/mo/unit_tests/mo/utils/version_test.py deleted file mode 100644 index 6d94ebc4fd7174..00000000000000 --- a/tools/mo/unit_tests/mo/utils/version_test.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import subprocess -import sys -import unittest -import unittest.mock as mock -from unittest.mock import mock_open -from unittest.mock import patch - -from openvino.tools.mo.subprocess_main import setup_env -from openvino.tools.mo.utils.version import get_version, extract_release_version, get_simplified_ie_version, \ - get_simplified_mo_version, extract_hash_from_version, VersionChecker - - -class TestingVersion(unittest.TestCase): - @patch('os.path.isfile') - @mock.patch('builtins.open', new_callable=mock_open, create=True, read_data='2021.1.0-1028-55e4d5673a8') - def test_get_version(self, mock_open, mock_isfile): - mock_isfile.return_value = True - mock_open.return_value.__enter__ = mock_open - self.assertEqual(get_version(), '2021.1.0-1028-55e4d5673a8') - - @patch('os.path.isfile') - @mock.patch('builtins.open', new_callable=mock_open, create=True, read_data='2021.1.0-1028-55e4d5673a8') - def test_release_version_extractor(self, mock_open, mock_isfile): - mock_isfile.return_value = True - mock_open.return_value.__enter__ = mock_open - self.assertEqual(extract_release_version(get_version()), ('2021', '1')) - - @patch('os.path.isfile') - @mock.patch('builtins.open', new_callable=mock_open, create=True, read_data='custom_releases/2021/1_55e4d5673a8') - def test_custom_release_version_extractor(self, mock_open, mock_isfile): - mock_isfile.return_value = True - mock_open.return_value.__enter__ = mock_open - self.assertEqual(extract_release_version(get_version()), ('2021', '1')) - - @patch('os.path.isfile') - @mock.patch('builtins.open', new_callable=mock_open, create=True, read_data='custom_my_branch/fix_55e4d5673a8') - def test_release_version_extractor_neg(self, mock_open, mock_isfile): - mock_isfile.return_value = True - mock_open.return_value.__enter__ = mock_open - self.assertEqual(extract_release_version(get_version()), (None, 
None)) - - @patch('os.path.isfile') - @mock.patch('builtins.open', new_callable=mock_open, create=True, read_data='custom_releases/2021/1_55e4d5673a8') - def test_simplify_mo_version_release(self, mock_open, mock_isfile): - mock_isfile.return_value = True - mock_open.return_value.__enter__ = mock_open - self.assertEqual(get_simplified_mo_version(), "2021.1") - - @patch('os.path.isfile') - @mock.patch('builtins.open', new_callable=mock_open, create=True, read_data='custom_my_branch/fix_55e4d5673a8') - def test_simplify_mo_version_custom(self, mock_open, mock_isfile): - mock_isfile.return_value = True - mock_open.return_value.__enter__ = mock_open - self.assertEqual(get_simplified_mo_version(), "custom") - - def test_simplify_ie_version_release_legacy(self): - self.assertEqual(get_simplified_ie_version(version="2.1.custom_releases/2021/3_4c8eae"), "2021.3") - - def test_simplify_ie_version_release(self): - self.assertEqual(get_simplified_ie_version(version="custom_releases/2021/3_4c8eae"), "2021.3") - - def test_simplify_ie_version_custom_legacy(self): - self.assertEqual(get_simplified_ie_version(version="2.1.custom_my/branch/3_4c8eae"), "custom") - - def test_simplify_ie_version_custom(self): - self.assertEqual(get_simplified_ie_version(version="custom_my/branch/3_4c8eae"), "custom") - - def test_extracting_version_hash_full_with_build_number(self): - self.assertEqual(extract_hash_from_version(full_version="2021.1.0-1028-55e4d5673a8"), "55e4d5673a8") - - def test_extracting_version_hash_full_with_build_number_dirty(self): - self.assertEqual(extract_hash_from_version(full_version="2021.1.0-1028-55e4d5673a8-dirty"), "55e4d5673a8") - - def test_extracting_version_hash_full_with_build_number_private(self): - self.assertEqual(extract_hash_from_version(full_version="2021.1.0-1028-55e4d5673a8-private"), "55e4d5673a8") - - def test_extracting_version_hash_custom_master(self): - self.assertEqual(extract_hash_from_version(full_version="custom_master_55e4d5673a833abab638ee9837bc87a0b7c3a043"), - "55e4d5673a833abab638ee9837bc87a0b7c3a043") - - def test_extracting_version_hash_mo_format(self): - self.assertEqual(extract_hash_from_version(full_version="2022.1.custom_master_55e4d5673a833abab638ee9837bc87a0b7c3a043"), - "55e4d5673a833abab638ee9837bc87a0b7c3a043") - - def test_negative_extracting_version_hash(self): - self.assertEqual(extract_hash_from_version(full_version="2022.1.custom_master"), - None) - - # format from the current nightly wheel - def test_extracting_version_hash_from_old_format(self): - self.assertEqual(extract_hash_from_version(full_version="2022.1.0-6311-a90bb1f"), - "a90bb1f") - - def test_version_checker(self): - setup_env() - args = [sys.executable, '-m', 'pytest', - os.path.join(os.path.dirname(os.path.dirname(__file__)), 'convert/version_checker_test_actual.py'), '-s'] - - status = subprocess.run(args, env=os.environ, capture_output=True) - assert not status.returncode diff --git a/tools/mo/unit_tests/moc_tf_fe/__init__.py b/tools/mo/unit_tests/moc_tf_fe/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/moc_tf_fe/check_info_messages_test.py b/tools/mo/unit_tests/moc_tf_fe/check_info_messages_test.py deleted file mode 100644 index 94729c0e8028db..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/check_info_messages_test.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import io -import os -import unittest -from contextlib import 
redirect_stdout -from unittest.mock import patch - -from openvino.tools.mo.main import main -from openvino.tools.mo.utils.get_ov_update_message import get_compression_message, \ - get_try_legacy_fe_message - - -def arg_parse_helper(input_model, - use_legacy_frontend, - use_new_frontend, - input_model_is_text, - framework, - compress_to_fp16=False, - freeze_placeholder_with_value=None): - path = os.path.dirname(__file__) - input_model = os.path.join(path, "test_models", input_model) - - return argparse.Namespace( - input_model=input_model, - use_legacy_frontend=use_legacy_frontend, - use_new_frontend=use_new_frontend, - framework=framework, - input_model_is_text=input_model_is_text, - log_level='INFO', - silent=True, - model_name=None, - transform=[], - scale=None, - output=None, - input=None, - input_shape=None, - batch=None, - input_checkpoint=None, - saved_model_dir=None, - input_meta_graph=None, - saved_model_tags=None, - output_dir='.', - mean_values=(), - scale_values=(), - layout={}, - source_layout={}, - target_layout={}, - freeze_placeholder_with_value=freeze_placeholder_with_value, - data_type=None, - tensorflow_custom_operations_config_update=None, - compress_to_fp16=compress_to_fp16, - extensions=None, - static_shape=False - ) - - -class TestInfoMessagesTFFE(unittest.TestCase): - @patch('openvino.tools.mo.convert_impl.driver', side_effect=Exception('MESSAGE')) - def run_fail_tf_fe(self, mock_driver): - from openvino.tools.mo import convert_model - path = os.path.dirname(__file__) - convert_model(os.path.join(path, "test_models", "model_int32.pbtxt"), silent=False) - - def test_suggest_legacy_fe(self): - f = io.StringIO() - with redirect_stdout(f): - try: - self.run_fail_tf_fe() - except: - pass - std_out = f.getvalue() - assert get_try_legacy_fe_message() in std_out - - -class TestInfoMessagesCompressFP16(unittest.TestCase): - @patch('argparse.ArgumentParser.parse_args', - return_value=arg_parse_helper(input_model="model_int32.pbtxt", - use_legacy_frontend=False, use_new_frontend=True, - compress_to_fp16=True, - framework=None, input_model_is_text=True)) - def test_compress_to_fp16(self, mock_argparse): - f = io.StringIO() - with redirect_stdout(f): - main(argparse.ArgumentParser()) - std_out = f.getvalue() - fp16_compression_message_found = get_compression_message() in std_out - assert fp16_compression_message_found diff --git a/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py b/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py deleted file mode 100644 index 598a26218eb1a0..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/conversion_basic_models_test.py +++ /dev/null @@ -1,341 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import os -import unittest -from openvino.runtime import Core -from openvino.tools.mo.convert import convert_model -from sys import platform - -# TODO: Segfault on CPU CVS-154874 -@unittest.skip("Segfault on CPU CVS-154874") -class TestMoFreezePlaceholderTFFE(unittest.TestCase): - def basic(self, input_model, argv_input, inputs, dtype, expected, freeze_placeholder_with_value=None, - input_shape=None, only_conversion=False, input_model_is_text=True, use_new_frontend=True, - use_legacy_frontend=False): - path = os.path.dirname(__file__) - input_model = os.path.join(path, "test_models", input_model) - - try: - model = convert_model(input_model, input=argv_input, - freeze_placeholder_with_value=freeze_placeholder_with_value, - input_shape=input_shape, 
input_model_is_text=input_model_is_text, - use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend, - framework="tf") - except Exception as ex: - self.fail("Model conversion failed due to error: {}".format(ex)) - - if only_conversion: - return - - ie = Core() - exec_net = ie.compile_model(model, "CPU") - req = exec_net.create_infer_request() - results = req.infer(inputs) - values = list(results.values())[0] - if dtype is not None: - assert values.dtype == dtype - assert np.allclose(values, expected) - - def test_fp32(self): - test_cases = [ - ( - "in1[1 4]->[1.0 2.0 3.0 4.0],in2[1 4]{f32}->[1.0 2.0 3.0 4.0]", - {}, - np.array([2.0, 4.0, 6.0, 8.0]), - np.float32, - ), - ( - "in2{f32}->[0.0 0.0 0.0 0.0]", - {"in1:0": np.array([[1.0, 2.0], [3.0, 4.0]])}, - np.array([[1.0, 2.0], [3.0, 4.0]]), - np.float32, - ), - ( - "in2->[1.0 15.0 15.5 1.0]", - {"in1:0": np.array([[2.0, 4.0], [12.0, 8.0]])}, - np.array([[3.0, 19.0], [27.5, 9.0]]), - np.float32, - ), - ( - "in1[1 4]{i32}->[1 2 3 4],in2[1 4]{i32}->[1 2 3 4]", - {}, - np.array([2.0, 4.0, 6.0, 8.0]), - np.int32, - ), - ] - for input_freezing_value, inputs, expected, dtype in test_cases: - self.basic("model_fp32.pbtxt", input_freezing_value, inputs, dtype, expected) - - def test_int32(self): - test_cases = [ - ( - "in1[1 4]->[1 2 3 4],in2[1 4]{i32}->[1 2 3 4]", - {}, - np.array([1, 4, 9, 16]), - np.int32, - ), - ( - "in2->[2 5 6 7 3 2]", - {"in1:0": np.array([[2, 4, 1], [1, 2, 8]])}, - np.array([[4, 20, 6], [7, 6, 16]]), - np.int32, - ), - ] - for input_freezing_value, inputs, expected, dtype in test_cases: - self.basic("model_int32.pbtxt", input_freezing_value, inputs, dtype, expected) - - def test_bool(self): - test_cases = [ - ( - "in1[2]->[True False],in2[2]->[True True]", - {}, - np.array([True, False], dtype=bool), - bool, - ), - ( - "in2[2,3]->[True,True,False,True,True,False]", - {"in1:0": np.array([[False, True, True], [False, True, True]], dtype=bool)}, - np.array([[False, True, False], [False, True, False]], dtype=bool), - bool, - ), - ( - "in2[]->True", - {"in1:0": np.array([[False, True, True], [False, True, True]], dtype=bool)}, - np.array([[False, True, True], [False, True, True]], dtype=bool), - bool, - ), - ] - for input_freezing_value, inputs, expected, dtype in test_cases: - self.basic("model_bool.pbtxt", input_freezing_value, inputs, dtype, expected) - - def test_bool2(self): - test_cases = [ - ( - "in1[3]->[1 2 3],in2[3]->[4 5 6],cond->False", - {}, - np.array([4, 5, 6], dtype=np.float32), - np.float32, - None, - None, - False - ), - ( - None, - {"in1:0": np.array([2.0, 4.0, 6.0], dtype=np.float32), - "in2:0": np.array([1.0, 3.0, 5.0], dtype=np.float32)}, - np.array([2, 4, 6], dtype=np.float32), - np.float32, - "cond:0->False", - None, - True # fill a bug to investigate why compilation of this model is hang on - ), - # case: input_shape + freeze_placeholder_with_value - ( - None, - {"in2:0": np.array([1.0, 3.0, 5.0], dtype=np.float32)}, - np.array([2, 4, 6], dtype=np.float32), - np.float32, - "in1:0->[2.0 4.0 6.0],cond:0->True", - "[3]", - False - ), - ] - for input_freezing_value, inputs, expected, dtype, freeze_placeholder_with_value, \ - input_shape, only_conversion in test_cases: - self.basic("model_bool2.pbtxt", input_freezing_value, inputs, dtype, expected, - freeze_placeholder_with_value, - input_shape, only_conversion) - - def test_cutting_fp32(self): - test_cases = [ - ( - "add:0[3],z:0", - {"add:0": np.array([4, 5, 6], dtype=np.float32), "z:0": np.array([1, 2, 3], dtype=np.float32)}, - 
np.array([4, 10, 18], dtype=np.float32), - np.float32, - None, - None, - False - ), - ( - "add:0{i32}[3],z:0{i32}", - {"add:0": np.array([4, 5, 6], dtype=np.int32), "z:0": np.array([1, 2, 3], dtype=np.int32)}, - np.array([4, 10, 18], dtype=np.int32), - np.int32, - None, - None, - False - ), - ] - for input_freezing_value, inputs, expected, dtype, freeze_placeholder_with_value, \ - input_shape, only_conversion in test_cases: - self.basic("model_three_inputs.pbtxt", input_freezing_value, inputs, dtype, expected, - freeze_placeholder_with_value, - input_shape, only_conversion, True) - - def test_placeholder_with_default(self): - test_cases = [ - ( - "x[1,4],y[4]", - {"x": np.array([[3, 2, 1, 5]], dtype=np.int32), "y": np.array([0, -1, -7, 8], dtype=np.int32)}, - np.array([[3, 1, -6, 13]], dtype=np.int32), - np.int32, - None, - None, - False - ), - ( - "x,y", - {"x": np.array([[-3, 20, 1]], dtype=np.int32), "y": np.array([[10, -11, -17]], dtype=np.int32)}, - np.array([[7, 9, -16]], dtype=np.int32), - np.int32, - None, - None, - False - ), - ( - "x", - {"x": np.array([[-3, 20, 1]], dtype=np.int32)}, - np.array([[-2, 22, 4], [1, 25, 7]], dtype=np.int32), - np.int32, - None, - None, - False - ), - ] - for inputs, inputs_data, expected, dtype, freeze_placeholder_with_value, \ - input_shape, only_conversion in test_cases: - self.basic("placeholder_with_default.pbtxt", inputs, inputs_data, dtype, expected, - freeze_placeholder_with_value, - input_shape, only_conversion, True) - - def test_freeze_placeholder_with_unknown_rank(self): - test_cases = [ - ( - "x[4],y->2.0", - {"x": np.array([3, 2, 1, 5], dtype=np.float32)}, - np.array([6, 4, 2, 10], dtype=np.float32), - np.float32, - None, - None, - False - ), - ( - "x[1],y->[2.0,3.0]", - {"x": np.array([3], dtype=np.float32)}, - np.array([6, 9], dtype=np.float32), - np.float32, - None, - None, - False - ), - ] - for inputs, inputs_data, expected, dtype, freeze_placeholder_with_value, \ - input_shape, only_conversion in test_cases: - self.basic("mul_with_unknown_rank_y.pbtxt", inputs, inputs_data, dtype, expected, - freeze_placeholder_with_value, - input_shape, only_conversion, True) - - def test_conversion_tf1_while_default(self): - self.basic("ctc_model_based.pbtxt", None, None, None, None, - None, None, True, True, False, False) - - def test_conversion_tf1_while_use_new_frontend(self): - self.basic("ctc_model_based.pbtxt", None, None, None, None, - None, None, True, True, True, False) - - @unittest.skip("88349: Fix auto-pruning in legacy FE") - def test_conversion_model_oneshot_iterator_use_legacy_frontend(self): - self.basic("model_oneshot_iterator.pbtxt", None, None, None, None, - None, None, True, True, False, True) - - def test_conversion_model_oneshot_iterator_default(self): - self.basic("model_oneshot_iterator.pbtxt", None, None, None, None, - None, None, True, True, False, False) - - @unittest.skip("109220: Use generating script for this test model instead of Git LFS") - def test_conversion_model_with_non_standard_extension(self): - test_cases = [ - ( - "in2{f32}->[0.0 0.0 0.0 0.0]", - {"in1": np.array([[1.0, 2.0], [3.0, 4.0]])}, - np.array([[1.0, 2.0], [3.0, 4.0]]), - np.float32, - ), - ( - "in2->[1.0 15.0 15.5 1.0]", - {"in1": np.array([[2.0, 4.0], [12.0, 8.0]])}, - np.array([[3.0, 19.0], [27.5, 9.0]]), - np.float32, - ), - ] - for input_freezing_value, inputs, expected, dtype in test_cases: - self.basic("model_fp32.frozen", input_freezing_value, inputs, dtype, expected, only_conversion=False, - input_model_is_text=False, 
use_new_frontend=True, - use_legacy_frontend=False) - - @unittest.skip("109220: Make TF FE to return the error") - def test_conversion_dir_model(self): - with self.assertRaisesRegex(Exception, - "Internal error or inconsistent input model: the frontend supports " - "only frozen binary protobuf format."): - self.basic(".", None, None, None, None, - only_conversion=True, input_model_is_text=False, use_new_frontend=True, - use_legacy_frontend=False) - - def test_conversion_pbtxt_model_with_inference(self): - test_cases = [ - ( - {"x:0": np.array([1, 2], dtype=np.int32), "y:0": np.array([4], dtype=np.int32)}, - np.array([-3, -2], dtype=np.int32), - np.int32, - ), - ( - {"x:0": np.array([20, 25], dtype=np.int32), "y:0": np.array([10], dtype=np.int32)}, - np.array([30, 35], dtype=np.int32), - np.int32, - ) - ] - for inputs, expected, dtype in test_cases: - self.basic("model_with_if.pbtxt", None, inputs, dtype, expected, only_conversion=False, - input_model_is_text=False, use_new_frontend=True, use_legacy_frontend=False) - - def test_conversion_model_with_undefined_constant(self): - test_cases = [ - # legacy frontend - ( - "model_add_with_undefined_constant.pbtxt", - "x[2,3]", - {"x": np.array([[2, 3, 0], [1, 4, 6]], dtype=np.float32)}, - np.array([[2, 3, 0], [1, 4, 6]], dtype=np.float32), - np.float32, False, True, - ), - ( - "model_mul_with_undefined_constant.pbtxt", - "x[2]", - {"x": np.array([-1, 2], dtype=np.int32)}, - np.array([0, 0], dtype=np.int32), - np.int32, False, True, - ), - # new frontend - ( - "model_add_with_undefined_constant.pbtxt", - "x[2,3]", - {"x": np.array([[12, 13, 10], [11, 14, 16]], dtype=np.float32)}, - np.array([[12, 13, 10], [11, 14, 16]], dtype=np.float32), - np.float32, True, False, - ), - ( - "model_mul_with_undefined_constant.pbtxt", - "x[2]", - {"x": np.array([11, -12], dtype=np.int32)}, - np.array([0, 0], dtype=np.int32), - np.int32, True, False, - ), - ] - for model_name, argv_input, inputs, expected, dtype, use_new_frontend, use_legacy_frontend in test_cases: - self.basic(model_name, argv_input, inputs, dtype, expected, only_conversion=False, - input_model_is_text=True, use_new_frontend=use_new_frontend, - use_legacy_frontend=use_legacy_frontend) diff --git a/tools/mo/unit_tests/moc_tf_fe/conversion_incorrect_models_test.py b/tools/mo/unit_tests/moc_tf_fe/conversion_incorrect_models_test.py deleted file mode 100644 index 3986cba4c10270..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/conversion_incorrect_models_test.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os -import tempfile -import unittest -from openvino.tools.mo.convert import convert_model - - -class TestMoFreezePlaceholderTFFE(unittest.TestCase): - def test_conversion_fake_pb_model(self): - test_cases = [ - # the default frontend - ( - False, False, None - ), - ( - False, False, "tf" - ), - # new frontend - ( - True, False, None - ), - ( - True, False, "tf" - ), - ] - for use_new_frontend, use_legacy_frontend, framework in test_cases: - with self.assertRaisesRegex(Exception, - "Internal error or inconsistent input model: the frontend supports frozen formats" - " \(.pb and .pbtxt\), SavedModel and MetaGraph \(.meta\), and v1 checkpoints."): - path = os.path.dirname(__file__) - input_model = os.path.join(path, "test_models", "fake.pb") - - convert_model(input_model, - use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend, - framework=framework) - - def test_conversion_empty_model(self): - 
test_cases = [ - # the default frontend - ( - False, False, None, - r"Framework name can not be deduced from the given options" - ), - ( - False, False, "tf", - r"Internal error or inconsistent input model: the frontend supports frozen formats" - " \(.pb and .pbtxt\), SavedModel and MetaGraph \(.meta\), and v1 checkpoints." - ), - # new frontend - ( - True, False, None, - r"Option \-\-use_new_frontend is specified but the Model Optimizer is unable to find new frontend" - ), - ( - True, False, "tf", - r"Internal error or inconsistent input model: the frontend supports frozen formats" - " \(.pb and .pbtxt\), SavedModel and MetaGraph \(.meta\), and v1 checkpoints." - ), - ] - for use_new_frontend, use_legacy_frontend, framework, exp_reg_exp in test_cases: - with tempfile.NamedTemporaryFile( - mode="w", delete=False - ) as tmp, self.assertRaisesRegex(Exception, exp_reg_exp): - tmp.write("") - # on Windows tmp file must be not deleted on close to avoid remove it when reopened by MO - convert_model(tmp.name, - use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend, - framework=framework) - os.remove(tmp.name) - - def test_conversion_fake_model_with_no_ext(self): - test_cases = [ - # the default frontend - ( - False, False, None, - r"Framework name can not be deduced from the given options" - ), - ( - False, False, "tf", - r"Internal error or inconsistent input model: the frontend supports frozen formats" - " \(.pb and .pbtxt\), SavedModel and MetaGraph \(.meta\), and v1 checkpoints." - ), - # new frontend - ( - True, False, None, - r"Option \-\-use_new_frontend is specified but the Model Optimizer is unable to find new frontend" - ), - ( - True, False, "tf", - r"Internal error or inconsistent input model: the frontend supports frozen formats" - " \(.pb and .pbtxt\), SavedModel and MetaGraph \(.meta\), and v1 checkpoints." 
- ), - ] - for use_new_frontend, use_legacy_frontend, framework, exp_reg_exp in test_cases: - with tempfile.NamedTemporaryFile( - mode="w", delete=False - ) as tmp, self.assertRaisesRegex(Exception, exp_reg_exp): - tmp.write("1212234\n12312") - # on Windows tmp file must be not deleted on close to avoid remove it when reopened by MO - convert_model( - tmp.name, - use_new_frontend=use_new_frontend, - use_legacy_frontend=use_legacy_frontend, - framework=framework, - ) - os.remove(tmp.name) diff --git a/tools/mo/unit_tests/moc_tf_fe/conversion_with_checkpoint_v1_test.py b/tools/mo/unit_tests/moc_tf_fe/conversion_with_checkpoint_v1_test.py deleted file mode 100644 index 6a731f20f03b3f..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/conversion_with_checkpoint_v1_test.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tempfile -import unittest - -import numpy as np - -from unit_tests.moc_tf_fe.utils import basic_check - - -class TestBasicConversion(unittest.TestCase): - def prepare_checkpoint_v1(self): - # quite old TensorFlow version can produce checkpoint v1 file - # so have hard coded bytestream corresponding to checkpoint v1 content - # this is a checkpoint v1 for Variable global_step with value = 14108582 of int64 type - buffer_checkpoint = [ - 0x00, 0x00, 0x1B, 0x0A, 0x19, 0x0A, 0x13, 0x0A, 0x0B, 0x67, 0x6C, 0x6F, - 0x62, 0x61, 0x6C, 0x5F, 0x73, 0x74, 0x65, 0x70, 0x12, 0x00, 0x18, 0x09, - 0x22, 0x00, 0x12, 0x02, 0x08, 0x01, 0x00, 0x0F, 0x19, 0x00, 0x67, 0x6C, - 0x6F, 0x62, 0x61, 0x6C, 0x5F, 0x73, 0x74, 0x65, 0x70, 0x00, 0x01, 0x00, - 0x12, 0x17, 0x0A, 0x0B, 0x67, 0x6C, 0x6F, 0x62, 0x61, 0x6C, 0x5F, 0x73, - 0x74, 0x65, 0x70, 0x12, 0x00, 0x1A, 0x06, 0x52, 0x04, 0xA6, 0x8F, 0xDD, - 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x62, 0x29, - 0x33, 0xD3, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xC0, - 0xF2, 0xA1, 0xB0, 0x00, 0x01, 0x02, 0x01, 0x00, 0x51, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x1A, 0x13, 0xD9, 0x46, 0x56, 0x08, - 0x63, 0x0E, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x57, 0xFB, 0x80, 0x8B, 0x24, 0x75, 0x47, 0xDB] - return buffer_checkpoint - - def test_basic_checkpoint_v1(self): - ckpt_file = tempfile.NamedTemporaryFile(delete=False) - checkpoint_byte_stream = self.prepare_checkpoint_v1() - ckpt_file.write(bytes(checkpoint_byte_stream)) - ckpt_file.close() - basic_check(input_model="model_with_variable_v1.pbtxt", argv_input=None, - input_data={'input1:0': np.array([[1]], dtype=np.int64)}, - expected_dtype=np.int64, expected_value=np.array([[14108583]], dtype=np.int64), - use_new_frontend=True, use_legacy_frontend=False, input_checkpoint=ckpt_file.name) diff --git a/tools/mo/unit_tests/moc_tf_fe/conversion_with_layout_test.py b/tools/mo/unit_tests/moc_tf_fe/conversion_with_layout_test.py deleted file mode 100644 index 197ae5ce2764e4..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/conversion_with_layout_test.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import numpy as np -import openvino.runtime.opset11 as opset11 -import os -import unittest -from openvino.runtime import Model -from openvino.runtime import PartialShape, Dimension -from openvino.tools.mo.convert import convert_model -from 
openvino.tools.mo.utils.error import Error - - -class TestConversionWithBatchAndLayout(unittest.TestCase): - def basic_check(self, model_name: str, batch: int, layout: str, refs_shapes: dict): - path = os.path.dirname(__file__) - input_model = os.path.join(path, "test_models", model_name) - ov_model = convert_model(input_model, batch=batch, layout=layout) - - for ov_input in ov_model.inputs: - input_name = ov_input.any_name - assert input_name in refs_shapes, "No reference input shape is found for {}".format(input_name) - input_shape = ov_input.get_partial_shape() - ref_shape = refs_shapes[input_name] - assert input_shape == ref_shape, "Incorrect shape for {} input:" \ - " expected shape - {}, actual shape - {}".format(input_name, ref_shape, - input_shape) - - @unittest.skip("Fix importing of openvino.test_utils in Jenkins") - def test_basic_model_no_layout(self): - from openvino.test_utils import compare_functions - path = os.path.dirname(__file__) - input_model = os.path.join(path, "test_models", "model_fp32.pbtxt") - ov_model = convert_model(input_model) - - # compare with the reference graph - param1 = opset11.parameter([2, 2], name="in1", dtype=np.float32) - param2 = opset11.parameter([2, 2], name="in2", dtype=np.float32) - add = opset11.add(param1, param2, name="add") - ref_model = Model(add, [param1, param2]) - flag, msg = compare_functions(ov_model, ref_model, compare_tensor_names=False) - assert flag, msg - - def test_basic_model_with_layout(self): - test_cases = [ - ( - "model_fp32.pbtxt", 5, "in1:0(cn),in2:0(cn)", - {"in1:0": PartialShape([2, 5]), "in2:0": PartialShape([2, 5])}, - ), - ( - "model_fp32.pbtxt", 9, "in1:0(nc),in2:0(nc)", - {"in1:0": PartialShape([9, 2]), "in2:0": PartialShape([9, 2])}, - ), - ( - "model_fp32.pbtxt", 7, "in1:0(?c),in2:0(?c)", - {"in1:0": PartialShape([2, 2]), "in2:0": PartialShape([2, 2])}, - ), - ] - for model_name, batch, layout, refs_shapes in test_cases: - self.basic_check(model_name, batch, layout, refs_shapes) - - def test_model_with_convolution_dynamic_rank(self): - test_cases = [ - ( - "model_with_convolution_dynamic_rank.pbtxt", 7, "x:0(n???),kernel:0(????)", - {"x:0": PartialShape([7, Dimension.dynamic(), Dimension.dynamic(), 3]), - "kernel:0": PartialShape([2, 2, 3, 1])}, - ), - ( - "model_with_convolution_dynamic_rank.pbtxt", 3, "x:0(???n),kernel:0(??n?)", - {"x:0": PartialShape([Dimension.dynamic(), Dimension.dynamic(), Dimension.dynamic(), 3]), - "kernel:0": PartialShape([2, 2, 3, 1])}, - ), - ] - for model_name, batch, layout, refs_shapes in test_cases: - self.basic_check(model_name, batch, layout, refs_shapes) - - def test_model_expected_failure(self): - test_cases = [ - ( - "model_fp32.pbtxt", 17, "", - {}, - ), - ] - for model_name, batch, layout, refs_shapes in test_cases: - # try to override batch size by default index (without specifying layout) - with self.assertRaisesRegex(Error, - "When you use -b \(--batch\) option, Model Optimizer applies its value to the first " - "element of the shape if it is equal to -1, 0 or 1\."): - self.basic_check(model_name, batch, layout, refs_shapes) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/ctc_model_based.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/ctc_model_based.pbtxt deleted file mode 100644 index 935cb9190c3274..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/ctc_model_based.pbtxt +++ /dev/null @@ -1,5245 +0,0 @@ -node { - name: "inputs" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape 
{ - dim { - size: 1 - } - dim { - size: 28 - } - dim { - size: 1 - } - dim { - size: 96 - } - } - } - } -} -node { - name: "reshape_to_rnn/shape" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 3 - } - } - tensor_content: "\377\377\377\377\034\000\000\000`\000\000\000" - } - } - } -} -node { - name: "reshape_to_rnn" - op: "Reshape" - input: "inputs" - input: "reshape_to_rnn/shape" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "Tshape" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Rank" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 3 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range/start" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 2 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range/delta" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range" - op: "Range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range/start" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Rank" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range/delta" - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat/values_0" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 2 - } - } - tensor_content: "\001\000\000\000\000\000\000\000" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat/axis" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat" - op: "ConcatV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat/values_0" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat/axis" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/transpose" - op: "Transpose" - input: "reshape_to_rnn" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "Tperm" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Shape" - op: "Shape" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/transpose" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "out_type" - value { - 
type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice/stack" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice/stack_1" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 2 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice/stack_2" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice" - op: "StridedSlice" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Shape" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice/stack" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice/stack_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice/stack_2" - attr { - key: "Index" - value { - type: DT_INT32 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "begin_mask" - value { - i: 0 - } - } - attr { - key: "ellipsis_mask" - value { - i: 0 - } - } - attr { - key: "end_mask" - value { - i: 0 - } - } - attr { - key: "new_axis_mask" - value { - i: 0 - } - } - attr { - key: "shrink_axis_mask" - value { - i: 1 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/ExpandDims/dim" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/ExpandDims" - op: "ExpandDims" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/ExpandDims/dim" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "Tdim" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/Const" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 96 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/concat/axis" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/concat" - op: "ConcatV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/ExpandDims" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/Const" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/concat/axis" - attr { - key: "N" - 
value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/zeros/Const" - op: "Const" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - } - float_val: 0.0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/zeros" - op: "Fill" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/concat" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/zeros/Const" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "index_type" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Shape_1" - op: "Shape" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/transpose" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "out_type" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1/stack" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1/stack_1" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1/stack_2" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1" - op: "StridedSlice" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Shape_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1/stack" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1/stack_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1/stack_2" - attr { - key: "Index" - value { - type: DT_INT32 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "begin_mask" - value { - i: 0 - } - } - attr { - key: "ellipsis_mask" - value { - i: 0 - } - } - attr { - key: "end_mask" - value { - i: 0 - } - } - attr { - key: "new_axis_mask" - value { - i: 0 - } - } - attr { - key: "shrink_axis_mask" - value { - i: 1 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/time" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray" - op: "TensorArrayV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1" - attr { - key: "clear_after_read" - value { - b: true - } - } - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: 
"dynamic_size" - value { - b: false - } - } - attr { - key: "element_shape" - value { - shape { - dim { - size: -1 - } - dim { - size: 96 - } - } - } - } - attr { - key: "identical_element_shapes" - value { - b: true - } - } - attr { - key: "tensor_array_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/dynamic_rnn/output_0" - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray_1" - op: "TensorArrayV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1" - attr { - key: "clear_after_read" - value { - b: true - } - } - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "dynamic_size" - value { - b: false - } - } - attr { - key: "element_shape" - value { - shape { - dim { - size: -1 - } - dim { - size: 96 - } - } - } - } - attr { - key: "identical_element_shapes" - value { - b: true - } - } - attr { - key: "tensor_array_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/dynamic_rnn/input_0" - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/Shape" - op: "Shape" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/transpose" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "out_type" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/strided_slice/stack" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/strided_slice/stack_1" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/strided_slice/stack_2" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/strided_slice" - op: "StridedSlice" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/Shape" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/strided_slice/stack" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/strided_slice/stack_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/strided_slice/stack_2" - attr { - key: "Index" - value { - type: DT_INT32 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "begin_mask" - value { - i: 0 - } - } - attr { - key: "ellipsis_mask" - value { - i: 0 - } - } - attr { - key: "end_mask" - value { - i: 0 - } - } - attr { - key: "new_axis_mask" - value { - i: 0 - } - } - attr { - key: "shrink_axis_mask" - value { - i: 1 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/range/start" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: 
DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/range/delta" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/range" - op: "Range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/range/start" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/strided_slice" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/range/delta" - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3" - op: "TensorArrayScatterV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/transpose" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray_1:1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/transpose" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Maximum/x" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Maximum" - op: "Maximum" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Maximum/x" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Minimum" - op: "Minimum" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Maximum" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/iteration_counter" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/iteration_counter" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: false - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Enter_1" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/time" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "frame_name" - value { - s: 
"rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: false - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Enter_2" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray:1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: false - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Enter_3" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/GRUCellZeroState/zeros" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: false - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge" - op: "Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/NextIteration" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge_1" - op: "Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Enter_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/NextIteration_1" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge_2" - op: "Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Enter_2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/NextIteration_2" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge_3" - op: "Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Enter_3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/NextIteration_3" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Less/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/strided_slice_1" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Less" - op: "Less" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Less/Enter" - attr { - key: 
"T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Less_1/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Minimum" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Less_1" - op: "Less" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Less_1/Enter" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/LogicalAnd" - op: "LogicalAnd" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Less" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Less_1" -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/LoopCond" - op: "LoopCond" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/LogicalAnd" -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Switch" - op: "Switch" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/LoopCond" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Switch_1" - op: "Switch" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/LoopCond" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge_1" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Switch_2" - op: "Switch" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge_2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/LoopCond" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge_2" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Switch_3" - op: "Switch" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge_3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/LoopCond" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Merge_3" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Switch:1" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: 
"rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity_1" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Switch_1:1" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity_2" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Switch_2:1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity_3" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Switch_3:1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/add/y" - op: "Const" - input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/add" - op: "Add" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/add/y" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayReadV3/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray_1" - attr { - key: "T" - value { - type: DT_RESOURCE - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayReadV3/Enter_1" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayReadV3" - op: "TensorArrayReadV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayReadV3/Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayReadV3/Enter_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/gates/kernel" - op: "Const" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - dim { - size: 192 - } - dim { - size: 192 - } - } - tensor_content: 
"\036,\342>\277h\337\275\'\200\226\276@r\036>Cv\367>\374\026|\277\244S\346\276\2604\240=\250\277\310\275\312\234\007?\264Ac\276\317Q\024>\337\242\361\276\326\265J=\311\243\312>;\303\037?\321\007\265>O;\236\276q\313\245=\230\272^=\211(\r\277\225Zi\276I\242\370=7\024\302\276\004\315X>]\217\013\277\332\035\203\274}\351@\275\314a{\276\270\264\000=\235\010\346\275\277\226\244<\234r\214\275\271\346\025<-d\361\276\275\277\r\277\346c\014\277\214\202\341\275\231\242\022?\253\376&\275\301Z\004\277\335\355R\276E\017\252>\034\234\314\274v\034\274=\255\212\200\275\022\030\271\2767D!\2760P\204>\316\037\312\275\310\007[>\212\201\337\276\206\\\331\276\340\361\200\275:\344\250\276Tz\r\276\t\306v>\016E@=\241\356\002\277V\322\222>>Jg\276\200\337\225\276t\210\006\277Q!$\276 \026\365\274\311,\335<\374c\225<\243\307Q=f\2624>V\216E\276\367v\001>\233\342\327\276 \2660\277\245S:\277\256\2537\276\n\375\">L\t\276\275\274\346i>>\355=\2777\026\363\275\275%\317=H2w\276G\336`\275\300(8\275\246\322\027?\3568\353>\305D\263\276\315AJ\276\024\202\023?\016\033!\277\213_\224\275\237\204p\276\212\200{\277\333\2314\276\343\203\\\275\252A\177>\026\333\253>\202m\001\276RsS\277\255\205\201\276\301\206\257>Q&\212\276jm\000\276AZ\034>\367W\224>\031\226\264>\014\326]>\306\003\014\276\275\255\343=\263\341\252>\344v%\276\300\026\227?\271\n\351\276?\234\260>\341\352\316\275 \345\314\276\023\345\303>B\014\315\276\021\245\241>\362u\001=X7m><\367\354\276\250z6=\260\206\333=\\\320\275>\304t\227>\024\3674\2765\210.?)\001\314\276\026\315[\276\204\355\037>ni\243\276\215*\230=\016\251\026<\371_\t>>\302\023\275-w\202\276\027R\023>\021\236\305>\024c\320\275\254\"\322>\355\"[\276\023]\304\276\224e\357\276\001\014\310\275\344M}>\321\021\266=\262M\277>\373\264\204>\245\311\000\277\251\342\357\2769\246\034?a\235@<\266\006\344\276\2616\217=\177\335>?;\351p\276\271\014\026>%\026g=\316/\255>\345\004\246\275\310\200\003\275\206-Z\276\264\302:>\351K\246\275Y$@=\377p\352>_\353#\275\006\320,\276\225\377\323\275T\242\034<#\r\322=\312s1\276\336\021\356\274\202\r6?gR\262\2765\275\030\275\212\317\266\276\321\r\205>g\"v\276}\\\301\275Z(\210\2764x\205\263{:>^Q\t>o\016\356\275\376\230\\\277{\226\375\2752+\006\276\\\367\033\275@\214\314\276\362\370\204>f\201\004>\250\217\t\277\"\202\200=#\204x=>\341\034>\276\251u\274\001\024\272\276\r\225\321\276\330\227\310\274H\334\242>\243%\314\276iP\303\275<\357\313\276\227\014\016\277\024\376]\276\363y\250\276\322\006V>\346{\220\276f\274#\277\014\014\214\276\343\275\312\276\312\357\034\277\351\237{\276\322|A\273\374!\274\276\362\367\232>^\305C>,3)>A\200\023?0\354b\276\315\354\332<\211#J\276\024\365\227\276b\337\017>a\254\223>\235\336\303=\327\323\'\273\243G\311>\277\327V\276\313\030\323>\0057\262\276)\231\322\275b\342y\275\202\361\266<|g\010\276\205D\277=\231\247W\277\225\321\t?\300\240\312\275\326\016\201>9)\025\277\002J\207\276M\267\376\276by\016\277\367\222\356\276Y\212(>x\342:>BA=\277\206\321\213>]L\307\274`R\001>.\001\325\275\332\242\323>\272o5\276/\006\231>wb\"\276y\306\200>\3534\240=\320\226\277\276&$j>d\2365\274\024\342\345\275\232\321d\276$]\226>\230\177\367>\200\332\204\2763\336D\276\004N\335>\311\006@>\3325k?\264\3109>]rU\276\275L\366>\300\352\036\274\276}\323\276|\341\355= 
\274!>\277\032\265>\321\255\337\275#\363\037\276F+\002\277\302\254\226=\346\2664\277B\021\231\276..\200\275ow\224>\374\312\000?\327@m>\'\230$>\364\214\243\274\354\354\331<\207\266\"\277\201\230\301>\306E2\276\017\017s=\352&\255\276\225$\n\277K\033\022?fU\256=w\036\302=n\227\245\276\337\202\000\277\302\002|\276\356\024\274=RT\211=\341)\014>\314\211\263\275leA=\262\355K\276\274\214\374\274s\316i\277\024\356;\276&\263\336\276\035H6=\350\"C>\034d\202\276\275n@=:i\243<\210\372\024>5\000\354\276\313\357\026>\301\010\007?^\220\372=\366\332\277=\323\352\002>\007L\207>\251\377\240>\353\200q\276\307\276\331>\256\370\247=\321\211\003>[\r\013\2771g\273>\242\016O>\232\365\301\276E\212\212\275x\321m>@\352\366\276\266\023\215>nP\336\275\247\032:\277K~4>\346\010\311\275\026\236\334>\277\261\3747?\357\rX\276si\253=\230\203\324>\245$\255>\022x\277=\345)\233\274;i\214<\227V\362\276\250Q4>\023\240a\276\2114\017?9\315\261\275\364*\222\276L\300\333>ix\332\275\243\360\335\275\333\321d>\'\253\022<\014r\177>\2463\026>\352\256=>\307\370\203=\354\327\223>u\304\231\274\273\"\"\277\252q\320>\017\247\222>\367b\353=\223\026\262=3\177\217\2763\353\270\276\332\263\331\275\004Z\231>\310\347 \276J\210.>\365?\260\276#\250\223\276Q\251\275>\001\n\372\275\237\331k\276\370$\227>\243\311\232\275\273\260\347\275\351K\006\277(\275\241\276\223\'\325\276\262.s\276\220Sc=W*\204\276\214<\260>4\302i=\335\224\222\275\265F\347\276\342\2214>\233<\350>\270\205\377>\275\223\351>jS\363\2751\273\221>\214n!\276\016\206\276>\030\246\234>Iu7=\2509\220>\374\036\364>\003\206\204>\304|:>G$\333>\2378\016>\375\334\006?p\277V=T~\002>{\310F>\360\245\r?\265\303(=9\r\037\277\335\265\007\276\237t\260>r\340\275\276\311\271(?#,\225>\367\224Y>\005\275\003>q\020]?*\254\316y\264\000\275\\w(\377\206\010\277=-\347>%c\202\276M\032J\276\305\005\026?I\336j\276\242W2\275\\f\270=N&e?\363\347\265=0\203\t?:\307n?s\247r>\245\233%> 
\337\252>\004t\211\276\207\340^\276\305\365\'\277j\263N\276\035\227\002\2777W<\276\240\310\224\231\020Q\2778C$?\020k;?\207\324\256\275\226\223\240>\225\273\337>v\002\242\276\313\035\270=S\375\202\276\203\374\313\276\251\025\346\276\342]\245=_\333\030>|\004x\275\315\267X>(\314\313>u\252\014>XH\206>\342\005\021>w\036\305=\n\337\267\275\220\031\200\276\262\350a>,\227>?\"a\002\276\313\314\231=s\263\027>R\236\271\275\030$Y=\027\357b>\030\217\245\276\0228\222\274\370\323\027?\232\035\301>t\212w>\367\032\022\277\325\242a\276#xV>\212C\241\274T\2511?,\315\347>\270RA=}\313\262>\342AA?-\223\224f\255\000=$VU\273\367\367;\276S\024%\276\034P\n>\352\206\231\275\024\014\201\276\347\261x\276\333\245\213=\340M\216\276&dw\276W\324)\275\220;\365=Xql\275\340\t!>\233\217\350\275\347:\246>\331\010\242\276x+\307>\331\221\223>?\2775\276\177\3115>\335\236\027?z\231L\276e-\252\276\004\244\256>\227\361F\276\022\350\200>\377\266\036\277VA&?\255u\265\276EX\n?X\013\203\276\366\315\252>V#\320>\366\222-\276\335\202\355\276\314\363Q=pd;>w\243N\276f\366\243\275\321\251\260\276&\314\007\274\311\335\321\274\305$!>n\270\230=\035\345\373\276a%\214=\317M\375\275_\277\370\273t(\270>\250\360\302\275\245X\312>YT\002?TE\021\277y\1773>T\016\221>`*)\277\252j\362\276\314\037Z\276:\225\246=L\324->i\367\361\275\307!\270>n\221\000?\267\246\024\276;\356@\277*I\'?\243\201\026\275FC\255>\247\265\371>\263\215\314=\034\262E>\r-\225\276\247L\311>\216\362J?\271D\300>\362\351\\\273\377\0027\276#\254\202>~*f=_.\204\273\3252\303=8\203h\276\322\314\322\275`E\245\276\365\344\025?\226P\017?\036\202\230\276h\271\344\277c\023x\276{\267\037>\261;\317\274\310\215\n\276f\024\370>K\347[\276\324\353~>\231\022G\276\360\336\252>q\t\321>9\327r\276X\305\331=\327E\215>\351x\037\277\301\3319\276)\201G\274\365a\340=Z\251\375\274\231\314:?\314\213\270\276\350(\300=\374\202\277>T\017\014\276c[\273\276\245\026%\277#\200o\275\353\347\3624\334\372>\302d\256\275 \276\203\275\221\327\336\276\323\004\'<4\204\241\276\237\304\026\277\306\230$>rg\206>\270\324#\2766\376\331>?\024#?\033\206\305\276FJ\035\275\036\014L\275S\367\264<\260\002S\2760\350\251>\367\306I\274\266\366\337>6\373H>0\027\312\276*\305\031\276s\325\260>\367\240r=\337\346\275>\201rI\276\006iO\277V\001\013?$\014n><\221&?\337?\233\275\266\323r\277q\366>\277\344\241\376\275\177\0376\276\032\013\333>\375\212w\275$-\352>H\221\014=-S\005>\313s\221=\354\206\250\276\024\306\361=\345g\305>g\216\342\276\"iU>\305\275\025=\260(\214>\224\222\312>\r\230\365>\241\250z\276\363\333\\\276 \033\243\276\271]&>\343y\300>\364\335\272>\210s\233>X\212\354<-c\016\277\373\017\236\274\345\\\r\277Li$\275\276\242\203>\000\344\002>Uo\223<\327\264\233>{t\025>\213&\273=\235\313\032\275:f\236>[\254_\276nWW>\226\376=<\231\235\230\275v\006+?\027\262t>\365\301\274=\252\217\266=,\201\022\277\035\017\207>\232L\240\276F(\313\276\274\3208?W^\002\277\272\334@>\247\276\254\275\242\270\327\275\367c\302=\331\037g\2762\004e\276G\245\267>\024\250\r=\267$\252\276\273\250m>q6B\276\363G\314\276\034!\016?y\321\311=\323j\r?!Q\326\274\260\271!\277P\325\302\276\304z\021>\352\256\035?\256h\212\276\2349\031\277\033vs>\315\316\357>\036\2274\276TSd>K\322\274>x\216\006\277D\r\330<\0005C>\210nQ\275\014\237\211\275\372A\004\277al8\275=\353k>\350\337\365\275\376\346\330;\004\372\217\2763h\367=\353+\303=@,n>K 
m=wy\002?\023=\217<#\203<>\257TM\277\031\211h\275\305\002.=y)(?\350\326:\276\316\212\342\275\305\362\220?\345\263\206\276\352?W>\257\362\030>\355tq\2765\\\300>9\245\010?\277\257\321=\r\262\032?\362\002\250\276\241\003\334\276\364\324\n\277N\261\2549\366\247\276)\340\214\276j\002\342\276\324h\237>A,\236\273R5\303=\371\256\306> \213\313>\207\370\360>\326\264\220\276Y\026\\>@Zx\276-\314\207>\325\224j?\310\322\252>C\235\203\2762:9\275\207\230a\275\332\307\010>1\333\"<\236\350\312\276\361\256y\275\036\226\355\276ZN#=\305[q>\014\247\223\277\261\002G\275\t\260v=\310S\273\276]\207c\276\214p;?\304{\314\276\312\260o>\276\376\'?S\237\355<\037f\005\276\2268 ? U\016\275\367}3>9\032h\276\235{\031=\236\277\020\276\246\250\003\277_$-?z:]=x\007u>\313\314G\2739\024#?\270\200s<\224\342\013?1_\366\2756\031E?)\033\037?\001X;?/\315\236>.\200\261>\333\"\206?`\206\300<\357:s?K\321B>Z\214\'>\232\356\317>)\341\200\276\205\361\271=\027K;\276\274X\260\276Z\335m\275\322;`?\312[\002?G\217l\276kC\370>\352\303\367>\016\351\325>kj\000\277]\031\373>\214\263U\276}\3575?S\262\264\275a\332\226=\307\215u\274\265b\213=6\331\335\274\1770n>\023\241\302\275kO\332>\222\036\244\276\367\205\237\276\'\306\375\275\214j\271>\301\n_>\014\353\371\275(\244\320\276\303J\022>\260\r\217\276\224*\007?<\277\226=\275R\345\276W\304\223\276y;\370>\006t]\276\210Zg>J\316\020?\211\214/>\301S\246>\325\326\366>+\373\214\277z\356\353>\007#\360\275\375c\253\271+\301>\230\035\214\276\325\261\006\276\317\262|\276\2237}?\206\002X\276*\213\264>\332\361L?]\354z\276\251uM?\265\326\002\277\256\266\272;H\321<>\0379\243\276\270P\310=\032\'\037\277\024\014\200>\344\371T>\037}\214>\201~\332;y\375\321>:w\306\276\221$\271\276\360\320\250>\223\t\333\276\314\000P\276\351\\\037>4\302]?\257\211\360\276MO\335\276\273\276.\277Ql\344<\343m\214>J\321a=\002\300\275>\206\243P?^\236M\276E\233`=\007?\377>S7\310\274{@\275\276!\245\010\277\205\354G?\21672>Q\007\307\276~\345\345>;k\234>\362\347\271\276\317\215(>\322\223H\276e\266\037\205\207\014?\r5\242=z?z>3\005)>+\337=\276\265\231<<\202VE\276%\207\023?*\271s\276\242\013\364>\363\243p\276\230\240\243><\357\307>\363\233(>\325]\031\277:0\036>\316\0051=\270(\203\276\243B\236=RB?\277;7\344\276\213\010b\276\365\033\000\277\006\202\017?\344\243\331\360\224\306\276\024G\263\274\367o\236\276\271\263H<\025\347\356\276\250\252A\276\253\203\261=\311\336(=\336Q!?\211\305\314=M\353*?\314\207\332>\303m\246>F\377\250>1\007}>\0132\322\276\006>\021?b\375\217>\001]\314=\224\020x\275\\\333\037\276?\246\367>\372\363\013\277\000s\036>~\254\340\275\231\266\235>Bj\212\2762/\244\275\262\242\r\276)(\006?M\250\337\275\265\276\240>e\033\204\276f\007\036?\330\252\010?\177\230\216>\031\302F>\013\273H\276\3567\r\277,\013\201\276\237\320A\276=a[\276\004\214W>\207!\366\274\303\337J>\331\t\375=\355H(=\035K\031>2\236\252=:\2316>\322N\314>\000.\346\275\206\216f?Q 
\327\274pq\230>\014lO\274t\300\256\275\247\020\201>\236H~>S\247\323>\225\346W>:l\267\276\211\354\n\277\327\025\210>x\337\022\276\322P\"?$L\244\276\261\316\231>\"\026Q>\373\234\361\274\352\316\245=\305\276\336>\377\323\024=y\217\036?wsG>\343\257\013\277\331\013:>\273\335]\276\264AP\276E\2138\276\201%\236=\\\205\245\276x\210\233=\206a\034\276&\306?\277\333X\355\276\3655\357>\013\216w\276\307\330\023\235\202\276\230\253\354\275\235\0104>\204\232\275>G\322\024\277\373/\301\276\267\2170\277\230,\306\276\275\237\327>e\245\355\276\331\324\001?P\207y>\032x\304\276\010\267\271>\024$\274\276\0106?\276\n\355\227=\315%\207=\306\016\305<3\204\260=\221E\333\276\347\267H\275\236F\210>t\375\214>\361\315\200\276d\370\035?\305\367\210>U\345\214>\203=J\276\021\327\330\274g]\016?\357\311-\276\243\310\255\275$C\374\274\357\235\257\275\014a\342\274\260\332z\277!\347\235>[\206\003>\300e\230>\221\267\266\276N\326\010\277\213\204#\277\261\275\"?\275\353\"?-^\376n\0269>\266\033\312>\326\347\332\275\376E\231>\013\003\207>}\033O>U\206\230>\2451\250>[\372\036\277\213\336\332>R\2237\276\344\207\n\277\263Od\276u\001\316i\253\321\275j\000W>\0101\036>&\225\321\276\373J\204=)\017\335\276\353\324\223>6\347\016>\023F\247>\370\343\357\274\0227=\276\245\023\242>4\340\256=\305U\315\276\336I\033\276\031\r\'\277D\246U?\375\253p?\014\031\000=\361@I\276\004\030\311>\271\340\014>\360\275\211\276\357\346V>,\006\322\276\333\317\247\276\265Xw>\260\357H>lj\335\276\034\334\010?\223\347\267\276\254:\341\275\246]\256\276\353T\272\276\2159\231\276d\023\246>t\261$\275\244\037\026?\\\273S>|} \276\245\353\025\277\266\276t\276R\022\033\276\372l\234>\010\267\006?s5,\277\323\253\250=ZUE>c]D\276(@\034<1\216\207>A\351\300\275\343\023B>ff\006?}\363\236>\342\256\342=\274\323\232\276\327u\014?\355\355^\276\353\027\222\276E\226h\276\320V\274>\376b\244>>\311\330\276K\3619\276\"\002\255\275yZ\222\276F\352\223\276\376Z\216\275\332\220D;\366\0149\277m\305\275\276`\226\205=*\261\254;\345d2\276si\321\276gl\304\275U\352\316>\352\316->\316@\322\276\266\321\277>H\266l\277%O\275>?\216\270>q\303\316\276\362\305\253>vv\217\2763\201 \276y0\353=\244t|\276\275\316\333\276o\342>\276\346\231\t\276\300\034\240\276K\023(>\242\2362\277\253\305-\276\363&&?\364RN\277\\W\317\276\376\022\014\277\364\221?>\237\332m\2759\034\021\273\316\246E\277\201\r\020\274O+\363=\271D\t\277\263\247\215\274\252}\037\277\353?\352\274\\\020\222\276\031v\224\276\'\224\251>\321\275+\276\243M2>\0171\244\275`p\026\276\232\371\350\275\r.\323\276\344o4>\206\301]\276\337\2635\276u\277\216=\311\024=\276\351\016Z=g\343\r\277\001#T\275w\212\230>o\326+\274\332\206w=\356A\001>\\I\010\277.\325D\276\327U#\276\214+\261>\001\345I\276\334\372\202;\227\032\315\2763\331\336\2750\224S\277U\360\026\276\275\004G=\320\261\030\277\037\254O>\244 
\306\276\\\314\376\202*\232>#\255\225\275\275\177\335=\244-\357\2760S?\277\tZB\276\204\360\003\277\324#\005\277\257.\253\2761\243\n\275o\367\230\276\375\333\221>\316d\016\276!\302\203\276c\325\252=\372\355+\2768\017\217\276o\362\264=\021>!>k\323\202>\022\266\034>\256\'\235\276\224\007\003?{\005{\276\025W\220=u\237\346\276\303d\311\275\303v\261<\312Q\335\274\240\352\034=9\301\035\275\253\234\271=V\030F\275\r\352\315<\201\312\312\274I\343\231>eIl\276m\336\037>=\203\321\276\345(\324\275\334#\235\275\364|Y\274\364US\275M\232j\276\013\243\314>\200\324\220\276\t\263\255>\206\244\365\275\'\031\365\276\233\364I\276\347\023\262>W\245\316>\266MW\276,hY>z\214\341>\017\336\032>\364\n2\276;rV\277\354\377\244>\314\257B=,\330\334>\022\260\033?\207Or\276F\235\013?l~\r?\252)y\275\037/\340=\311J\250=F\367N>$\220t\276\344\002\273?\371\003\342\276\311\250\300\276|\177\"\276\231\365c=\237\373\266>K\350r\276\221\n\373\275\267\257{\275\217\256\221\275\267\374\315>\276\010 \277m\310\313>t\331\250>%\226B\275\2246#\277\0259i=\314\000\255\276\343\316?>\316\231\234\276\267\t[\275g\217\303>$\374\252=j0\033?#\030e?\335+;?\361\334\262>eqX?\357\002\262\276\277l\321\274\"I\376>\213\344F\277\302\235R\276&*-\276\2313\222>\314\035\232\276P\361\216\275\205lJ<\253\177F\276-\244\027\275\025\274*\275\226\\\337>Z\204\037\277\001\336m\276\250\222[\276\375Dk>Q\226\243\275\271\356\031\275WXI\276\034\214\273\276I\022 \276r\213\347\276\362\353\277>\013\3131?e\254&?\032\332z>f\364\246\275\224\365\221>e\355f>\246\3003>\272\216\210\276\251\231\216\275^\312\021>\265FR\277\334VL\276\002\350V\277O?\250>\302!\247>\017\217\351\276\327\201\255={\242\036\276\270Tn\277\323a\313>\327\236\356\276t\353\303\276\215\233,\277I\376?\276x\203\200\276\316\215\324>\272\366\207>h-\225\274T\335S\275K\264\216>\365\004d\275`\333\202\276[\234\232\276\276\235s>\275\004\305\274d\336\026\275Y\236\343>`\367*\276\\\364k>\3252I\276`\202\240\276:\211@>\266\235\201\274\027\327\032=?\341t\276\244z\335\274+\034\316\2767lN>\275\371e\275\263\031\306\276_\022\305\276jk6>\344\032\204\276\nJ\025\277\231;\376\274\310\3432>\035\277\323\2731\006\207\276dp\215\276\026t\n\277sz\217\276\007$\024\277\340\014\263\275Z\315\024\275\317Py>\2505\000\275<:\201>\025\036_\276\371\207\266\276\224)\204>\245\331\334>\205 \254>\326\221\227>\264\356\351>\371\013\221\275\244\276\033>\237j\026\277\016\373\250>\223l\351\276\202a\005\277[j}=\215F\360\276i\361\247\275\232{\220\276\325\262\227=1\273R\276 /\250>\343\267m>lj\025>\322\334\354=\205\031\004\276\232=\005\276\242\213\254=\344\226B>\035\212\261>\035\2772>\364I3<\\8$\277IiO>\307\366\273\276\253\306\016\277\207\311\334>#\3469?\336j\n\277\036%\321\275\'_i;\242\256i\276\307m\262>\306\250&\277\350\204\257\276n\356\250=\224,.\277 
\370\370\275\037\367\312\276\327\022)\276\240\231>\276\320I]\276\351\353\305\276\177{H\276\307\006\246>L\001\224>\3209+\277\355\277J\275\t\377\261<\037\276e>\360G\354>\214\000S=\\\307\221=\340\230\035\277\354\364\337=\217\021\237\276\371\350\276\276\270\376q>\342\235\005>\325\313\200>\211\310\203\276\225\263b\275%B\374\276\202\3337\276\356^\251\276\354\263\214\276\240\320\374\276\353\226\242>\274`^\275\212\272\332>5\027\247\277wO\343\275\026\324\003?j\321g=G\035\364\273\033\230<\266T\020\276\206\273K=\255;;>\034@>\277\362A\221\275\375\243@\277^N\221?8\000\277\217\021\034=v\304\335=\322\354\304\276\276\3743>\366Xn\277CV;\277\177{\207?\212\353\357>j\263\355\276\310\030D\276G\254\230>\371=\001?\364\340\">\333I\343\276R\265\241\275%\213]>E\255\267\276h*\000>z\243\000>@AF>2\256\203\275\025?\322=\307\214\037>\025D\265\276\306\224\212\277s\031+\276\273\262\026?\361\376\271>u\361H\276A,\352>K\031}\276\220\230B\276\240\t\t\277\244\375\347\274<\224\255\276\026\'1>\024\360\004?\324\2008>\005~\370>\r&\315\276\374=K\276\270\314Z\275U\351h>~\213\223=um3\277q\031b\276\031{\214>\373\3638\276\375\202K\276_\304\272>\222\255\242=5\347\324=\265\237\307\275\303%\r\277B\034\346\275,\023\250>B{\n\277\233\021A>\026\200*\276\016\345\311=\375\004\316< r\022>\271\006\313=\251\014\035\2760)\301>i\322&\276%\021\276>\2719\n\277p,\n>\347\005\276\276\305H\365\247\363\201>8\026\321\276\302*\335=\363S\324\274\033\251\202\276\365\033 \277\341\270^>\304\315\314\275\335{2\275\224z\356\276\200\315\r\276\337\334\344\2759\020\261=|\250\365\276H\272q\275\225\315\347\276%\241\233>\315\262\024\275R\255,\277-\346\262\276\212)@\276-\256U\276n\245\205\275\323\301\251\275\031T\033>\210\t\002>O\207\223\276\321\356\225\275$\210\253>\302\262\030\276X\305\276>u\323\030>\365a\016\277\036Z\216\276\2077;>\253\231\375=\274V\244=S\303\360>\276\224\303>\030v\332;\207Q\267>\037\346\216\276\010\262L<\220z\230>(\375D>)\261t\275\032\305?>^\262U>:\261\215>`k\263>J\220[\276\017\220e>\374\024\264\276\300\316\250>\"\005\246\276\030f\t>i\366L\275^)\247\275Wf\'\275Z9\205>}5\346>\363\376\220\276\302\316b>\270\222;=\347\010\265>sF\375\274\333\032\372>\226\200\035>)\274\237=\317*\"\276\230\0233\276\3656\035=bX\316\276|\365\341\276q[\024\277\324\313+\277\367\031\037\276\256+\230=j\026K?W\235\002>d\225.\277F8\220?\227\017\024\276\203\324\267>\3234I\277G\232\223>2S^\276\376Lj\276\025\206\357\275\031%=\276U\235\213>\302\360W\277\001\234\303\276\017u\205>.o\020>~.{\275%\351\236\276,\376\340\275\321t4? 
\227\201>q\241\221\276T$\366:\356\362\237\276\245$N\276\226\221\376=\021\356V=\264V*\277\267\222\303\016\236\275\276)\261\221\275\\<\313<|r\331\275\271.\225\276\006\334G\277\037\270\202\276\324\3632\276,B\273\275\177\021D\276\253=\017>\340\215\023>3\210\234\275\220$b\276\341T\016\277x\242\267\275\271\236\207\276\262R_>\003\231\007\276@\340\032<\177\032\031\276uA\215>\301\202(>i=\354\276\240Mu>\371\323\256\276?S\246\2762\003\014>i\361\021>\024Z\217>\262 \n\275\215o\314=|+\230\276\270r\275\276\361\034\261<\257\200\227\276,\0003\276W\260\200\276\201er>\304\357[>\022\333\010?\372\016\000>V\0323\277\2122\003;5\337efhZ\2772\273Z?)x\376>=F\000>>1#\277|\203\331\273E\276\331\274#\315\006\277>\234\t\277\237\030X\276\201q\214\276\311O1\277\033\025N>U\266\301\276(8\225=\373T\252>~M8\276\226\220\203>\366\006\235>SL/\277\234\330\263\275Xi\n>\323\262\'>\213\345M\275\260\243U>\257\334\205=i\320\326\275\256\324\266\276\373\006`>e\227%\277\261F\t\277]\274&\276\000\265A>\223\257\277=\275K*\252\322\213\276\236w&=\177\365\271\275\351yy>\227\306\206\276%<-\276\204\026E>9\010\203\276\031!4\277!\324\036>SEA=\237\341\327\276U\020W\277\326\003\243\274\204\241*\275\365\274<\276$a\375\276<\204\371>\211\345J>$\026`=\371GC>\234\221\242\276O\224\206>\373\225\320\2768)\026\276\262f\325<\314\"\252>\351\243A\275\t\206\326\275\001o\227\276!K\313\276\362\207.>w?4\276\0312b\276\356\364\005?\301W\206\277C\234O\276\001\230\243>@\222\254>\371\037\034\277\206\334a>\217\344\377>\305f,\276\013\373\207>\246,\360=(+\306>\210\361\036\276\316\372\314=\331A\024\276t\037\273\275\255^\273>L\300\207\276\302)\017\276\357\036J\276\351\006\251\276\323o\010=8F\207=\247\223\317\276\332_b\273\233o\\\276y\377\210\276e\3728\276\230L\321\276\010\347/\277\352\235 \277\023JY=\317w\252\276\302\377\032\277\024\251\026>\225#\r\277*\305\370=bg\027>bP\313\275P\245\347\276\362r\236\276\023\224\307\275$\255\373=C\035\204\275\013\242\r\276\353(z\277\023Mm=\035\010z\276;\343\004=\013\020C\276\371Rx=\307b\274\276\244\206\"\277\312M\325\275^\022\264\276\262\243\t\275(\2534\277\331/6\276\334\372\362\2745\010\264\276\241\247\277\274\335\275\345=\266d\026>\001\204\232\275\267\305\322\276+\210\250\2765y\277=y;\037\277\031\240\027\274\352>\356>R\323o\277\345\322K\276\332.V\276\2227T\276\356CS>W{\213>_\230\373>\035\245\270>\2712,>G\241\000\277l)z?B\263\276\274t\346,\276\376\364\237\276\221uI\276\361y1>\327M\322\276\022\273\240\276\273\216\300>O\266\021>w\363\000?\241\310\216>\327\247>?`\0021\276M\303\276\276\275\023\272\275`\016\374\276(\032\007\277\205[\226\276\335{\355=\201\202\377\274#\316\375<\007n<\274\035W\370\276\035\220\321\274\265\373\234\276T\003\370\276<#\214\276\244\322k>\021E\225>\211[\212>\365L\334\276\277L\335>h\266\244\276\205g\225\276A\373#\276^@\226\275m$\246\276\307J<>\024\331\300\275\006\021\360>\356.\336=\200,u>\377L\356\275\222\216\021\277\377\213\272=HS\317=\327\026\231>.EC>\302\022f\276\301hB>\252u\213>!\007\230>\351\320\021\276\222\2607\27682\375=x\356\240>\001oH\274\034\352\220\276\317\211\033\276P\305d\276\236\354q>s\216\025\277\034\342\300>)R\n=\224\001`\275^\261\024\276\020\214?6G=A\276\367iS<\177h\310\276\035\262@=\357=\002\277\336\342\020:R\007\346=U\244/>\206\374B\276C.N>r\301\234>c\244>>\3761*>\351&\344\276f\373\212\276K{\003\276u\222\321\276\245\373$\276\'|i\2769,\220>2\365P\275<\310u>=\004\222\276\035\030\007?\374\001\325=\335\337 
\275\262\355%\276\216I]\276\373\033\370>\267m\225=ll\014>\316\020\333>\314\0214\277\006\337\227>Y\251\002?\200\r\307\276\227\334Q\275\223\240;=OD\237>\235\246\006\277j\242\223=\303\313\216\276u\344\353>\303\324\226\276\005:\320\276\340\266\333\276\354\266\270\275gH\262\274\210\026*>-R\365>\244\267>>*o0\276p\332 >\373\2365>\000\362\316\276\247\377\235>~\351d>\211\256\250\276\254\333%\277\257\335\303\275.|\241\276\302\030\376\276\224\014\337\276b\006\375>%\002\212=\3225/>\307Z\256\276$@\006>\324O\017\277\315{\273=\r\250\217>\036c\241>Ph\263\276\027\377\002?\003|\375\276@\'5?k.0\274\030\213\223>\004\332\261>\327\210M?\354\202\022?\023g\373\275L\017X\277c\177\001?a4\215>\243\001\200=!\244\345<[\n\224=U[\234\275H5\372\2747\326\240>\240\313\203>m\241\241=\311\255\237\276g1\271\275H&&?R\220\271=\300\340\006\276\014Z\226\276\005\224\001\277\313\027\305>:\014\370\276P\215G>\241l\313\276\251\004\225=.\213)>\272\232\345\276@\2671?\376\215\305=i#\301=\351\353\002=\355\342\265=W4\316\275\374\216\205\276\361t\005\276\360\322a\276 %$\27674\363\276\311pS?\355\347-\276\356ws\276x\271\201=\323?\004\276\376\320\007>\266\363\241\275\335\201\310>\267h\013?\332\007l=\300\017\245>|B\305\276B\2620?L\231`=\2119V\276\337\372\227>\246\311\374> ?\373\276\263\031@\276\311,\232\276\207\t\313>\362\207\022?g{\251<\321\024\373\275\334\271\005?\344\271\234\276>&\202>\300\020\007\277\346\003\"\277\260\to\276s$\347<\005\000\362>\356s\027?2\017\354>\220\016\300\275\361\010\252=\260E\032\2779\211\'\277QZ]\277\232:\211>\025\223L\241\r\030\273\t\336f\276\312V\230>i\362\227>Lvf\276\027\361\036>\242\340M\277\270-\022?\242\302\246\273\3216E\275\340AZ\276\007\360\331=\363\335\003?\324dl=\227R\220\276#\201\325>\306K\254=^\203K>& \304\276\271\276\235\276\351\227\203\276wA\250\275\200D4\276\343v\217\276i\327Q>\3549\264\276\365\302%\277\232h9>\204;\262\274\2001\231>\307\305\230\276\031@]\276\266\021\030>|\246\322=\374\314\263\276d\303\035?\270\217\302\276\324\\!\276c\270L>;\315\367>d\232r>[\247\301=8\227J\2767\010\370\2741\314W\275\"\354\262>na;>\373}\250>\317\306\343=t\226\243>\325/\223\2763IV>\216\211\335=)\350 ?c\353\010;T\030\320\275EA:>>\274\274\276\221E_=.n\031>\247\245c>\340\'\304>\352t*\277\250\332\262>8\244\352\276\0016N;\323g\027\276\003\302\275\276\353\\\337=C@\212\275j\246\231>\306\261\326=D\270\031?\364\214)\277\325\203\230\275\334\206\023\277\036\337\340\276\236\372K\277\201\r\317<\233AF\277\235I\311\276\217\350\207>m\276N\277\\r\220\276\215I\020=\3049X\275vV\302=+#\217>^M\352\276\030\003\233\276\347\002\221\276\256-\230\275\300\\\242\276\316\003\206\276e\335->n\333\n\277\341\200U=F\002\366\276\247\331\335\275\374\"5\276\320\346M>\230\206\250\275\002\231?=\221b\265\274\'\310\212\277ba\227\276<,\001\276\366\307\026?k\035\301>pq\244>\222:\315>f\204\306>\027\275\230\276f\033+>Pe\324>\230\337\212=-;\336>\377\302\177\276\201\307\226\276r\356_=\022F4\272\331\203\214\277|\230\362\274\234\336\204\276P\035\274\275\302\n\372\276a\220p>\307\361\321\275\201\000\024\275\340\033N=\255\247s>\r\0219?\242L\235>\370\037\224\276FN\242<\350\2056\275\331>L\277\3004\017\277\370\300\023\276\241ug\273\326%\326\275\205/\302\276\017K\364=R\357\363>tT\361\275\017AG\275I\210m\276K\263[\276\374@\207\275\375\016\207\276qc\246\275Wt\227\275l\201\004>Eh\031\277\334\376N>\r\010\257\276~\302a\276\266X|\275\234\267\377\275\033\354\231\276~\2145\276<\340j\276\224\324\325=\201\322\032?\2606\360\274\033u\307\276\340\371\210\276`T\330>\221\355c\276\352\355V>[\220\013>\222l\213\276\314|B\276\300\3139>\252\317\260\275\242\215\275\275K\001 
?\330%\204\275\315\352\301\275\373\343:\276C\263\236>\024=\205\276\t\2411\276\203\3359\2760\3552\277p\375\270\276Z\347\372\275\275&\275\275C\252P=j\265\240>\333\206\355\276\275\234\270>R\252\207=~{\236=4\230\260\276;.\n\276\037\205\307\276\033\227\220=\004\241\330\275\373(7>\334\0163>\262_<\276\200y\373\275\242HZ\276\252~\212>\235\225q=`\217\263>cq(\276\277\237\207\276\271S\377\276\266\"p>Au\212\275>\000\316\275\316\212\177>\212uT;Z\026\250>\000\305\264\276\246\302\223\276\306\027\353>\255\020\001\277\273T\013\277:2H>(\257B\276\006\017\350=\334\340\247>l\337\021\276Z\232\237>\342\006\031?\365p\227\276P\227\261>)\3165\275\331\"\316=P\343%>Z\365\'>t\321\226\276Hz(\276\225\001\351>f\243 \277\332\320\371\276\372\265\230=\271\344\266\275`\356\222>%\331W\276\337\215\234>m\257\211>\222\254\025=W!\210\275c\022\216\276\335\023\336\276(\3102\276i\301\022\277:qI\275\301.\350>^\255\241>\032N,\276I\022\244\275t\266L\276\336h6\276\tI\350\276\200\037\316>v\307\202>v:\335\276\330M\002\276\234\202\303\275%7!?\247Wm\277\353E\244>\021*\027\275W\324\010\2768\350j>~Y\311>\302\255X>\177_\216>:\023\272>CG\010\276!\024\211\276W\267\216\276A\024\000\276sl\n>\320\363\206<\260\025\301>I\n\204>\235\250\263>\256\244\r>\220\223\274<[D\332\276G.\027?m\202\204>\036\025K\276Ub\235\2754\376\t\274Lk\366=\275\322J\275\275\371d\274&j4\276u/6\276\241\301\360>6\334\376\274\220W\336>3\334\200\274G\353\354\275\376\nC\277\352\224\212=V\002\222\276\3569\017\276\2147\354=\023\341n\276\236\346\026>\210\220\224\276yCb?\177-\024\276\273\200\217\276\021w\210<\240\224\027\277E\251\203\276\3171\"\277+X\037?E(\211<$k\\\276\037B\346\275\256\n\343=\207\227\345\276\220D\253\2757\030\023\277\177\332\334=u\352A\276z\010\325=\"[\276;\234\014\340=3A\007?\3109\037\276\357y\037?\004\246\340>+\242\021\2750\206\216\276\256}\367=\3427\024\276\375\222\001\276\342\036\214\276\233%(?\211\247;\274c\016n=<\270\271\274\'\211\000\276.Dp<\310i\216>\025*\333\274\272\202\250=\364\003\372=s\342\214>@\200\203\276\234\267\352\276\211\340\304>O\345\346>\304\211w\276\024j1\277\300\t\221;\275\211S\277\3414\014?\276=\274\275\207\007\310=\272\214\010\277b\276\010\277!\311\312>\201s\343\276\372\001\347>\236\360\275>\357}\273>\235\201\304\274t\330\030\276\267|\326\276\"_v=\305M\\;^!d>r9q>/y\000?\245\222\274>N\035R=\215\211>\276\332\034\253=\247/\326\275\251w\367>\376D\"\277\307O\352><^\255>Fc\276<\336\313\275=[\356w\276T\265\226\276\341\017\201>oe\026\277\205o\035<\304\371\250\275\006M\031>\271\013Hv\234\013=\355\271\325>\016\300\013?\250\204\310\276\037\374@>\\r\245<\331(\004?m\031\010\276\025\032(\276\023\217\276\276\025N\345>\314\035 
[octal-escaped binary tensor data from a file deleted in this diff omitted; the raw byte content is not recoverable as readable text]
\"?}\202\250=\336Y\206\274\034\222M?!\321\210\276\373u\314=\237\022\022\276\020\354Z\276\277.\004?\343\355\245>+aj>F\031\"\276\262[(>\371X\217>\033\320\005?\322Y6\276\270\271(=\030\261\375\276\033\367[>\"\333\311:g^`\276wvK\275-\316\306=Bq=<\205iU>\377\3653>\357\005\242=\223%+\276\307s\313=\312\2603>\035c\033\276{D/\276\334]\033\276\273\001f\275\372}\014;\301aw>|\316U\276\274\365\257>Ct\370>\272\344\227\276V7\026\277X\272<\275\242\207\343>\035@\324\275\343\276\227=!\203B\276\372]d\276\216\273\233\276\006:\232>\3642\275=G\325\227\2763\350\001?\n\307\017>\275RF<\032e\321\275\253\366\225\276({\362<\nU\3464\025Y>\205\000\030=\213\217\022=\206\306\017?J\237\031=H\022\r\277\310\342\240\275\205@\224\276\3524\244\276gw\364\276\223[!?K4\024=h\007\341>m[\251>\320\215+>\006\253\357\276\212\322\233\276K\200\005>m\336\r>{\242\243>Y\021\213>b\327x\2767<\354\275\004\331\322\276;\320?= ,\361\274\356d\027?w\035\330>\313\224V\277#Q\345\276s3\257\277\263\240\001G\350\257\276\032\014\312\276i\311\212\276\316\0212\273\341\220\020\277k\300\355\275)9h>J\360\201\277PD&\276\304\313\371>\340\277\000\277\255\240\366=\230\006\217>\3675\376\275\032\026t\276\344\213\235\276d>\241\276G3\247\276o\221q\276}\275\332\276\361\264\315\276w\010\034\276L\305\315[\340\032\276n\254<>\326\351T\277\214\202\\\277n\202\315\276\256\rt\276i!)\277D\267\276>c\246\312\275\305\255\234>\371\243\003?\221\364\211\277g\'\322>\314g\216>\205\221\204\276\2257\231>\303\301-\277Y\027\246\276\302QW\276\376X\200>w\325\261>\333\361\n?\331\303\017\277\247H\240>\346\365\036\277\211x\230\276~\273a\276\237\035\037\277\344\361\205=\266\373\031h6\r>7\271\323\276\252\366\237\277\335\023\211\275)\360\217\277\241\225\322>\371~\251\276a\005\255\276\246\273\311\276\214\261\247=t\347!\277\201\300\037\277\333\251\262\275\307mC;T\230\326\276og+\2742Y\205\275:\274\317\276)G\365>\322\032\350=\304\345\'>\242\033\232\275\316\034\013>O\311\020\277\350\201\314\276\330\260\\=X\337\376>H\213\r?\272\245j\276\210p\234>\000\332O\276\330\034\305=\320\177\324<(c\003=\\a\223\276\n\206\211\276UaW\276\273\314\261=\313\300\037=\'j/\276\242\223\n?\221G+\276MMP>P\2318\273u\354\204>\210\177\022?\331\034D\276\002\214p>6\365\270;\313v\334\272\032\010\006\276(\322\301<\352\325\006\277\255\331\024?\201\017\303\276%Y\310>h\037\242>\n\266\326\276\026\353\025\276\0275:\276V\021\304<\223p\233=d\314\214=\001+t> 
\273j>\241o\271>t\222\326<\033\rD>\t\354h<\333B\203\275\212\254\351\275\224>?>R\217\357\276\325\326!\275\003_\372\276Z\317\351\276\273d\213=\262\226\203>C\227\026?\335\335K\275/x\252=C\246E>\275mB\276\222N\253\276\242\217\257>i\307\275\276\345c\022\274o\340.\275O\320\317<-\003&?\262\261s>r\177z\276\271*\375\274R\367\250\276\220\273\260\276m-\304>~=\203><\361\241\276\300N\332>`;\203\276\362\227\316\276\307C\216>ZXc>\005>\237\276S*O>|0\337=\306\342n\276b\325\270>w\243#\276!\016\277\276\020\203=>4\024b\277\024}\336\276\203F\340=\266(\357\276+Uq\276l8\214<\034\341\001>\013}\022>ef:\277\027\264\003\276\n\342\261=\271\232s\276\177\351W\277\246\214\244=\206?\003\277E\326\221=\205M\'\275\244s\213\276\251H\026\275\357\267R>\234\252\274>Y\205T\275\372\t\350>\\n4\276\240W\326>]\tD?.~\243\276\326\321\t\277\212]\320>\215\343\316>\233\336\354\275\200l\251>\305{\335\275\206]\027;\273\034\346\275i*P\276#\255I=\0139\303\276$\004\341\276R\376F=\217\271U\276\305\223\206>\303\307\021>E\243\020??\2319\276\307f\036=sC\327\275\375\374\027\277\233nG\277\375?\016=C\2376=\366\311\243\276\271b:?\266\252\263\275k\247\324\276\332\341\342\276K\201\302\276\302\303\202\276\344$g=\246FB\275\005\370\356\276\370#\201?\003\313x\276\235J@>V\264\354\274%\254\003\277\033\225\222\275\304+T<\342\344\010\276\367\rA\277\262\342\340\276\227\202\334\276\002\032\246\274>\224\304=\005\211\234\275\210F\035\276h4-;\220\3059=\323\277\330>F\325O=%z\304\276$\202 \275\354~p=\345\311\331=\343\303\222\276\345k\353>\200L\315\275He[\277\237\216\210\277\243Y\205>w[\337=\347\262\254\274{\231\n\276\276\006\327\276\356C\227\276\353?!\277\304\236\214=\224\237$<\207\374@\276\344\030\301\276\035\360Q\276\331\363\251\275\250\260R>\001\266y;\'\256\303>)\024\223\276@\237\274;\0215\205=\t\203q\276\264B,\277\366\230(\277\321!\343\276G\233\207=\251\323\004\277\376\252\375=\327Y\234\276.1\335\276\353\236\303=\222b)>\210\375\255\275\207\304\027\2759\337g<\313\362\271=\365p \277\001mw\276\277\236\006\276pd\252\274\323\247C=?2\251\275P8\373\275R^8>\346\370\266>(s\250>\032I,\276\031\312\315\276\320\217\360>\001\371\262=\377\375\033>\330\320\204\276\022>\211\276\225\326d\274#\225F<\220\233\227\276\267$\275\275\335\227_\276\3002\005\276\225wv\276\265^\230>\234+\376>$mb>\226H\035>j\003\244\275\373\010\317\276\252Y\327>\031\'\242=2\325\213\274\204+\032\276\232\370\\>\002A\361\276\276\305\375=N\264B=;%\200>\244Az\276m6$=\222:\014=\224\313\227>E-U>a\346\356=\023\373*\277^\260\230>^\267\246\275\356\340t>\037\374\373=\202I\022?[\274\273\276 \020\340\276\334,\276=\345\n\025<\345T\233\276K\026c\276-X\311\276\253\374\312\276\332z\037\276\350\342\203\272\370\3649\276\217\013!>Ik\216>\223+\334\274\367$\313\276\343& \276\315\214\200\276\270+\373\276\260\251\375\2768A\037\277\3045\304>\005\345\024\275bCM>\321z\007\277\206Jy\276\376;\233?\202I\200\2769\362\373\276o\206+\277\322/\273\276 
r\334\276\346\256\304=\312\277\263\276\"\335$>d\266\016\276\3056\245\276b\214\214\276\342\374`\2762\214o>\237\375#>\355\0302\2773\264\222\276\360\372G\276\241l\374\275\373\304;\273\356\346\034\276kz\210\276\304\026\002?\303\212\034\276\367p\220\276\352X\307=\206[\220\276|X\022>\235P\256\276\203\347\261\276`\271U>\007g\026>T\270\346=\274}\327>S\360\343>D\354\355\276\002\335Q>7\327*\277\226\323\363\275\177\243F\276^Y\341\276\202\256\203\276*\267\246\276\034[l\276\342$\363>\364\222\257\276\257\n\251\276\237\026R>-X\343\275\r$<\277$]\231>\326J\276\276\0201\014=6\300\212\277+\351X>i{\214\276j\212\005>\r\377,\277-,\266>_\026\200\276\270&8?J\032@\276\013\3701\276\347\025\303=m\002Q>R\313\004\277L\003\207\275\267yP\277\213\202W\276F\032F\2777P\236\276\370\326??<[\260\275tP\005?\014\237[\276\365\004\302\276\362\261z\277I\2303\277\000\224&\277)\311\212\2765\202\035>6\352w\275\274\311\262\275QX\376>\3414p=]\230\014?\225t\020\276\030\321\215\276`<\312\275\213Lh\276/\255\267=\031\247t>\274z\003?\001\3325?_\2729\276LHZ\275\303\345\217\276Q]`>\023\303\001>Q\307\">\213\303\332=\306,\022\277\311T\244>\031\3210>o\030\364=\353\241\333\275\246\366[\277\274x\016?\325\362\007\275\311}\332>\356\257\027?\016\375\317\273B\231\221={\245\202<\354q\306\276\321\3221\276\334\001\226>Wu\273=\033\226\n>\233\261\221>\014\037Y\275\302!\242=\0045\030=x\337t>\234J\315>g\244\247\276T\017Avg\237>e\257\217>\260\242E?\222AT\276$2\026=\214\317\204><\360\376\275\265\204[>89\212>\006\212\353\275z\275f\275G\006m\276\261u\003\276F\213\\=\2740>>\250\357\310\276\216\307\014\277C\236\371\275\213\356\343\275\014\260\321>\213#\316\276sIv\275\245\212\205>\2454\336=\375\366\\\273\230>\222\274\037\341/>U\217\014\277\017\271~\276\307\326I>\022m\337\276\340-\371\274\217A\206=\370\300\221\275v`\306\276\3648\255\276W\250\036\276.\355\252\275M\257i\2757\r\340=:#[\275\200\270X\276\205\nn\274+\244p=\223av\276\220*S\276i\373\340>\373\341\266\275\377\363\364\275RP\355\276!\221.?z\371\212\2745\260\025\276\214\246r\2754\213N\277\265\322\021\276\325\240E\276\254v\250\276H\263\240\276\330q\243<>\014\240=Y\214D\275\355P\026\275`\025\201?\274\001\311\276\270M\221>\r\217i\277\236 \363=\264g%\276\220\304\240>5P\211\276&\215\024\277\225\232\341\276\301\277!\276\017\246\367\276ax\303>3M\347\276q\357;\276\253\0070\277|\353?\276_\343\311>,xW>\006] 
\276\037\016s\276=\253\n=\262\'~\277#lZ\276\375\371\\=j\004R=\024\227\221\276o\205\014\277|\002\271\276\243\367\313\276@\363\273<\260fG>,\225\246>\255Q\200=\007\214\027\277v\266\333\276\212\"\022>O\253T\277^N\020>\220\237\314\273\214\323\372\275*\323\263<~\273\230=\027\0328>wLB\276$\036\274\276\364\365\013>\254u^\276\235N\306>\335O\006\277N)\013?\275\220\205\275\177_\256\275\304/\250\276d^\231>\266A\017?%R\233\275\2611\013\275\t\t\355>\276c\r>\037\303\006\274|\347\266>}\221\312=H\232\026\276\323\345\210\277\2140\007\277f\301\212>\246l\215>\030\024\026=\374o\367\276\302\261%\275lj\n\277\330\353\010\277\333\025\357=9@\006?\024\316b\276\371g\021\277\005\223\023:#\322\315\276m\243;?\355\223\031\277%!\227\276\226\322M>A*o>/d\025>x\020\203\273\213\210\237\276\2515a\276\\\270i>\222\321\'\276\2211\235>\224\322\226\276\272\364\026\276\342\312\017?Xl\340\276\232\206S?WZ\233>u\230\225=\264I*>\305\014V?\205\341\330>\017\223\314=\300\224\337\276M\371\326\276>&4>\207^\361\275\317\200\007\276A\337\251\274rR\003=\034\375\363\276\'\030\271=VuA\275F\366\327>\'#\267>\235\013~=\3678\234\276[\023\312=,R\205=`p\035>\270\230\016>*8\350\276\322\372\234\276%\016\"\277m-]>\322\344\026\276\023\013\212\276\2029O\275.\225\207=\025\022\235>\353F\200\276c\202\025?\366 \372>\256\214\324=\2734[\276\202\207\314>\320\323\301=\356\036X\277TI\276\275\202\022\301>\273$r\276z\335l>\007\313\360>\036\337\304\276U\375f\276\372W\243\275^\025\232\276\215\034 \276\367\210\271\276\367\275\261\276\366\335\235>\326\235\260\274\375\213\216>\331o\365>\334\n\204\2765DV\2753Kc>\244\341\365\275\306\215\007\277\204\035b\276`\334\004?&\306\256>\251*L<\266=\032\277N\376\014>r\256\032=L\232\232\276\220\223\306=>\251\245:\r\245m>M\352P\276\233\006\203>f\327c=`3\343>Eh\224\276$\326>\276\267!\337\275 IW\276\r),\276AM\321>\207\016\234\276\212\032\211\276\223Q\241>\262|2<\275B_?\303\010\035\277\305\237v>?v\303=N\351\365=\027\022\324=|E\001\277\260\325\272\275\253\271Y\276\257\366j\277u\231\033>\377V\237\276`\177\033?Y\001\312\276&\231\257\274\374\224\001>+\364\007?\035\366\340\365\007\"\275\345\3221\277B\277l>\224\367\014?\273-[>5(\263\276\213\351\005=\246\032\023?t\357\351\276}\333\303\275[\252F\276_1\275\275.\020\027\277*\220\212\275\234\3356>\032\267\306>Lh\273=N\274\344=\004\033\244\276^\342\354=\305\tQ\276\247\350\224\276\342A\260\276\035\253u=\353\226\200=\002\240\035>\201#]\274\301\345\337\275\316\306\014\276N\340\276=5\261\205\276\231\211\010\277f\220\254\276\315#\033\277\345\267u=\030-\022\277\363\034L\277\212d\256=PW\341\276\324\347\345=w\025\245>\307\231\250\276\331\016\017?M\356\240\274F\352\023>\250V\021\274\210\266\342\275\225\201x\276\235X\240\276l_D<\347?\332>9w\000?:sY\276I\034\215\276aI\263\275\177\376o\276\373\244\273\276\216\014\327=>\373\026\276\213\347W\276\357\031\222\276\336\\\355>]\340\264>l\360d>2\367\177=\371\276>\276,\032\032\276\023\262/\276\350\200\265>g\t\037\276\212U6>\214\210\034>\361T\020\277\306\204\034?\333\316\213\275\210A\025\277(\n\227>t+\300=EC\213>\230\200\002=\372o\250\276\337^\013=\274\274\000>\221]\263>\367K\254\276\371\322\2729r;~\277\026g*>e\265\250\276\222OS\275D\020\032>\274o\317\276\325UN\276\266bC>\225L\351\276RZ\004\276!\001\036\276G\2232\276\307Y\247\275\033\376\277\276\237B\025>!\370a>\r\037\025\275\014CH>p\017<\276$\037:>\007\216\232>#\242\235\276y\300H=d\363\302>\342\022\354=\025D\'?\275\275\023\276!oG>\003\207$\277V\326\341\275\252;)<\333<\247>\274\334\211>G@\260\276\t\021P\276\271\020\031?\345N)>SK\342\276b\\%\275/\207V>\237\273\224=^y\240>6\235\207<\267z\354\275\203\376\006?\021\366\257\276Y\24
7\204;\033U(\276/\002o\277Y\221v\276\372I\362>J\224\223mD\034\276\347pT\276\037\032\366\276n@p\276`,\224=\221\272+>\376[\214\275Y\205\223\276r2\n?\360\312\'\277\037@\330\274\213\361\310=\273gB>\306\3304<\033D3\276\2063.\276\251\177\231>|\254H>L5*\276\312\\\032?\021N\311=\\\302\272:\034V\323>^\373\\\275\323\031\352\276\231\275\025\277\344\030\024\275C\017\252\276\247ia\276\351\363\366\274\'\3237>\374\237\326=\032\272\253\275\'\377E?O3K>\223\211\336\276\232\343$\276k\260\313=\261\024\202>\016M*\277\032\036\371\276\227q\337\276}\232\215\276\325\207\024\277f\332\030\277!\222\235\276\314\"\025?\350\007`\276\030r\316=\361\375E\277P\363\017\277\262IG>\016t\236>U4\336\276R@{\2768\313\026>\352*\315\276\202^\307\2763\351\247>\253\277\020\277E\203\022>\'\204\025\276\276:6>)\257\001\277\261\337\205\276e\353\333\275\'\332\'\277\250\201D\277O\035[\276\036Z\236\276x\016\357\275\263\276\214\276Si-\276e\204\347\276\316\005\205=\"\004\370<[\322\204\275w\017\251\276\271\355\"=\333\177\375\276\316\211\006?-X$?\242>H=\302\354h>\0219I?~6\372>\'p\037>\322\004\236>\263\214\314\275\3560\317>\177\340\275\275M\022\371=\230\362\231\276\306\365V=1\351W\277\3238$?\037\306p>\333\255\217\276BZ \277\235_\210\276y\270/\277\265r~>\320\3456>\243\360\276\276o&\252\2761\027\356=\375M\361\276\240\035\351<\242\356\344>\343\211\245\276\021\342->\241x\202\276\255[\312<\2167\221\275\334*>>\r\273\022\276\340-s\276=\016\231\276\215\340\037\325\025\253>9\204\331\276}\000\213\274\346\353\200\276\236K\216\277\024K\030>\326\034J>9\253;\276\310D\037\276\177\365H\276\313\300\001?\272\234n\275\206\342\303\276m\224\343\300\025\037=\267\370\207>\215\025\243\275\ng\331\276\262\206\234\276\032\217\224\276\241\357e<7\224\376<\370\277^\276\347\355\035\276\302\202I\277q\036R>\376\205\226\276#\227X\276`To\275>u6\2770]b>e\001Z\276\375\225\266>~\342\000\277)c\003\277yz\002>GR\256\010\203>\025\352\265\275\314M\305>\177\032\016\277rY\363=\316\325\323\276\357\215\343\275\202\275y>\371(\264=\346E\020\276F\013\274\276\374\303\313\276m\375N=\243\232\227\276(g\200<\013\001\026\277\r\270\231=\266_\303>\211\223H=\373b\371\276f\354.\277\0160\365\273\006\242\332\276\'\305\220\275a:U\276e\031G>\361r\216\276\033\0310\277B\016v=(\344\032\276]\327\343\275\353+\021\276B\262\243\276pJ\230\275\303\323A\275F\2102=\233g`>3?}>\r\331\246=\350>\202>\240\332\336\276U,\331>)\324\334=\007s\274\276\304\2146>\360*\334\276L\336M>\241\223\237\273\360 \325>\343\034N>\031\323<\276\n\372\000\277\215\262\247\276G\354q?\177\320\022?\005s\030=*\301 >\316\336\332>\343\2064=\330\273\027\275L%\364\275\206\214S>\277\371\034?\247\251\212>\177\036\215\276n\001\244\274\021\373\245\"a\377>\276a\003\277\\GG?\350\266\217?\177\337\373\275\245Sh\276\214\376\360\016\035\376>\006\"I?:\367\356>\224A\275=\340&q=K\322\312\275\335] 
?\265~\272>\362N\006\277.lH\275\341\313\240>x\322\215\276\321\002\360\276A\264L<\226~\210>x)\027?<\316\330>\3031:\277\035\370\331>\370\006O\277\360\275\002\277_%\201>\'\222\016?]\320\">N_f?\333\013\254>Z\2151\276\004]\225>2\001\206>?Xo>\354\210\244=\272J\017?;e\376\276\013..?%<\211\276\202\210\357>w~7>\274\245n>\303\002\n\277:\363\300=\215\331R>\305\234\223>\262\022\330>\203E9\276\376\212\240\275\265\230\033?\216\303\005?3f\226>\317\306E:M\254\325\276\260\264\016>\306\345\315=h5\255\276\307P\007\313\013\021>jx\010\2760v\323\274\254\350\262\274\320\231/\277>\326\215\276#\000t>%u\211>t\3472\276\204\317\254=\241\232\242>=\371\212=\034\237\216\276\365\027\307>wL\204\275M\246U\276\372r\355=\265$\022>\373\034\200\277\231_J?\'V\033?\243\321-\277G\030U?M?(\276\361\035\264\276\013\204\344>\241\0134\276mS\266>\352\003\221\275\201\321\253\275|)D>T\203\273\275\364\326\002>\3157\274\276\260\365v>\333\314$?\247M\033?\236\205\r\276\177\374\256=\r\010q\276\033\334\255>\311\363\346>\025~\252>\317\010\306\276\276@\356\276\331c\022=\302\306\027<,e\360\275\365\271d>;[\017?\023\223\007\276\352\321\313\275\324U\006>\215\311\364>~\267\202\276sP\t>\211X\033>\037\302\035>\013|t>x\270\351\2757\221\335>V\304\317\275\334\007\222\27549T\276\245q+=q(\365\274M\205\203=\376\264\330\276;\251\200:\026Bv>) \273>\366\n\232=\211\265\"?,\276/\2773Qk\274k2\036\273\024\366<\2761d\037\276\241\335(\276\030\374\014\2777\3653>\307=\261;\275\211\310<\312!!?rR\332-/\221>h\221\251\276\365\373P;|[I\276\226\232\275\273\352\237\301\275\257\275Q\276\222\037\307\273\346\203Z\277\275\232\037\277\217e\347>E\3419\276\275\251+=\354&\217\275\341\270\237\276No\212>\265\356\020>\037\343\002\277E\032\004\277\252\035u\277\242\322x\276\274XQ\277o>\334\276\367\270\022\277\263D\374\273\0223\244\276\231\214\021\275\"Vr\277i\002\360\275\352\311|\277m\251E\277\031:\225\276\353F\004?\257V\275=\303\304\246\276k\2268>_\007\000\277`*\324=\256\013k\276\217\032\263>D1\276\275\370\200\272<\225\311\205\277\300\323C=Q\2268\277\367\017\220\276\343\025{\276\347\013K\276\210X\202\276\261<\025\276\271t8\277^@\211\277F\225\031\276@5~\276#\205\324\276\273\236\366\274\220\276U\277|\247\353\276g\003\010?\200n\363>>\324\231\276.\304\313\276\322i\017<\274C\004\277\327\030\222\276c0\351\276n\322Y\276\275\344l\276\031\006\255\275\223\324=\277\341\260c\277\230Y\255\276\340\205u\276\376P0\276\265:\257\275(Q-\276\275[\"\277\270\241\210>f\022\236\275\356^^=\240\201\001\277?)\303\276\253\305h\276\260\025\347\276\226\037\010>\256\332\350\276`\217l=\227\204/\276nh\025>!Z\223\276Y\360C\275!\251\331\276\317q\r=Z\205\325\273u\r\345\276~\036*>wU\200>C\254\330\276#\231j\276c\326~P\267\220\276\333\2200\276\277k\214=\027\242\201\275\033[\376\276\346\204\250>\205\246\232>\324;\233\276\367\346\206\276\231\241\202\276\211,N>M7$\276hh#\277\021H\205\276]\036\037?\251\330[\275\202\276\255\276?U\014\277\212\257\037\277\304\353\223\274\273?\263:\345\003.\276-\361d\277 \256N=L\006\211\276\373?R>\200\306G>\005\')\277X$\347>q\257\352\275\036|\252\276\352\030 
?L\007\343=\205\317l\276\367s\241\277\265\000H\275\362\037\271\275\034\305\022\277=/\332\276\310m\350\2751\025\007\276W,\276=\207\227\263\276\002\342\010=0OX\276\314\326\326\276\237+i\276\202\333\024\277\342o\006=\365#\343=\276+\201==\314\314\276\237t\360\275i\203c\276;\330\217>F\373\035\277\317v\010\276\025\251I\276r\360\367\276<\373\034\277S\023\345=Ri\273>\240\330\261>\316\240\227<\216\337\212\276&\305c\2777P\035>5>\336\275D\365w\276\351@\264=W\360\'?\322n\232\276<\200\366\274\331\211N\276\2551\322\276\302\374A\277\377q\200=\374\233\345\276:\003\263\275\372!+\275\327\332q\276E\002\223\276|]\261\276\251e\345\275\020c\333;_7\264\276D\363\323<\356\275\362\276\267\273\211\2762H\223\276Z(\216> \202\210\276i4\177;\240\250\275<\373\302\255\276\216#\200=c\351\037\276\326*`\277lf8?\024\211D>\360\244P\277\355\330\347>\326\200G>;xO=<\332\213\276\345v\216\275\222F!\277\n\356S\276bA\253\276\255\360c\277\310lp\277\342\340\363\276\257\332r\275\034\226F>\243Dv\277\223\016\360>\346\223\306\276\301\251\317\276w\265\t\277n\3276\277b\241{>\nh\221\276\305q\361\275\312\034\025\276*\366\213\275\236r\212\276\341\336#?\225\177\204<\246+\253\276\025\344\246\275\216\245\205>:\203 \277E\213\002?\322\337\322>o5\224?\036~\022\276\236\022\356\276N|\373\275\"\'\236\276\225\217%>\214\017!\277\307\206\363:3n{>v\376\030\277\022\231g>\273\t*\277\024\343\355>\026\341\034?\244\361\016\277\023d\262\274\222\026\227=]c\324=\2675\244\276\345\376)\277\263\260\222\276\300s\267>\036\312\232\276\226\201\207>\023J\034\277\266j\216\275P\321b>\3315\317=\327\"\376\276\021\n\313=X\303p>\324\336 \276\242\253t>D\244q\274\323\370\214=\211Iv>`\271\323>\304\363\326>\231\300K>\343R\207>\252\201\030\277\360\364\305\276\250\216\274\276\332\021\021;k?\241=\260\255/\276\r\342\255\276\363\363\003\276U^\351=\263j\217\276&]\324>\020\246\256\275K\001\216>z \336>\370\354\230\276\357\031y\276\002\2657\273\030\031\177\276\r\266\014\276\004\333\223\2760\270\026>o\210(\276d*\252>r\243\t>\272\360t\276\276\002\214>\351u\362\276\227\366\346\275\253\207\311\276F\254J\276W\"\312\276\245\371\275\276\257%\214>\271\026/\276`\361)\276\020\275\017?\260\0246\277\rD(>\346Al>\331\245\312\276\t]\016?C\254\324>[\326<\276^\031\252=,\245B\2767\036I\277\350\004?>\265\2361\277\"w\220>\260\n\364=>%\256>\347wt>>M9>\3162<\275\336\0172\276\027\032\247>G\'\342\275\264\\\305=r\312$=uH@\276\275:\324>$\241y\276\236\220f\276\240\031\244>\202\355:\277\020\324\323\276\3134\223>\226\247\272>\347\005\035>\213/\245\274\233\327j\277S\000\\=^\373\200\275\374>\213\276\017oO?\306_8\276\001@\212\276Z\256!>_\n\262\276\256\363\252\276\3764\222>\023\004z\275\016D\212>\361\312\211\276GgP>\306t\203>\353\305\004?\306\206\013\276>z0\275\216\n\021>F-\244\276^\016\025\277\020XS>\230\347\'\276\276\006\026\275>\364M\277Ej\275>\346\007s\275Su^>\340u\246\276\020\356\202>\323\264.\277\265#\321>4\230\325<\364\017y\275\353\245\225=TT9>FH\"\276\231a\204=\263Ld:\304\330\346\275\025jw\276\373\273\004\27727\007>\206\r\270>\272\276\220\276q: 
?i\311\255=\023K\270\274A\177\254>1\201\r=\363\225!?[\226\034?\323\0030\277\324\025\354>\201\302N>q\325->!U\326\276u\241\215=\317\324B?\257W\002>#\017\310>\302\355@\276*\274\024?Bn.>\250\323\"\276\213Y\025>\341@\335>\205\342\221>1}W\275_f\004?LDd>_~\254;\262/>>\362Q\364\276Xw\337>\240D{9\314b4>\237\304\217>\3139\256=tm\034??)\355>\010\244,>\261g\277>\333e<\277W\231\235\276\240h\r=\345\217\016?\3678\202>U\316\266>S:2=\247\"\251=\211\343\016>\371\260\374=\001\200\266=\345\305:>\300\372\237>\310_\031\276\212\277U\276\313\215\373\273[8\367>d\022=>m\355R?\25404>\356\243\213\272]\320\236>\266\t\243\27626\250<\216\027\231\276R\n\316>\214T@=\371h\271>\350\245\230>\336\177\005?\310Lb?@\230o\276\346\367\274>\220\215\303>/\327^?}\271\000?\225\224\025>\366\031s\275\322\272\217>\270\214\254>\361\227\202\275]f->\340\324\006\275`l\354\2768\324\346\275\300\363T=\344\257\036\277\245\355Y?p\324\336>\226a\003?\306&\211?\265> \277\210\037\252>\016\202\316>pc\327=T\363\n>\360\'\027?\261K\244>gI\005?\301\221^\276\'\016\226>\356\030\357=QBB\276W y\276Wt\314=\3754Q\276\353\022H\277\020\236\233>\323\310\361>\365\005\200>\372\204\250=#\263\354\276i\371\223\276\246\337\031>\023\202\223\276^Z\352;\276\342\'\277\250?\030\2765\315\231;\352\250\364\276sv\224>\324\227\315\276*&\027>\203{\227=vT\356=,\0218\273\000\003\261\275\227\271\215>]#\211>=\213\357\275\353\230\311\276\203m\364\276\177\2047>\"\003{>i\033\220>\365\323\343\276\312\r\'\276\331\361\216\275=\"\006\275\337q\335\275\215H\263>\265\276\023\276\220u\233>(\350\002?\267l\004>E8\'>A\254\354=\363^f\275R\242\346=\"\220\017\276\021\217\215<\264\035\030\276\332\006\347=:\203\317\276p\377O\276\327\370x>\277\016v\276|\0340\277\223L\237>KD\026?\355\252Z\276\352R\320\276=\265\212=\3770\226\276GG\366=\242M\177\275\316\257J>\347NC\274\'~\313>\260\210(\275\271V\323\274\343\263\003\275\333\217\271\276\344\336l>l\333N>%\276\373=\214`\3778<`\333\276\'\023\365>\001y\236\276\341\214E\276b9\263\276+\251\351>=\'g>^e\273>\007v\211>a\276\023?zAp\275\354\023\031\277o\"\221\276H\021\240\276\311\3013\277\034\261\373\275\372EB\276\002\205e\276\024\223\265\275\211\237\210\276\026\023|\355\3468=g7D=\033\031\021\274\013t\026?\351\017\003\277\273\267\265>\372\025\001\276\203s\275>\350\227\324=\355w\304=\005\355\'>1\256\364=\214\221b\275O\343\033=\n\nU\276.\303\274>\277\357\t=7\246Q\276=\215\'\274\025>\201\275\252\353#>>\243`\276%\324\032?\016t|>\315[\212\275\3479\335=\267\207\233\275(\351z>\203\251_>\350\273:=\270zt\276\000\004\364>w\236\007?>\203e>\306K,\276\007D\271\276\371\204K:\321I\220\276\225\226t;0>\232>N[\333=\251\365Q<\'\240\375>\335g\t\276\341:O\277\260_\004?\022tH\276\363\215\302\276R\242\231\276\320\275\207>\350\316\205>\311f\276=*):<\333\345\007\277X\367\333>R\177\313=\271\271\200\276\262q\031\277\342\r\321\276\351\036\310>\27682>\262\032\343=&\336\025>\346O\204\276?\265\275\276}\004B?\211\017\371=\270\017\330>\310:*\276\352\266Q\276\321X|\276\033\266C<\022\031\020\277\266\006t>&\317\276\2760\321\355=\275E\200\276\331\202\210\276\227\254\023\2763q@\277\362\177\222>>!\203>(\"\313=\361\263\322\276\261N\t\275\342\345\246\276\377\344\023\277S\032d\275\n\221y\276\220\373\331=s\304\021?\363\366C><\235\n>\341\202\346=\306\354\211=\331\325\327=s\230\251>\304\016\035>@\351\311>\032\010\023>\3661f>\205\277\257>\340x\t?\213j\352>F.9?\027\"\222=\371\306\376\274A\364\243\2760\256.\275\3375\342;hc/=\376\316\236>&\257\253>(\366\t?\275q\260\275{g\337\275\302G\n?\307\236Y=1\346\357>b]0>/\265\215>\224\311\000?1\350\010=Hk\037>\247\323\206\276\270FU>\211NL\276\210+K>\020\013\232=\357\223\\\276\342
\256d?mu\013\275Y\214\364>5D\205>\211\007\244\276\352,`=\ny\357=o\310#\276g\2769?Ax\025=\234\316z\276Z\001\245=\316\373X>\200\331u\275\004x\230>\320\312\250>n\315\r=\207N\236\275\344\236\007>%a9\276\327kk=tT\331=;:\017?\2311\007\277\\@:\276\356\335\245<\215)\322\276~8\302=\3325\336\274\037r\273=\2062\247\276J\324\267=_\263r\276Qq\277\276\026BG>\270\227\265\275\275\3423?\316\224\201\275g\310>\276\265\360w\276\r\017;\276%\245\364\275>\260w\276\214\374m\276\236%v>C\375\247>l\032\247=\226\317\245\274M\364s>KY\237=\334\330\354\276\251\353k>\267\007\347\275\236\030\331\276\t\325j\276\3255\224>\373\312\227=\345D\230\276-{e>*\204 \276\321\024\247\276\177c\233\276\334\021q\277S\3715\276\177b\035\275\005\003:>`e\232\276<\263\245\276\314V\270\276\215T \277;\351S\276q\250]?g$\347>nTU\275<\2276>c\212%\277\2152\n=\235\263\022\277\316\275\013\276g{\036\276\300\227\361\276p\nx\276)\276\214\277\246DL\276\316\372\000\277y7\354>P \252\276\023\220\013=\362rw\277\362\233\322\276\237\225\207\276`\227\031\277\270\342\237\276\n\335\001?\020\331\241\2761<\243\276\3506\337<\313\365\336\276;\005\232>\355I\215\276g\304\r\277\026\255\210>g\313$\276\332E\324\276\274x\251>\r9\315\273\r\2322\276\231\332\022\276\020\216\341\276\216\362\353\276\315H\030\277\372h{\276\036\305\261\276\340\027\356\276\276<\177\276|_\314>\274CR\276ps\000\277\272\373\234\276Y\251S\276\252]?\277{\311\252=\t\316D\277\217\3130\274\006\373\215\276\237/\247=\374\274\321\276\342[L?Q\304,\277Z\321\'\277\034z=\277t\251\371=\237u\022\276\343(\232\275\223\257u\276a\351\270\276w\005-\276\361\355\372\276\230\230\376\276*\007\342\276\237ih\276\313\330k\277gS\217>\202\261\027\276@\224\226>\003\211\212\275 \306&=\0213\204\277{\231\367\276uVZ\277\357\235\236=\341\303\r\277\020J\313>\037u\223>I\210\375>b\260\260\276\014\334\354>\014\217\305\276\370\300\277>\235\034h\276\367(\240>\222\343\276>\331N7>q4\274>\354\334G\276\212\310.\277\245Ud\276\225\210\237\2756d;>w\207\002\2759\232\">s\346\304\275\217\243\256\276O\375C\275=\351\250\276\210\305\032=JR\301\276v8\351={f\001\276\354=\356<\356,m\276\021a\003>\303\333W=E\263C>\024\034\202>\023\316y\276\271\255(\277\204+\300\276\270P\233\276\0261\021\276\354\372\214\276:H\014\276Y\022\251\276\305\203\321\276-^0>V\305\373\276\251.\007>\024?!\276B;\037\277\tl\344>3Wh\273\301\260\276\275\007/\240\275\354A\241\276\024x\030\277\344\327\266>\260\374\002\277R`C\276fXy\2762\177\376=\251i\225\276U\302\002\277\306\223-\276|>f>\037\021\003>\002\304\234>\224\t\263\276\\*\303;\003\302\234\276\177\252\325\276\236(\'<\020r\026>T\025\224\276,\025\275\276\036^\013\277h\342\300\275;1V\277\274J\002\277$F\332\275\325\217\336\275Fn:<\"\207\223>\312\261\210\275\177L\306\275\230\345\277\276\025\300\231\275T\036?\277\273\334Q>+9\366\276\207\356`>\341h\026<(^\241=tL->zf/\276\345\255\361\276A\366>\277\241u\221\275=\216~=>^N\277\235#-?Ps\\>\025\273\350>\263\352\304\276Z\005A\276\376\010F?|\r2>\317\025\314\275p0J?oS\244>\277X\320>id\200=!\370%?\277k\341<\030\032\030=In\240\276\242J\000\275\317\312\235\276\345\356\345>\211\030(:H7\236\276\224\204\213?\023\330\330\276\316m\320\275\336\267\311>\0375D\276b\267\002\276s ?\276\'\344\031>(\264E?j\020\246\276r\'.\275\016\324\037>\316A\r?0&\337=\345)E\276\245\253V\277Q\016\272\274\'+\032>\310\353a\276\th8>\330\312\335\276\253,\333>\217\2523?\212\225U=\275\335\274>N\211\300>\246!`>9I\202\277o\364\343\274\234P0\276\353R\217\276Q\236O>J\"D\276;h\232>\016\232\275\276\207\2103?\372\325o>!\025y\277\365\034\377\275h\354 
>\32690?b>\032?vo\212?\342\300l\277\035pu>D\250*\277\0271\220\276\260J\035?vZ\022>\365(\025?\333s\242>:_!\276\240&j\276i\337\217>\223w\\\275Z\321\335\275\237\n\005?\2410\204=\223<\257>[\342%>\242\323\013>06\237>\336\t\205\276\337\256/>\242\357\365\273-\014%>\026j\025\2771\271\337\275<\262\231\276\3672o>\001D\030=\214\372b=\014Y\321\276\020\316\317=k\250\202\275v\325#?{\242\255=E\304\251=\003\347J\325\315\235\276\376\231\016=<\256{>7\265\257>K\006A?\212\272\256>8\271\202\276\326\243\215\276-\001u=\355\270\203\276\275kY>\202\335\313=s\315\037>\327\226\275=P=\r\277\274s\221=\226k\306=\221o$\276\t\2639\276\230\314F\276^\212;\275\241F\346;\t\262U\277\350\020\240>\366K\237=[r\234\275\351\220\253\276G\tG\276@\245\243\274\006\202\316\276\210\323\203\276\315\2552\277\271VK\276)\334\016>\241\t\261>\220.>?\356K\226>#\253\332>\370 \223>0\344\253\275\244Bo=\256\224\014\277\223\006\336\276N\356\320>mtl>\235\317\302\276E{o>u\017\213\274\252\347\254>\333\177[>n=\241\276\377@\003?(\201u?Z)\004=s\214\224\276\301\376<\2766\217\237\276GJ\021?\320\231\220>pNf\276Y\205\363>\324h\270=g8 >\215\ns>\371\007\270=B\2656\276\202=\221\275U\340\316<\363\252\321\276\177\253\213\276\357n\205>(E9\275W\251\361\275\035wl=\335\271\247>\343\177\361:q\247\340\274=\267:\276\023WK>7\215[>^\247\251>\222\242B5\347/\276L6\002?\032\010\325>\361T\232\275\205\\\344=ow\205\276D\221r>\224!\357\275\303%\326\275\326\003\264>\237c\003>\232\242\331>\034\270!?\352\215H\274\\v\271>\313<\274> \007\223>\323\013\201=7t\237>\250\214\004\276\244\254#\275_\246\201>\231\214\233\275\250\254\334>E\013\377>\313v\256=-\327_?\337\217\273=\342I\356\275E\312\\>\014\220\302>e\275\371>\2455\004?,;\316>\2407\025?\3402\005?\371\372)\277\013\337u>RHS\277-\213\303>D\234\313\275g\255\253>o\356]>\356\234H>\260V\367>h\035\300>\234\313\205\276\327\007\256>\"}*>4\030\322\275n\315\327>H\013\030\277\362\013\364>MC\343=\030f\252\275\340\017_\276\362\210\t?\350!\021>\003\332\212\274\364Qq>n\205\323>\212S\240=\250&,?\321%\252>m\271\225=\033\'6\276\020UR>\226\234m>-\242\362\275#\330\t?X4\204\276\216\021\275;\353\277\302>r\020;?\030\217q>?\277\\>\210A\347=\373\332\334=\357I{>\200\000\027>\006\242\367\273\177\200\202\276\217\006@\276\260\362\235\275\177,\227>\213y1>\007)\304>\244\177\025\277\275:\262\275\264\351\223>G-\005?[\025\222>_\326\204>@\322\332>\217\357\341=z\341\363>\327\354\025?\016P\033=\203\231\304\275\325JD?^\220\253\276\226\377\000?xZ\362\275\231\2749?\252\023\203\276\260\352\010\276\022\276\373\273\262\t\r>&\005B>$H\367>\332R;>\233\374I?\376\320\037\276!*\027\277\315\007\350>\257\366\217>\335\371\201>\0263\325\276n\250\332=\001\355\225>\267\266\204\276\343d\336\27585\312>~\310x\275!b\014>|\355T\276\240\374&>\213A\001\276}\260\303=\000\245\362\275z\307\274\276I\034+\276\373i\024\277\241\241\010<\270\277\007\275v\357\270>\361Eq\276,q\026\276\272\307T<\3304\221;\204\373\217>\377\253\020>}\367\032\275\032\031\236\276,-`\276oU\216=\330\262\272\276\261xS\275\030\216\312>\023=\314>)\022\311\276:>!\277\221z~>\354\344\253\275\367\3240\276\362\335\">xt<\277\333R\"?\270\254\337>2\367\200\276\273^\364>\334\307\354>\"\227\235\276GR\006\277V\370\225\276\2062\023>\003A\330\276\272k\233>]\034&>\213X\211\276\216|\217\276\246\352\301\275\300e\225\276\'q\301>\360S\037\276\367#T=C\272\006?;p.>\332\375&=\321\216S\275nl1\276\375w\354=I\223\021?jo\n?\273\031\271\276o\034\245\276<\240}\276T\340\200\275\230\216T\276\177*\026?\\\353=?\253O\242>\220\241\002\276\273\347\265>\266\202\352\275S.\026\277\342EO\277A\007>\277e\036H\276\263E\037=\325\377\353>\000\311\036=\254\244\202\274,H\340>U4
\211C\0313\276\311\230\255\275\244\031\376\276\356\020\024\275\372=g\276\340\'\224\274\033_\205:\373\313\245\276\007\371g\276+\315\371\276sE\250>\242F\375\273\315\322\311\276\334\236\221>\243\355\320=~\336\">\254yL?\265$\004\277mU\000\277-\356\303>A\205\n?\334\034\026\277\304\'\267\272\271\214V>=\377\343\276,\014\364>\237\371H>\014u\365\276\203\306\374>#\217\365\275\376E]\276223\275\332\340\037\277\272x\314\275\221*\212=\212\221\036>\254\3223=\311\246E\277\177\263\235\276\351\345\330\276W\027\216\274\216\030\360\276\024\311\"\276\303\373\364\276\217_\013>\274\005\274\276\354K\246=\355\033\277\275\004M\003>\316\030\311>:\377^<\325B.=H\020s>\270\233\236>\0341\"?[\263\331<\322\243R<\345\246\027\2756\246\r?\216\3234> {\307>Jo\362>{\244\036\277LA\302><\265@\275d\326\006\277pN\202\276\251\021\177\2762\312\201\276I\277r\277A\326f\276\214\245\343\275\207\r@\277=\007\377\276\000\225\241\276\232\010\250\274\364\2744\277V\001\267\27561\241>:\004\337\276K\373\276;\367\376]\274\371d\246\276\326)?\276\205\352$\2777\000\270\276\364)8\277\274\241O=f\006\205\275bJ\030\277\277A\302>\376\214\256\274`$\237\276\253a\310\275\032\250;>\275o\262>\232\250K\277<3(=\322\200\264>\257\256\214\277\267\002\'>\207\334\242=_\274\017\276~\033\277>\265G=\277\236\337\034>\361q\203\2764\334e\277\252~>\274\007\306[>MQ\320\276j\343\275\275X\277\236>\001\207/=\211\267\265\276\300\275\005\2751)\277\275*)\322>@\037\271>C+@\276\305s\216\275\200\'\253\276\302\373n>Z\n\031\277k7e\276\231h\010>\330\032\212\277SW\342\276\376\224@>\034\372\302>4\"{\275\243w\256\275{\263\311\276_\330}>\311\302z\276R*\362\275\263\320\030\277\220\234\000\276\r\325\210\276!\236Z\276c\225\201\276\207\213\322\2764\314\032\276\242kX>\356{M>\364&\360>\370\370\370\275\2779\262\276\020\341S\276\347\010\300>\366\265\217\275\264]9\276N>\340>\025E\202=\231\235~\276F\000J=\226w\236\276\000\214\353>\\$\273>\035&\216\277\220\375\347=E\213\201>\0054\221>\203S\224>S\240\223\276)c\016\277c\024\033\277\323JC>\272\371\276=\255\276\217\276Q\245t>n\312\374\276\037{\204>\220\266\026?\013\r\247>bh\'>\357E\366>\223w\233\351\305\000\277V\034,\275\316\000\026\275\276\330\355\275\236\'\037\2760\360%\276\250\004\264>\010\271\272\275\3410\177>o\351a\276)_)\276M\351\033?\265\212>\277M\002/>\233\226)>aS\177>\030%\336\276\002,\344>\352\232\370\275\362\311\326>\210\375\246=j\271\374\275\314\221\215\276\277\366D\276ni\245\276\311kN=/\3017>v,\257\274\217xQ\276\350\260\232\276\252\023\217=S\032A\275\214\353\221>\031_\341\276\212\200\221\276\337|\031>\214=\255\276\031\1779>~ZB\273\'\347\017?\"\327\266\275\253o\241=PR?>\346\325\220>\236i\375=\342\241\"\277\210g\341\2762\345=>r\177*\277\017\336f;\017\376\215?\375\240\253\276\325Z\301=\367&\215<\330\321\321>xK%\276\014\313\277\276\366\267O\277Z\031\354>\312Q\303=\2060\271\276#Q*>\212\236\037>&\313\253>\332\240J\277)\300\335>\274\370J>5a\230>\307\354\211\276\243\024\200\276\037\363d\276\352\377\307<\022\223\017\276jm\250=\271[\257\275\336\\}\276.\305\374>\246\324\375<\240P\363>\337\\2>W\t\315\275\214\356\205>\235\347?\276J7\356\276Y\004\206\2751\255\242>\246\003\326=V\026\247\275\352\240\351=US\221\277<\3674>\203\244\032\277_\3105<\007\360\375=\213\003\277\275\322\255G\276S\026Z>\251\247\334\275\004\177\265\276\355\300\216\272\3343\237\275K#\231>\375\253n\274&\032\220\273\243\373\344\276\341hi\277LC\216>\244\375\367>#b\312\276\346\350\370\020E\016\277~v\230\276\306\375\316>\310AT\276C\244\355=7\027\035\277\315Z\204>ox\222\274*,\032>(~\326>\233\266]>axp>\236\377G>2\\\232\276\343\2203\276\216O\234\276&5\320\276\2531\340T\317\024\277\250\267\304>l\2
46\373:\355\364\223\2751\216\327=\215\237j\276\3451\021>\302t5>f\002\207=\n\202:>\300Uv\276\030\355x>\217\0070\276\265\326\271>#\344\237=:\0337?c\276\270\275\217\360\325\276Tr\200\276l\212\205>\334g\003\276|\246c=\326\227\225>(\357\244\276\214m\254>\227\0133>\256\007`:\035\240|>\2364\345\275\352B\">\374\005-=\354\314)\275\033E\243=\342[\231=\031ck\276&\261\335>\0268\234\275\372C\327\275k\350G\277yYp\276\t\334\261\274\220\2569>\376\312\213=6\337/>\262\023\373>B\217\236>\006$\'\276\010k\214>\321a\245\276\341\353\253>L\347\020\276\360\246\004\276\002\3653>S\351V>n\034\200\276\232[\265>\016W\323>T\210\214\275\226*\330\275\233\225\024\277f\'\231\276\360\303\020\274\345\257\035\274\027Y\201>\"\262\350\276)st=ihX>\177\253\252\276b\222(>\335R\325\275\017p\255>\371\337\327<\370\306\307\276\212\252;\276\356\374\245><\236\n?\r\016)\277H7\253\276p%_?\365\227\317\275\256b\017\275\345r\236\276)\r\031=\004\345\377\275?\366+>\271oo>\317\270\331\276\323i\234=y\317U\276V \010?\271E\212>\266\252\226\276\'.\301=\021q.?\232t\017?\360\241Y\277/\022g\276\254\010\331>n\313\242\276\303\036\273\274i\275-\276\334]\254>\t\220\212>}Z\310\276\275\371\017\277\335\300G?\"\rD= _\034\277\02260>\273\267\265\275;}\315=/\002\217\276\303\2434>\363%\221>]\023\035\276i`\304\276@y\211>\234Z\350;\251\342:=8MY?\3217s=\277\020P\276\017\211=>\240s\\<\242\026\024=}s\030\277Q\365\275>\262\023W\276\023\3607\276\253\330j?\311\244\035>\244\247\207>z\343y\275\261\026\014?:\t\205>\375\212\351\275d\307\263\276\326\t\200\276\005R\201=\014\276\300>3\217.?E\347^=p[6\277gQ\263>\3726m=\033\301\327\275\200\"W;8\010\234;sXQ\275y\346v\274\367u`>\377\346\004\275|\rB\277Wp\025\276\207\001F>\240\330\017>M\037\303\275\341 \217\276^\300\310>\334\376\230>^C\001?c\353\227>\347\236\205=\3106<> \323\374\276>\240\331\275{\276\030\275D\001\360>\347\263\301\275\'7\035>\2240A>\030\347\331=\\@\231\276\224\004\020?\326\020\312\2755\257\321\276\251\004\334>.\335\246>._\023?o\360\263\276\311\276\231\274,\331\223>\213y\013?\305\322f\275B\306D>\205\002\337>\223\307\222\273\260\250\021?\275\357\025>\347\335\255>|\024\262>\277\220\231\276\270?\325;\013\333Y\276\261*\200\276\177\010\230>\265\177\252\2768H\352>\032`\014=\026^\t\277\343\367\263>\373\216~\276\361\247\215>\007CT>\201\210\374\274\230`T>\255Q\004\277\237\366Q=M\217\247\276T\225\271\275\010\027\005>g\266\310;\307\'k\276m\036\035>\344\234\362\275Y\r^\2764Z\344\276\255H\227\276\377T\\=\224\241K?\312P\263\275B\356E\277\022\372\004?\343\376\230\273\273\213\312=O\331`>\356k\013>w\257\265\274q(\215>x6\343\275m\037\033\276\334\036Q>\\\006\226\276\264\316\271=\363\332\026>\"\342\017\277h\334\251>\347\234\265\273\227%r>c[{>\354\343T\2761o\241>\010\271\224>\227\333\351=\256\351\204\275He\203<\355#H>\372\335><\362Eh>\367D\\>\355\244\026\275\367\3019\275OR6?g\375\271\276\335$\253>\301G\311\276\033\005\007\276r\305H?\224\227l?\326\246\253\276H\004\026\276z\370\201>\242\253s>\356I\024>\030\364\357>\202\3540?RS\261<\264\366\344>\022\231\237?rn\304=\346\216\325\276\314\035\301>\222\221\t>\3078\367=\r\355\341\276\376;\353>\337\027\263\276\266=\233>^g\216\277\277`\207>8b\335>\272\027\247\276g(\023\277\244\357x>?f\356\276T\037\020\276y\233\371\276\037\215\022>u\372\337\276\3645\036?\331\210G>E\341f\275\272a$\276\004\365\336\275i\344\223>m\230H\276g\224\237>\016\314&?2gB>\014\2357\277K>\002>cY\352>P\016G\276\331\0029=\013\2075\274\325p\330>\230\264X=\376\203\374\2751\314\351\275l\325\216\276\314\"\345\274\373\037\243>$\214e\276vnf\275u\267\355\276\235\nX\276]IU>\"\270\004\276=\263!=\032\276\017>%!\200=\245\035D?\027jk>\013<\304\274
\242l$\276\003\006\371\276\306\304\016\276m\361g>P\245\260\275\024\002\331\276y\350\371\275M\246\344\276\003\305\210=\3051\'=T\355\270=Q\333\r>\021\301\007?\010B\330>\002\323\034\276\352U\r\277\010a\366\275T\265\255\275Pg\231\276M-\306>\320\360e?\260iO?a\032\326\276e\306\301\276\376\365\022>Ct\311>\026D\364\276\242<\312>/@0?\036N6?,\332\320\275O.\321<\352\017\240>\2656%\276\276\003\002\277G\354\215\276t\321&>y\020\365\275\321j\342>\323\2434>\233\250\004\276\273\357)=\037\327\355\275\343G\245>\210}j>\255\036\230\274)6\200\274/)\027?\013\322\027\275\"\313\024?\305\350\000\276p[\243\276\356\305\213\275\267zR?hV\263\276Y\354\217>\325/B\276\343C\304\276\343\002\326>\321\010\323\276H\215\253\275\323$\351>*\265\264\276e\201h>\004\311\007\277\035\373\023;#\221u>\355\003\226\276%\024\233\275\271\007F=\212\276~;\314x-\276\230\263\014\277\032\273Q>N\264\265<1\000e>\340\273?\277\235\274*?\214\316W>\014\214\256>\200g\237>R\276\220=2k\376=v\0269?\301A==\262\362!\276\3408\306>O\316\275\276\2152j\275\000i\323\276\365\215\013=\275Q\016?\273\313\003?\370i,\276^\003F>r\321\341=\236\r\034?\376/\021\275p0\222=\330\344\311\275\342/\030\277l\271\244=\214\251\216\276\0216\357\276t\250[\276\246T\023>\013\315\332>\210\266\006?\316>}>\007\276\247\275\332\034\271\275\272<\036>\216\242U\276o\304a>\255]\214\276]v\"\277\003i\343>\004dv>B\233\210\276D\372\017\276\256#L\275\313^j\276`G*\277\005\273\254\275m\302\264\276\237\255)=\206\3466\276\253\032><\241\365\322=\177\202\315\276u\303\333\276\027\001\024\276k\346\201\274\335\246b\276\345\2105\276\361\231\343>\276\356\024\277\370x\266\276u3\230\277>6\303\276\010\331\224\276X4\202=Fm\324\275X\036\314\276\357f;?A\206\001\277\013\240(\277\342\355\300>\002\225\307>\233\213\204\276O\345\306\275v\217\213\276:\016*:<\215}\275\240\032]=\265n\267\276\031\336\372=\315x0\276W5\202=\21358\276\265n\274>\372i\332=f\030\205\276\234\\\365\276=b<\276\251\224\324#\035\334\2769E\342;\002\252\235\275\363\031\326\275\237\241\013\275\355\307d\275BA:\277\216\337\030\276y\354\004?\276>\222>\372,\270>\036;y\277\314\"\223\276\035\354\022\276\276\001\324>!\004_\276\342\213\241\276\360\302k\277\243\354\036>vq/?g\321\336\275\226c\317>\231\302\037\276qXQ\277\217\272\"\277\364\355\260>\t\333\214\276\245P\224\276@\325\231>i\241!>\265l\252>h\r\323\275\021\307Q\276lk\034=\034\037\024=,*\027=\327jC\277\236\360\001?\332\263\356=\323\304\365\276\354V\"\276|\030\216>~to\276\013\'N\277-P\352\276W\023\276<\223+5\276\273\t\340=\320r\226\276\006D\017\277\272\214\333=\210L\245>\211\351\306>\352Y\242=S\000\375>\312\031w=\360Q><\334#$?\221\307\000=C\'\'?/:\034>P\355Z\277\373a\365>\357\023\013=\206\377\311>\347\262\204<\000nV\276\371\363\352\276\344\025\"?%-J\277\362\355\260>\253\300\325\275\321!\021\275\253\206\210=\324\335\032\277\004\333i>\367\340\310>\216X\346\276\374\221\333\274S.\255\276\250\266\320\276\332\317\277>\005Ud\276\250\022\207\276M\027\353=\355\377{>\302\313\201\2741F\000\277\002\031%\276@\361\256\275rd8=\355p\241\276C\241>=\006\177\306\276U\266\263>\214\'7>c\352\021>\235]\263>\354V\272=\332\274\336\276U9\253\276@\022\326\276\345\376\353\274W)\003=\206\214\273\2761v2\274\327x\004>\240\024\276\273\023\013\203\276mh#>\351x2?\242\214\341>\002\311o\275\234\245\177>\310\324\246>\351\200\002\276\024\233R\276\022,L\274\240\316\303\275\022*\303=\236\343\023\277Fh 
\276P\243\275>\263\357\236=\310\014\361>\016\022b\276\270\221k\276*\374\265\276\373iH\276\3143~=C\003#>Ag\336\276\326\262\'\277^\223\017?\003\236\007>nJ.\276\274@\205;BO\300>$\300\215\277\236\255\316\276u\305\005>8\271\253>H\005(=\261\304w\276\213#>?<\037\224\276Vq\003=yX\240>\222\243\325=JW\345>\372z\240\276\201\254\377>\021\2442\2768\224\276\275W\031\314\212\351\024>yl\350\276\323#\010\275l\3003?BlL>\363\237\270=\370\223\231><\242l=\2409\306=$\230\305\275\021Wf?\361\214\311=\371U\336\275K\3374?Zs\200\276=/\036\276\341m\225?\255\236\230\244F\004>\206\321@\276\323$c\277%U\004>\027=\260\275\3313\256=\313\242\250=\303\242\203=\',d\276p\250\017\276+0\002\277\377\316\027\276\022|\013=\251c\261\276\3025\253\274\352\005\265\275:\241\302>El\001\275\323\036\026>(\033\331\276\255\306(>j]\241>f\031\236>\014mW=N#\007>\231\313^<\034M\010\277\022\3527\276|@\234>\253d\334>\014\2553?\026\212\020?6\000\211\272\311d^>U\206\224>\301`\313>=\211\002\276\240\300\332\r\177C\275\354xi\273\201\337\325\276`\307\234<\304 /\272\t\302\030?P\320\321>\340\344\251>l\237w>\204kW?\027K\260<\322\t\346<\342\326B\275\336\014Y=\232\316\004>\241\271\317\275!\277\025?\314\250N>VC\004>\006\246?>O\020\026\275\345\016\t\277\351\363\357\275\301\205\226>qP\003>{\001\254;\375q~\276\360/\373\276%ye=]\325\203\276T\230>\276\255C\240>K\203\262=\346c\243\276:\010A\276z\202X=\276\237\014>\233zC>\316\361\226nc)=-y\227>\221\262\221>\021=\374\275M)y>%%J\276.\376\2759g\227v>\225x\203>\016(\371=S\365\242\275\276\241-?\225\304\255>aM\327\276m0\204=\325r\223>\366\234Y=\177lq>\3052\023\277Nu=\277\374EK\276\320\371\210>\303\336\247\275\030\002=>\235J\225\276Q\221G\276\3473\223>\316;\210>\225\032\324\274\367a\263>\255H:\272\375\2576=\037\265\370=D\331\265>\211m\250\276\347|\343\276\352\t!\276\025\\\377=z\002\320>\376\021\224\276\377\243;>Z\303k\276BI\222<\017^J?0C\031=\007X\014?\247\250\261>SI\005>\375A\006?\3742\245=:\006\203=\341\367\242\275\275\256\226>\324\255#\275\311(V>\307\300\024\276L\227^\276t\201w>\247O\310\275\342\273\234>\240<\002>\345\321w>\262\350\217>\025\323\235>\260\242\337\276\275\243\010>\202\342R>\221\255\302\276h\025\277>\215K@\276\232\333\327>>?\020\276\226U\226>\035\2254=\205\302\264\275\030o\206>\320\037\354\276\312\230i\276U\030\"\277\030\242\342\276\241O\336=\243\373[\276Q\232\023>\226\331\252=%\236\321=\317\037\362=\314\006G>)N}\276\216\257\370>\2776/\276\263\036\236\275\272\253]\276u\317.>\013Q\217\275\254NM\277J\271E=,=M\276\207\341f\276/FY\276\332\275\003>\220\' 
\276\033\022\324\276\273\\P\276B\007\353>\026\210\371=o\037\325>d\334\241>St$?m\036\001\277\327F\230>\210\236\212\276\306\300\256\276\320\204\024>\300\337\344\276~%\243\2759\241\307>m\tt\276A\325\326\276F\251\212\275X9\247\274X\375^\277\374(U\276\267h\030\275-\354\275=2\227\223>\257&\237\276\336\243\t=\324\365\370>x\307\227;\237\336/\277\246J\367\2755S\320\276\364YH?\216\032\216\276\306\370\326<\003\350\234>q\330\235\276\tt\241>\300\243\245\276\024HB\276i\243\r\275\265^f\2767\231\265>cF\253>j\024\221\275\014f\276>\3769\351\276u\351\310>}\244\245\276*\314f\276\212\260\361\276h\t\247\276\220\n\306>q\347\244\275\356\217\220>=\235\265\275\373\210\024\277\022\0257\275\307\352\001>\004\364\226\276\227\331I\276\217\351\321\276;L]>T4\004\277\273\301\001?\371\000\221\275\276G\207=C\350\215\276(\301\342>\251\n+\276AO\241\276\331\340\267\275f\210\373>\325-\245>\033\244\312>!\311}=T\277\356\276\312\256e\276\024\233\023?\253\206b\274\312p%\276\225\220.>\032\027\212\276H\215\267=\001\260\215\276\322\207\233=\262\227\002\277D=\"\276\223K\240\276c\275r>\030\300\022\277\277\020>>\230+O\276f\273n\276\317f\n\277K#\\\2770\265\232\276\2449\026\276~\355\356\276\250/\245=7\246\335\275{v\006\277\264\355\307>\321q\214\276\340\311_\275\221\377#>\352\224\355<\2007c\276*h\n\277\373\356\r?Y\r\363>\022\256\217>\252\202S>\366O\215\276\233%\220\276U\215\211\275H\002\364\275\006\375\233>a\374\003\277\010\244\310=,\365\233\276\311\315\206=\010\026\273\275TM\365>0,\253=m%e\276H\r\201\276\036>\000?4\317\031\275\226\360%\275\345n\001\276\\\220t=^\301\025\277\243\010\225?\177\226\375\275\277\3642\275\205\220\351\275@\272\312={\031\235:\322\347\020?L\033\020\277G\222\266\276\253\266\335\276w5?>\221\253\263>\016\024^>1\r\344>\355\237\242=\326y\271>\216\352P=\275\355\300>\336\224`\276\004\260\263>\265\\\222\276jD4\276!\375\006>\2431\301\2759\020T>\317\202\311\276Z\2560\277\277\005Q>r\321\255\276\022\345\t?N\353\366\276\'\222\372>\372\212\265>p]\003\276\334n\203=\224\347\007>\360\016*=\303A\226=\254\014\026?:l5=r0\220=\025\250\"\277)/!\276\363\301>\276\006\374\026>\003\030\211>e:\255\276\217\241\212\276\200m\371>T\374\\\276R\375q\274\365\002\030?\3031d<(\370\207\276\216s\317\276pj$\276~\223\006\277\346\014\022\276\247\322*;\333e.?\326\376\303=P\200\035\2761y\345\276.\016\220>x\234\266\275\215\260\237\276\310Ql>,\372\217>zm\001?8N\224>\273&y?\001\325A?\304\225\220\276\013\365\036?(\321\355\275w\200/>\360\0045?\257\217W\277S\312I\276O.i\274\330\244\331>W\2160>\t\324\331=\244c\364\275\244R-\277\320\276\270\275\266\232u\276\032%\235\275w.\251\276\205\0047\275\245!\034\274\314\365\212\276\2503G>\352\207\372=\360O_>Y\247\006\276\341\364\337>\236\223\302\275\211\226E\275UZ\240=7^\327\274$G\240\275\253\003\222\346\376R<\n\343\036\276\2167\033\2753\322(>\302\323\300>s\343\277\276\345\264\257\276\304<\323\276\345\361\203\276J\340z> 
\374\255>\357\361x\276|\373->\356o\036>\322\307\350\276}*\236=P\337_\275D\307\206\276OP\234\276\241Jp\276\304\351\205>\365\025\224=\270\305\313>r\240s\276+-\254>4\2427\275<\266\226>:\301\204\275\254h\367=X\274i\276\032\316\013\277\374N\033\276\003\324\255>\215\332\356=\010}\275>7:F\276\246mu>\215\353{>\322\272\215\276I\227O>\347\307,\275IK\373>\331a)\276\336\010\300<\213\351\">\"\014W\277\363\217\270>\024\n\327>\241\331;?\200\200*?\333~\222>so\230>\214\307S>{1\024?\010\310\206>\235\346\327\275\242F\325>\307\020\245\275\013\204\206=\034\0233>\232\372G\276\301u\005\277`\272\203\275u\030\030\277Z2\027?33&>\030%\323=\263\214\325>t\330\247>\352\232\214>b\217\336\276ZB\217\233>\250!\035?\241\360\t\276\027~\007?\3126%>\214\302P\275\037\353e>\0278\320\276h\341I\275\310\001s<\364\300\314>\"}\252>\262LT>\343\255>>6\364\272\276\312\342u>\332\037\n=R!\244>\013\252\343>\303\201\245<~\220\177\2761<\023?5\3740=\032\3575=\252\356\203\227\025\025\276\221\032\003>r\366\234\276~\304\354>\261g2\276\326\250\340\276\234b\356=\007\376\201\276\277|\263=\230~)\276kF\016\277\270\303o\276\376Zw>U\2134\276\017\224\211\276\376U.\2763\251V\274@*\232\2760\253\035>,\305\310\276\314$\273\276v\034<\274\202\236\276\276\367x\037\2762\215\221\277\302?\006? \323\r\276\016>\027\276N\246o\275\260\332\375\276}\200_\277\021\221m>\356\302\223>\r\202\211\276j[\273\276\3765\224\276\264\225\234\276q\025\355\276\021=\201\275\0068\307>mR9\2770p>\276,\016g\276\3731\201>\312y\241\276\332\375\312=\353\201\001\277\364\341+\276\316\330)\2777\353\250\276-\361r>\325\342\'\277\325Y\222\276\013E\207=\344\2174=v$\311=R\221\362\2745g6>iH\370\276\256i\005\277\263\361^>k}\263\273J\237\326\276\233\365\027\277\301\001\206\276\365@D>Q\n@;\265\336*>`\274l\2770\216\213\276\374\352\234\274\207&\242\276\226{\005\277\202\222\330\276\336s\201\277BLR\277+\310\274\276:Z\t\277)\354\201\274/\242->\230\307\221\276P\341\000\277\244\003\354\276\216g4\275\2761G?\261\tF\277fbr>\213\000H\277\034\307N\276\017\027\221=/\236P\277{\323\'\275\315\022\340=w\353\270\274\345\330\376\276I6\204\274:\334w>\303\316w\276N\265\r>Tk\341\274\311\364\261\275\003\340A\276vST=p\334\242=\274\343\224\276\236\356\225>\203\334\211<\014\265\314\276\257\267 
[binary payload omitted: a long run of octal-escaped bytes (apparently serialized float tensor/weight data) belonging to a file deleted in this diff]
\017>\247}\027\276\333\206|\276\023\256%\276\361K\030>C\367\350\274\324sQ\276\367\232\260\276\326\243\037?\347\307\227\276\325\351\002?G\200\335\276{\033\335\274\370=\341<\022\007t\275\016\246,<\357\325k>\010\030N>\'X\030\276Ht\260\273\347)*<\017o\314=j\303\025?\304t\361\2762d@\275\320d\242\276Z\346\304\275H\002\037\276p\273!=\024\353\376\276\356t7>\020\025\367>c&\274>y\014\243>\211h\017\276\303\017\222\275\'\323\205>t\363o<3\025\215>\360Nb\276M\251g\276\023]\r?\366W\212<\"\323A>\334T\235\276G+\206\276\216)\265=U\205\266\275\206f\026>;\314u=\\\360\257=\2252\032\275\306\353\035\275\332\231!\276\032}\235\276<\357\227<\355;W=B\331e\276/\316~\275\005\\\252>\363\206\001>344\27693\341\203\316\034>lI\223=\267\313\232>z\233\002>\334\334\\>\312j3\276\317\242\307=\276\316\326\273q\357?>>-\254\275g>\014?\023k\334>\241gv\275\n0\000\276#t]>\322<~\276\004\235\321=f\023A>\rk\257\276\231\221\347\276j\030\236=\310\316\305\275\207\363\306\275\2404\273\276\244\321\010>\206\245\372\276I\025E=\310\322Z\276c\023\243\276}\267\211=N\307{>S\242^\276\246\224Z>\247\350}\276\375\233;\276\'\371\034\276\341\244x\276n\317\n\276\356;\267\275Wm\244\276\267\0345><\215\337\275\363\325\204\275\254Q\251=fM};\350\343\321\2756\221\305=\006\332\026<\031u\020\276\211\207N<\235\225\n\274\260\344\313\275\312\373\341\275l\371 \277:K\212;\263?\305>\371\325\346=Uw\226\263\253\007\275\344\330\212\2756?r\276\003\310z\275$\304F\274\366\367W\275\311\013\206\276\226\305\207\275n#\225=2\321\222\275\366\377\340\275\374%\001\276n{\302< \026\216\276\355\251\024>\320\362m\274E\215\'?\204K;=\254A\266\274\026\010)\276\206U\247\276\206\374\366>d)\'\277GR\006>\032\356\363>\331\232\247\276u\002\021>`\243\'\276\005\202\025>f\321\243\274\305\314\235>E\322\031\275\227\344\371=\3769\254>\230t\335=\376\200\001\277\246B\332\2752\337\270\275o\013I>\025\242F\276X\277\224\275*\313 \276%\256\267\276\206h\003>\270ND\276\343UP>[\274\023\277\337N\257>\372d\374=\0340M\276`O\313\276\253x\320>0\375\207\275\rQ\362<3d\230\275n7\205=\365>\273>Hcw>\211<\265>1\301\207=\370G\360=\353\331\241\276\316\353 ?\264gF\275\004^\354\276\351\000]\276\307\245\330>:\t\255>\256\273)>\347\355t\276\374M\023?\373\305?\274\370\3310>\006\270\276\276/\314\340>\354\322\301\275\243H\203>n\244T> 
:\375\275|b9\275\033\307Y>j9\357\275\312\003\204=~4\203=\274\363\350>\271\323\256=\276\240O\275\334\267\r?\376!\230\276\216\026\353\276p\036\221\275\023\264\215\276m\215I\276\311\270\230>\242\234\025>\212l4>4\273\013??\200`\276uQ:>OY\316>\331_[=7\275\222<\366\371\212\276\357{\244\275\331\317\032>e\264\235>\370\376\311>\367)\020\276`2\277\275\237\364_\275\016\032\234\276\t\234\253\275R\035\235;\024\255U=\257\005\260\276\\T\242\275R\223C\276;\323\334\274\030\306\263>z\035\037>d2|\274\037\237\372\274\033\303\035\276\263w\r\277yc\241\276\316\277g\273\332u\272\274\300\357C>k\261\016\276\263[L=\337\331|\276m(6\275\321\3146<\341d\235\276\321U\214=\207\271[\276\2232J\274L\232`>\001M\022>6\266\010\276M\320\231\275\275\335\220>[\262#\276?\326\254<\331\201\234=\033\216\353\276\177\243S>\231\240\025>e\"\341<\325\016j\275kq\360\276\001+\206\274\346d\317\275\0174\247=j\232e>\227\037w<\344+\r\276\"\350\265\276\177l\251>\302\205\340\275\033\376/>\364\254\335=\366\300\024\276\324\337\360\275S\266\267>?\002d\275\024\331\271<00\352=\213\225T\276\272\370\222>j>\216\2750I\013?Fd\020\276\177\365\333=a\242/\275\375\222\265\2766\267i=\204\247\305\2748%\013\275%\003-\275d\200\375;\032\025?\276Hxh\276=\303O\276\265\014X=\026{\224\276z\271_\276Gd\202\274\\J\365\275\225$\310\276\220U/\276\346\017\260\275\245\237\265>v\232\323=\207\342;=\205%\037>\237,>\275\002\256\343\275=\345H\2763v\374><\232\320=\3548\205\276\336;\">\242,\250\275\240\371t>\211_U\276ZH\230<\222\234*\276\277\266\234>Y6\304>\232\223\213\276\010\275\362\275\000{\363=\010&\004>\031o\256\276\266b\313>d_\205\275\213\356\231\275G\027\254\275Q\276\312\276\024\330i\276\305Qe=\355c\247=\214\225\020\275\203\255\264>\201\306\025=\300j\273\276=\257\253\275m?\210?N\377\226<_\275i\275\223\242\020\277\323+8>\361n\272=\034w\266\274\2266\343=3\377\250>0\006\306\275\263\1778>\337\234\322<\266\026\033\276\032*\234==qW>\236\255\355<\344&\r>/\302\232=4\230\352\276Wz>\277Go\034?W=\010\275f+\304>\331\305\374>7\t\002>5\357\313\275E\223\371>\240\327\026?\\}\340>\205|\354\275\303\374t\276\277\212\023?F\n\362\275\222\223L>\275\275\334\276\'rN>gf\300=\021\370\370\275J\017]>3g\331=$\331\027?\301d\022?\363\003\252>\017\376\254\276=\367\241\276\"\354~=\007@\212>U} =T\336\177>3o\241=\022\320\277=F\211\031?\225\222\020=\230\337\243>>V\271=\322+\257>9\024o\274L\217\260\275\276w\227\276,\007\016\276\277\235-\275H\306\034\277\3423\310\276e\236\207?\221\244\330\2763\213\275\276>A\025>\343\004;><\366\036\276\252d\220>:\346\214\276\002H$\276\317\300|=\007\345\240>\246w\373>]RH>\244UY>oW\002=\226\017\242<\026~6\276\370\030\200=\255lS\313\373\310>\021\261\362\275\304\221?\275(<+\275\032lr\276=\340M>1[\256=K\234D>\250\326%>7\025[>\231X\353=\004\256\324<8 \035\276PD\331=\034\202\322\275\363\217v>)\216\220>:\020c>\001\024\017>*\266\200>\263\361R>Ml\244>W\227\375\273\007_\264=7}\362\2759\224\203\276\204G\324<\004\276\277\275\260x\322\275\267!\240>\236dz>c\352s>\354\020\'>zE\270>)t\315>8\002\000\276\364\360\224>\240\177\025=\321F\302\275Q\3008\276*\200\002\277I>\370>\267\216\264\276\340\002\013<\260b(>\235\031\223>\016\206u\275\2427\357=,KN\276\3743\205\276s\200\204\274\332\n\250>S\014\214\276\376?\207=\005#\020>C\234\324\275\231\363\211\276l\361\257;\024Z\232>\254\031\333\026\007g>\261\241\264>8.\240>\227\232\032\275\246\247\002\276\241\027\007=RV`>\261\311\225\276\225\337\315>\224_\231>3\027\027>\350\343\276>\215\346\026>\t\365y\276\343\373\000\275 
\357\251=\2316a\275B\336\346\270#\217\274\347\301c>P\341&=\332\220\211\273~\345/>\031\344\220\276E\261\231=+e\343\276s\334\272Z\264\033>\261p\240<\207u\363\276r\003\034\276\345\371\231\276\304\004\027\276\270 \217=|\260$\276\006\316\233>\374PH\277]@\010\277:\252b\276\355\301\216\276r\242\251\276Xk\213\276\203\254\004?\203\321\255\2765\207%\275\003\271T\276_f\206>D\255 \275\367\213\242>\372g\020?\003\352\234\275\221\306\024\275\362S\221\274\354\320\347\275\373U\351=\000I\206>\307\013\020\276\017x\330\275\223\3139<\031!\300>\260\267}>\352=)>\025%%\275ON\370\265\254%>\363\203g>!:\267>,\303\351>\204\002\243>\352\365\006\277\212v\217\276\2765\000\277\032I,\276D\353}\275\231qe\276\342r\344>B\017|\276*I6\277~\nO\275\256\030\032>\357|;>\232\\T\276\334!\246\276\211,`=.\226}=D5\003\275\177\330\036\276r\037\343>\231W\370\274VAV=*d8\276\006}\211>\351\351\024=\324\301P\276\361\032\331>9\307\214=\327\315\244>\334*-\275L\216\236\276\3651N>3H\373=\017\342\211>\314h?\275^\341\033>-\225\261>\"+.>\237\211\000\275O\001\t\275\033\220|>\254\261\237\273\016\361[\276\023\3605>,\225e\275\301\336:\276}\004C=\203\370\362\276\r\300\257\274S\001\370=\257\326\200>$\240\347=\303\272\262\276\374\016\360\275\337]\375\276\333y$\274\212\243\036\276\245UE\276\371\213 \275C\260\016\277@\321M\275\021\221`=q\243\205\276\036o\222=\223u\241\276\024\365\230=U\001\262=B\371o>\277\230%>\tn\202\274\251H\010>3\353\266>X\226\325;\034\351\203>\314[\364\275\'\240f=6\242\354>\225$(\275\273\365\210=\305\026\n\277:_\217\274OK\373>%r\023>O\352\203\276\001Dk>\226B\330\275DAO\274\337\003S\275\244s\344\275\034\307\017\276\002\305\260\275\025qe\276g/\004>#e\226\276RB\242\276\260\356 >\023\203\214=\361\266V>\244\017#\276\256j\256\275@\255\177\276\030\232\311\276\213\204\026>\202I\021=\244\033\317=8\260\253\275\224\364\327\007\346\034\275\362\323\355=\367^+>\252\247\346\276@\326\214\274\317\232\327=\002\217\244\276\221-\313\274\360\257\217\273H\261\005=Z|_\276\344\333\005>\307\211\266\276\336\256\352<\245/\373\252\254\242\276LA\307>\257]Y=\237\357\023\274D\"=>{\t\272\276\005\245\234\276b\203\177=\265\334(>\210\203\311\276F\"N\275\344W\017\277\222\210\303\276\014Y\302\2768\245q\276\nm<\276\321\365@\276H\'o\275j\354\024\277\213\367/\276\246\313\241\275\323t\334=\346,r>\257 
\006>\267\010\311\275\243\203\372=]-\232>\200\024C\277\220\202\306>\316\0261\276\243in\275q)\002\277\266\372%\277\250\373\321\276!|\355\n\277e\277d\220\001>\354\003\273\273%\253\204\274\222\355\225\276\377\271\007?I\221N>\210\324\013\277\202}\372\274\233x\307>i\301\311=\030\216\246\274\373\377\240\276p\342\226>\336\261\300\2750\237\250\275\2035L<\032\277\217\276\261\254\340\212\013s>h\004\320\276\342\271\031>0\277\251\276S\367\020\276\363\225\371\276\373y\312\276\364\210\025>`},\276O\220\241\276\217:C>\016\014\361\276@\362i>\337\235;\275\262[\330;G\004\276>h\255\204\276\344\360J\276p\2310>\"<\024\277\325\343\026\276E\334\371\276\263S\217<\014\2066\277E\304\235\276d2\203\276\004\344\227\2768\352\214\2765\227R\276\322T\242>\037j\366\276.\311\231<\344\316\230\276\223\353]\276\236\213\250\276\314\245\312\275\354\242\213\276\t\362\005>e?\352=+\326\254\276\367\003\306\273\300\006\304\275\315\232\020\276\021\270\027=>]#\275\251\311\010\277\014O\342\276\310\313\232\276\371\312|\276\376\026\032?\331Y\206\276\203\252\343\276\302\211\243=r\nc\276\313\313\272\274\321\237E=FQ\244\276\312\335W>\244\205\006\276\3427z\275\034R\227\273\33494\276\374\207\332\276\367\325\374\275\310\237\347\276S\331\017\277p#\302>x\221\305\276yG\300\2755#Z\276\316\215@\276\207O\r\276y\225\r<\314@\220\276Wpg\275\337\214\321<\350Eu\275O#i\275D2\224.\354\213\273ek\337\275\220\323\177\275u\336\010\274\357\371p>3S0\2761\314\360\276S\337O\274\260sw\276\255\220<>\343db\276\231\272W<\252\303\205\276=\026\314\276\211\035H>\337\371\334\275?\256\017=3\201:>\016\345\206==\356\212\276\266r\027\276\200\221U=\367\337\351>\217\211p=\325\023\314=\177\325G\276\262\337\330<\2405\353\276\023\357\234\276\010_g\274\177\201\036\276\304\206;\277\255\2114\276\026@\251>\345\354_>Y\323`=S\257\220>u\232\345\276\342\n\323=\006N\215\276\342\330\346\275#I;-y\261\275\257\373\333\276\375\200\307\276\2155\313>b\351\300\275\343\227\205=c~\347=\016\217R\276\340\263\026\276\354zG\276\350\335^\276\317\264\366\2769\030h>\256\321\221\276E\022:\275\302\3405?\266\315\251>]\016 \276\234i}\276\021Y3>\364\314\223\276,\204\201\275\266~\207>d\206\340\276\005\276m\276x\025\"\277\352\346\225\276u\346,>7e\274\275\345\244\000\277\342\353\337=?\031\300\275\352\361\204>\241\334\003\275\317\330\301\276\323v_\276\211\214%\277\331\3239\276\351\344\257\2768\370\350\275\256\373J\275?\254\302\276G\261\243\274\357\363L\276\256[\031>\003\023@>\355$\036\277\210\340\010\276A\232\226>\373\371\024\276$H1=\342\027\233=\346\025 =\372\346\320\276\240]\263\276 \031\267>\334`\216>\356\2732\276\332\233\373\276o\265\303\276\333\352\276\275;\030\303=:\333Z>\244\301\004?9\343\262>Yk&\277\003\352\300>\367n\021=\223`,>\007U\226>\037\315\247\276\327\325\226\276@\240\330=\014\316\372\276\247\276\256\276\254\301\007\275!\235\370>\246\222-\276\351f\220\276\226\016\237<\332\350\203\273Q\204\331\275\t\200\227\276\314*\330>\360EE>\264\022\252=\312\215\201\276N\311o\274t\302a\275\355\201\271\275E_\014\272\276\262P\241\274\277\340\365=\"\206s\276\177o\002\276\213D\356>?\327,\276#\235\"?\276=X<\"B\275\275\022\336\265\275*\274\262>i\344\261=:\261\007>\340\300\365\276\312\243>\276\307Lm\276x\213\343>\343: \276k\223==)\307\022\276\336D\332\276\034\212\232\276\331\347\371<*\203\265\276c\355\360=(N\205\275\256\336L\274I\353\351\275\377\340\032\277zw\035>i\020\355\275/xM\276\021 \205\275\351\320?=z\372$\276li\031\276\211$\244>]\235\267>7EL;z\377*\276\304\303\031\275\332>?=\345\226\014\275w\221\227\276\036\222w= 
,e\276\273\245+?\276\305\346\275u\254\022\275\276\275+\273u\334\305>\245\353q>,^\202\276\345\362\001\276b\237k\276\r\253&\275\263Q\232=*\361\017\275\255\304\310\274:q\026\2772{\220=\203\307\206\275\233D\004\277\300\226\237>\214\306\010\277\322\n\220\276\300~\347\273\352\370\210>\202\363\310\354\305->5\356\t\276\211s\006\276@\027\201\275\333n\375\275\245o\233\275au\370\276\036\226\261\276\225W\257\276\264\207\306=\006\370\022\277\317\270h\276\340\214R\276\376\310\371\276\374O\025\276i\333\001>\265\303 \276HB\334\275\"\2026=T\367\255\276\016\375\315>~\220\217\276\2467B\276P\364\222\270\231;\316\276\2552\251=X\225\301>\333\033\205\27679Z\276\337\255l>6G\215\276\303\005\346\276\3079<\276\025\305\202\274\360\351\257>\247S\274\276$\'\275\276\210wh\276F\273\354:]\347\013\277\260\376\351\275\207\2715\277\177\2707=\306=\254\276(\203\032>6\245\246>B\003\225\276\216\304\340=\267@\316\276<\340\334\276\352)\241=/\035\023\277\352Gd\275\250\342D>\275\314,\276\314.\327\2762\224:\275T!\005\277\321\352\335>\3260\241=\306\256\005\276\242o\010>F\010\024\277\230^\203\276Nw\354>\317\247C\275U\354\013\277\010\300\004\277\272\277\322<#9\360\274|\006W>\343\275K\276b\230\020\275//\006\276S\343\235\276\\\215m\275\3318\240\275\344\346\201\276\213\014\310\275^\001\013?S\306\034\277=.\017=8\017\002=\r\034\237\275\234h\252\276\265\213\010?\005\014\343\276\233\2567\276\375\'<>\306\263V\275A,\360\275\255\0275\276\257\016\202>\007\006\334\273v6\310\276\243M\223\276dj\211>\250Sg\275\222B{>\311\337\305\273d\336s>\241\253=>M\227\343\276iR\361\275`\263!=O$#\277\2064\326=`\034\231=}0B\275vI\214>\311\330-\276\006{\247\276\261\025\007\277\245\352\027\276qw\252\276dN\010\277\364\030\307\276\373v\347\276\254^\306>\215\221\322>\367l0\276\037\370\326\276\321\363\304\2768\210\201\276\324\332\372=\213\232\234\275\002\030\270>\336\026q=S\344\261\274>\220\317\275\210^\253\276\014\000x\275\244\211P>\336\232\364\276\021\354\036\277L|N\276zm:\276\361b\215\276h*[>\332\350\027\276c.\327=\310\224\035\276\014\301A\276H\020A\274\027b\213\276R\235\216=\003\247\007\276\033\224)\276\236\221\031=\216s\203<\257.\342>d=\370\274i\226\216\276\000Bs\276\034\356\216\276l\221\303<\003:\024\2756o?\276\221\034\'\277\275\243\214\276O\357q\275m\243\224\276\254r\301\276\236\031\244\275)\351\244\275\250\220\240=\001\264\265=K\223\235\275\212\023\327\275\016a\314\275\267:\004>\000\013\007>\214\371d\276k\006\300\275\265\313\363=\347\014\007\276\352\303\263\276\000\353V\276\326\000\215\276\331\233>\276\020\255\344=\360\364\264=&\244\350\276\021, >s\347s\276\3326\247=\227x*>ZX\274\275en\215<}\n\342<\276Y\233>\256Ab\275\004\371K\276k\010\240>\025\017\035\276S<\215>p\241\271\275\250\354\266>\273ZT\276\337\201u\275\307n\023\277\261\004\001\275\004MT>\017\013\244>\3764~>\231\023\217\275f\222\330\276f\210W\276/\226>>%S\377\276\335\355\260>\027\263\224\276}\0210\276\360\314\315\275b\321|\275\234\301\201\2769M\373=\270\272B=\004\036t>_U\217\275Z\306h\275\377\336\226\222d\324\275u\264\017\277+$\221>\t\223\271>Ja\257>\216\330\001>,D\300>\005\356\027\276\332\302\202>\251R\007?\371\001\327=i\342\204>H\016\020>\2712!>^b\262\275\327mZ=\020\260Z>\240\200\310>\373\363\254\274|\336\304>\232\377\002\276\3609\317\276\3638\365:\372d\310>\367\251\256>\371-\035<\244\005\255>p;\003>\256\365{\276\366\372\206\274\001{%\276\356\352\323=\307\007\226\276\177\223&\275B>(>lk\317\274 
\005\211\274\353\337I>:P\230>*\346%>p\231D>\322W\313>\223@\375>\277\365\347>3\332\353>I\325\355<\255\2044=\376W\230>\212\0254\275\321x\362\274Q\331\224>\304j\">\2175\224>6l\241>\251\207\025\276\034\260\362>_\024\303=\r\362\311\276\346j\307\276\261_)<=<\270\276Jk\220\2754RNfs\256=\301-\313=v\250:>\367=\304>\213\353\200\276\243Z\033>\343I\222>\257I\242\274\251\311\032?\332b]>\267N\351=\246F\356>\001d;>\273\245\341>M\t\214>\022\213\211>;\250\r>P\335\207\276\022\376L>\204\3727\276\316\346*=}\004>\276\210>\235>\027\032\365>\315\014\231=\2302b=\374\'\225\276o\240\265\276\334/y\275\345K\013\275m%\355\275\323K\366>?.\013=\024\241\262>\241\334\223\275o\253q<\266!\333=\366\330\252>\3526\210\276\375\317\006\276\35575\276K\241X\276Z\350\271\276\214\0336\276\256\372\322\275ep\022=\200\274\327<\327\352\215=\376\024\220>\014\n\325=\314\244\202=\347V@\276\354,\223>l\005\334>\26316\275?\230\204\276\257\315%>\270\343U\275I\214\202=\004w.=\000\003\031\276`m\346=\267N\236\275\350\246\222=\375l\306\274\276\270\034\276\000-\005\276\330n*>\337\333v>\315ru=-\300h>\232\233\317>\315\276\\>r\271\225>\341;\316\274\3130\213\276@1I\276E\315#\276\237j\030>bL\351\276NnO\275\247M\025>\374\217a>\242X\354\27406\340=U\232\203>\222\250\315\275\010\215\266\275\234=\006>|\213\017\276\330\301\030\276-\232\367\275\325SS>\300_^>\235\323\275=\317\037\367\371\255\210>~P\343<5\276\323\276\365\204\236\276\034!\251\2761\337\335\275\322\304\216\276A\220\320>\370\361\"\275\240\263\352\276 \311\211\275\260\333\245=\354M\310\275\007\351\242>hM\366>\340\264\227\2769\315-\27446\r?\320~v\276\341f\371\2753\253\375\275n\366g\276=\037\265\275\213M\032=\324\304\031>>\355&\275\200\223\001\277#\231\205\276\246A@>\240P\234;W\177\227\276\301\024\177\276L\252k\276\260\030)>\332\341\336\276\007\301c\276`\321\325\276\223\342\016\277\3752I>+\371J\276\357\305\377\274O\201\337\272q\262\361=\273G\333\276\262b\004\276\313_\020\277}+\217\275\033s-\276\220\275_\275\256f\n>\304\210\370\276 -\322>\241\322*>Z\213\335\276\\{\343\275\357\267]>\274\326t\276\n.\000<\367\226\\\276r\2452\276\233z,\276\302>\305=\200\343-\276wVS=\343\355Z\277\2305\363\276\010\032\207\275\r\311\210\276\370(\351\276\017\243\010\277\271k\304\276\233\003\364=\374\304x=\303jF>X\330\365\276\2715\250\2767\377U\277\266\373}\276\024L\273\274_\254\007\277\313\305\341\276\245;\347=\275w\221>\346\202\027\276\320\251d> \262\305=S\243\030?-\304\351\276\375^w\275\260\265;\275\247\007<\276b\203A\277vmG?\t\002\036\276KA\340\275\241!\221\274\346\370XV\340\306\275\010\362\333\275\003\207\264=3Mz\274\t\351?=\341=\366\275\t5\036\276\354w2\276\014\364\206>\304\237\272>[\311\204>Ap\252\276\210\254\005\275\265|v=`\034l>f\332Q\275p\335\271\276O\177\037<\300M_\276\264,R\275\307\251\031\275\373\3136>v\312\321=)_\246=\006\374\021\276\034\307\344=A?>?\021\003\310\276\325\254\016>7}2\276$\231\213\276~\347\324=v\364\265\275\312\356\031\276\243\213\330=TA\036\275\324\3100\276\026\004\214>\367\374\251\275v\220n>\241l%\276\007:\020\276K\307N;\321Q\307<\213\324\260\276 
\364\207=\206F(\275\002*M\276h>\031=T\321\260=\375\256#>S\026\246\276\327\233*\276m.W=\216\3779\2768\305\372\274\306\242\275\334\273\367=\321\237\264\276\222C\364\276\007s\361\275\356\254\305\276K,\206\276\263,#>\'\340\270\275\346\235W\276\260\3447\276\017\027\027>\326j\204>I\177(=\\\252\277\275\240\315\035\276^w1>\213c\202\276U\017\210\276\371\227\027\274\007\215D>\246\344:\276\016\322Q>\304\350\004\276KW\372\276\375\301\204\275q)\224\276\223\252\313<\331\315$=\212\214\346\276\306\250\022=\"\224\225\276\204\203e\276t\332\300=E\260L=\265\032\260\346\241\257\276\206\305$\274UQ\262>\227\017\002\277M\351+>\362\206\344=WhM>\277w\n\276\330\253\357=,v\034>k\0054?\'\351\017?\267\216{\275^\024\210\275\363&.\275\207\206\276=\245e\177\276\021f\250>\350\2566=+\374#=#\017\242=\337j\346\276\301v&>\023\311\270>t\213?\275t\362\367<\224\016\312>O\314\263\275\0312\261>\360q\243=\377F\220=\327\300W>\226\000\216>\253\224\274\276\177\204\246>\255i0\275\202\240\247>\005\334\317>w\367\023?\000\315\242\274/\372h>\022\336e=g\277d>Q\345\222>\317\026i>\366*\266<\223H\355\275l\340t:\002I\245\275wf\227=M\010\237)Y4>w\375\215\276%H\245>3\001m>8\212\367\275\325]\357>3\005\214>-\260Y>7\022\265\275\243\307\342>E\347\321>\374\342\266\276\350w\246\276#b\222>\337\304\001?\317\272x\276_\321\217>\221kj\276t\007\313=\325\325f>\r\035\326\276\247*\273>\254\005\213=\n\333\325\275\217\312\"?\213\321\240=\334d\271>3\001=\276\316\337\252=\206\302\231\275\030-\334=p\036v>z\3761>\237\366@\276T\276\243\273\361\336\036>\030t\317>\367\177\031>E\370\205>\036\356\020\276\354\202!>\252b\036=j\206\254\276h\022\271\275\366Y\243\275\335b\007\276\037]\276\276\357\377\232=x\023\326>}\035\010\276\233\247\225\276\272\367\225>\376\215\371>\010x<\275i\236\003>\212\010\">\266z\333=\257\002\321<\004\304\021;Jz\034\276\017\234\242\276\263\376\207>\207l\225>O\333\345\2748\'\265\275\036\006\241\276\333\032\215\275\022\253\3766\237\376=8g\035\276\252\366\240\275\252\004M=\001J\032?g\261\225\276C\020\226\275\242\243\022>\004\335\232=\363D\224>\223\275\212=T\245C=\017]\277=\214\333}\275[<\217=\n\337\006<\221*\327=>C\347=\301\\\236>\367z\222=\'\305X>\206\000\334\275\225\253\341=\r6Y\275\257\374\014>\231\255\027>]\367e>\323\244\345>\371\215\177>\314\3415\220\027\232<\303uH\276~:z\275\014\016\005\276Fe\243\275\202\263\033\276\351\005\275=\213\343@\276\225\303\221>\tF\374\275RJ\251>>\324\027>UxK>,C\306>\270\277\005>\372@r\276n.\004\276\001qB=\2736\352\276\021\235\354>Uz]>!IJ:r]z>\236\000\014>\330b5\276p\022\212>]\316*\276\353\255\362\274\301\017\312>\3236t\276e\275\215\276\017\037\210=\233G\201\276\321E\332=\030\324\020=!=\231\276\004a\020\273\211\306\007?\343\006\020?\305\022\033\277\2479*>\005\275]\276\2775\230>\315\024\213=\017\325\256>\344,\034?\363\242\301>\350\346\260\274+\337\244\274\373\037H>\005|\365\276\250c\324>\304Cu>q\001\303>9\230\310>\377L\231\276H\305(\276\247k\225>\301\342w>@\260\221\276\002\211\005\277\3529I\276\331\010<>\234\334\367<[\007\226\276\202\0230\276\223\236\034\277\303\010\240\276\034#\211>+}\223>\254Oa>\232\324\331>\325\261Y\276\313\333??\222\023\247>p\2237>\245\213\331>\311\250\312\275U\307\227<\326\266\352=\2757\357\274\305\021\204\276g\016\223\276lD\213>j\225\371=\221\237\223\276\226Y\031>\227\273\250>\310\231/\276\301\206\212>$\177Q\276\033\335\237>\374S\306>\336\337\t\277G\266\221>i\300n?\235w)\276H\036a>\252!1=\246\231\310>\363\312\021=\230+\275>\037\212\317\276\240\376\254>\326\221m\275\223W\002?oh\224>F\033\362\275\200=\037\275\364\325\215\276\267K\021\276\305\021\345\275$\346\243\275\236 
\214\276\376\240.>\3326\343\276\224\247\253=\3310\'\2766;\177=\027\213\217\276E\265\301\276Il\340\275,\371\240>\305G\226>\274\335W=\276D\202\276\374\324\275\20496>\375\260\241=\243\320\375>\353J\001>\210\354\341=e\276\226=\3013\313=\376\240\201\275\242\312\250\276\264D\017>+-6>Ic\241\275\304x:\276\220\177\002?\021\322\272=\261\274\340> \006\376;m\022\265\275\254,\263\275\210\022k\275\372+\372<\351,\254=\356S\357<\275\206\006?\333\267{>Lv\223\303\361Q\276\376w\340=\215m\240=\005\307=\276\262}\177>\364\365\326\276\352\376U<\rR\023=d\270\r\276\302\322S=\205-\316\276\020\271\177\276y?\343>^\n\232>\207\263h>\350\221u=\270\232$\275\013&-=\013Op\275cuy\276\213F\253>M\033\221=$\331h\276\236G\224\276E#\363<\214\247g>\316\345\254>\335\225\206\274\321\377q>\215d3\276\013\272\252\275\373\367\204\276bxA\275h6\207>)\257\364\003]\t\276\320}E?\350\343\203\276%\344D>\\\320\220=\302C\311=b\217\247\274\r\r\227>#p\030\276<\313\266>\317\365[>\021Q\314<\363\225\271>\004[\304>\004\2714>\325\330\000\276\230\351\336>:\226\036>\256\215r\275\n\352G=\275\r\323\275=nd\275PE\005\276C\251\306\275a\252f>;\330\337=O\225E\276/\350\035\276\200\252h\276\362D >|\347 \276\322\370M=\036G\264>\241LO=\037\265\241\275\325\035\250\276q&\205\276\310\244\365\276\036\241\025\277O\307^=l\t\220\276\200\255\326\276\020\257<\276\376\013\231\276\204e\254\276\222\234f\276^l\023>\r\376\210>\347\264z>x\352\253=\251\330S\277\323\021\026>t\317\322\276<\224.>\255\316\202\276I\374\351<\331\365\262\275\223$\273\276\232%\021\277T\253p>_\273O>\345\032\210=\024]\272>\354\342\224<\244\'N\275\217%\302>!\017\203>\332C\344\276{\261\372\273Y\354\010?\214\204m\2747\241\002>\313\336\037\2763\002\014\276k\2267>\222^5>-\230\377\276\367\225\312<\211A\024\276G\330m\274@\367\300;\330\247U\276e\230\267>\274\033X>\266\334\037\276\017a\007\277\013\340\002\276\363\316 \277u\355\301\275\\\271\376>H7\325\274\300]5\275\\\\\247=\301_\322\275\233\024\240\276c\371]>\343\350V\2769\350\002>t1\026\277G@\004\277t\000R\276\340\342\226>\234\221\222>M\217\177\2766\234\270>-\326\273\275\240\260e>\2158\033>U\013V\276\035\3516>\213\213-\276?d\373\2750\211\310>\203& 
=\370\023V\275a\2066=\246\300\330\276tn\202=\034\320&>X\213\301\275U\237!\276\255\255b=\237O\324\275\213\036(>\355M\322\276\320[\265\276b9\'>g\334\257\275\3316\354>``\303\276\361G_>o$\372\275\007E}\276\320\266\227>D\210\255\275h\372\236\275\200\244\177\276BF\267=\320\306\227>\303g\206<\225w\000\276\215v\242>\242t\330\275\365\352\036\276m\314\022=\216&j\276\340\275\251\273\256NZ>L\275\\\276\322\030\313\275z\227\033>\217\314Q>\217\260\330\275c\026v\275@N.\276\227\241\275\275\257\000\234\274\005\244<\275\275\t\205>\221\307E\276\225\241\005\276\272\237\304\275\364\276\304\275OH(\275p\200\350\274<\003\300\274:\032\210=\203\242\257\274\232\231\254=\022\337R>\207D\255=c\026\331=\307\345b\276\235\233S=G9x\274L\276\013\277\341(^=\305\352\204>\300\223D>\252\235J\273\002\005h\274+G\351\274F\247\242\276\342\304\272\275k\240\375\274\025J]=\313L\243\276;\233)=\265\026m\274F\274\341\273E\320v>\013!\257>T\357\346\276\346\241$<\0025w\276\332\376b\276?\026\020\277t\254o>\301\371v\275\244L\227>\242\324#>/_\002>\274_\362=\253\r\\\274\014z\362\276\317[\222\276\326\r\234\276_\251\000\275u\350_>\370\177\232\276\353T\032\277,\202y\276\004\021\323=D\264\341Y\246!\276U&\206>\202\323\356=\270=)>v\3511>\345e\000?\312\361\024\277m\376\306=)\374M\276q\340B\276\nJy\276\336\346\265\276X\354\320\276`\376>\276l\252\350\276\313\223\352=VH\241\276\211\273\177\276W\353\221\276`>\013\277\356\354\327=\263\212\212\276\353\177Q\277>\214\271\275d\347\007\276\030^[\275\022\333\200\276\351\276\016\277F\021\274<\200^E\276\371/\276=\306\3111\276v\277\025;\267\301\215=B\3362\274\314\250q\276V\277:=*&\355>~\341\342>\201#q>6\351T\276\303}C\275\370\243\361\276\301\031\203\277\225V\005\277 .\304\274\345o\330\276\227>\331\275_\232\237\275\216\273\206<\035\242\316>\255\257\313\276So\220\2761\352\353\276\271\216\037\275\016\035\253=\330\273\214\276\344\230\364\275\307@\262\276\n\325p\276j\346\204\276\202\203\024\274L\1770\277\200\017\031\277^\233\317=\017zR\2767\225\030=)JX\276\250p \2755\227\226\276z\372\343\276^\345^\2763\362\t;\343\315$\277\340\253\214\276\263\224g=\006\212`\276S\254\372\276}L\014\276\257l\235>F\337\304\276\3519A>\241\374d\275\014\261l\276\0017\205\276\245\260\234\275\235\223C=w\316c\276\221\213\303\276wi\260\275\322\341\245\276\314\344m\277\206(\232\276\335\212;\276\355\326\221>\246\230\023\277\334\213\370\276\371\024\346\276\032\221\266\276\360\270:\276\350\354\001>\205\303\234=\342\272%?\220\270\036\276\215!\224\276\027!\034=\203\210PE\312\302<\213x\317\276\021E\246\275\333\243\020\277\330\303\211?X,1\277\345\340e\276\232\330`>\014\324\t\276\020;\022\2760\221>>\315\310p\276:{\336>\215\276P\276\341\3372\276\310]\000\276n\265\036?4\211b>\226t^\276\333W\361\276\022&\003\276P\363\356=\323\210\234\276\225\354\367\275\271D-=\210+#\276ca\203\274\236\267\326=\217\006\347\274\351\2670\276\243\310D\276\301J\t<;\307\271=\241\206\213=\273KP>\303\007\326\276!#l>\243\244\036\277+\347\241\275\002~\306=dI\322\276p\366X;x\367\253>k\346\240>1\222\255\276\315\350>\276\271\323\026\276;\347\243=\232\215&\276\211\341\257> 
\220\307\275qU\035>\335J\235>\037\236\224\275\242U\237>_\351\204\276\216\321\320\273\336\t\207>/g\320;\220\2448\276\220b\022\276J\255!\277)\315L=\216\340\032\273\316\204\314=b\216W\277\354Xc\276\247\000\003\276@\263\301=\263\335\250\276\273_:\276\301l\t?\365\256\310\276\363D\031\275\265{\275\276G\016^>\224\321\226\276\323\277\234=\177\240\261\274E\322\216\276?-\240\276\265\255\206\276/\215L=z\033\353>q\366M\275\220\327C\276r|\000?4w6?U\315\233\274\301\255\252=\221\200\246>\227\307\263>3\221k>dl\201\276\3531\216\275\273:S\276\352\214\220>\\\322\235\275l\204\346\275\017P\217\275\\\\\000>\275\t.\276G\332\262=P\374\247\275\251\377\367=&rC=\325\333\355\275U\272!?\341\366\367\276H\246z\274(Ou\276Z\024\022=\025\342\264\275\334=\251<\263d\020?i\n*\276\330\242V\276\364\3400\2761\0314\277`\277\221>\257M`?\221;\213\276\215\035\234\276;\212\337=#\342r\276_\025\013\277\215\037\'>}\350\000>\370\202\213\276L\\I>\024\007\217\275\251\321\327<\340\355\227=_\003|\276\375\361\220\276\377%\033>\'\326\374>&\232\024\277\341\031\274\275Y\307o=\0374\243>W_\031?k\367\013\276\032\223J\276%WM=\304{\372\276\265g\321=\337[\276=bI\262\275\216s\235\2767~\225>@\340\260\276\257\377\233=\2651\224\276!SD\275\254}a>\303\241Y\276$\366\006\277\277\320G>\355\354O\276Q\271\230>\373\004\331>@\0306>y\263<=\222\314\327=\025\017\274>7D\221>\255\273\262>o+0=6\030\343=\222\356\271=l|\032\274\374\350\035>+u\316<\303\177\314\276\006\343\232\276\231\027\221\210\267\324\276W\224\202\276\3172?\276\335\2039>\227#2>g\273\246={l\360\275\322\365\263\275C\030\270\276\234\244\203>\243\220y\276\220;\004>\264\205\225\275\333\266\376\274\304%\217\276,\275\241\276,\263\333=*\262\n\276\371R\246\310\263\226\276\261\232-<\226\301\250\275U\200\220<\362\273\210\276M\010K\276\021\031\341\276L\327\201>\355c8\276\266,\n;X\252\243<\3340\014\275ei\016>\002M\232\275\005d\221=\233@G>?[\220>\221\021\020>\247\222\257>\301\005\337<\270\330\215>\264N\207\276$\273)>\020<\277\276\277\206B>7\275\266>\301\332\346\276\023\202\210\276\305\035\347\276\\\363Q\275I>\002>2F\345\275\0231\251\275M\2437>S\267\315\274\231N\211;<}L>\333\343\344\275\240\320U\276\264CD\275\"\356\336=\2145\013\276\010\300\036\276\235\335M\276:u\016>\230*\342\275\220L\351\276\316\311\220<\237z\r\276T\310\215\276>\216Q\271\221\206\275>\211]\261>\364\266\270=|\220\366\275\000\371r\275?\3574\276\366\342K>W\035\244\272q\352\016>\014\355\373\275\223\3307\275Q\022\024\273v\236\'\275\351G\217>\343p\211>\347$H>}\277o\275\250\355\337\276\2546E\2762\251\024\276i\260\230\275\236\267\303\274f\027\013\277\277\223\324\274]\2770>\222w\274\274P\334\017>A\037\334=\311$\326<#\010\340=\212l\257=\200\036\270\275\265J\307\275\037\332\031\277R\340\302.>\323\275\333\257\204\275\314c[>\n\245?>h\214)>\354P\316>4\267\004>\340\270\346\276\205\210\200>\261)\370\275\323q\002>\236\306\360>N\025\351=\312k#;\301}\303\274#Y,\276\317U\306=\320\323\014\277\351l\212\275Yz\330\276\202\347\213\276e\346|\276\237\016~>\276\311X\276\343\333i\275\335\341E=H\227\230\275E\316\361=\\\272\212\276\037\0227>\013O\255>\275$r=vdy\276]\214F>6\302\262\271\307\232\232\276\356\342\266\276D\362!>\275\276\336=\303\343\215<\221sX\276\020d\256\275.\316\306\276\331\\\260\275\3165I=\260\315{>\344\022\214\275a\333\202\275\266\356\306\275\246e\201=i\240\275<\010=\274>9\237F=\253\006\016\277\016\345\372=\377\321E\277\354\244\262>8\260(>P\320\201=QT\240\276i\316\332>s\353\244\276#\021\360\274\341\317H\274\3312A\276\206\362*>N}\232\276B\355\312\276\014\242P\273$d\221=\263\346\261\2746\177\213\276\213_\316\276apF\277\t\215\036\276\2041\200\275\234\224y>V\371\024\274w
s,>O\007\326\275\305\263_=u\341\233>}\031Q\276\255\305\036\276\367%\223\275\265\317\253\276\230\302\001\276,\321\\>=f\353\275\300\353C>`4\362=\205\254\247\275K\250p\275\3249[=4\333Q>O`\'>\231\255\331\275\230:k\276\211\201\353\275b,&>\2344^\274\251\265\'\275k\002\214\275\231\373\262\275\te\212\275`\362\306>\r\004\322\276\242\265\271\276\307\206y>\265\211T>\322j\270\276o\334,\276&\355\360\276\003\221\235\275\223C\223>\334_T\276\361\n\342\276\353w\000\275\004\316M=]r-\276\025C\246\275\202\303\254\276\200\203\225<\036\270\365\275\346[P=\\uQ\275\254\270\330\276\241\251\003\276B\244)\222!\225\273\351_\322\276\350\365f\276\352:5\274\001~\007\276\313\204\233\274\317:^\276\345\033\t\276*R\224\274\372\263\325=\310s5\2768\307@>\306\021\256\276\250\330\2448\n\325R\276\267N\213=\256\n\222>\n\202x>\3370S\276\004\324\241<8-Q>\334\226\217\276\277\233\267\274Nr\375\275V+\266>Q\001\030?fxe>b\332\203\276!\371\223\275O\245\r\276\2655\221\276/\036\243\275\307\236\267>^4\372>\204&\350>\035\007\236\275\3025\351\276\374V\356>\323\335\251\274Q\216\371\274\300sR>\240\377*>\031<6>\016{\257>\323y\213>\313\203\316\2769L\347\275~\374\223>\200\363\'>\357\010\000?\266\227\225\276a\034/>\336\222\214\2756\214\210>\002\"X\276\360?@>\236\213K>i\370\235>\372%#>\205\330\274\275\317\271\317>xaE>P\"\223>\252\374K>\330\354\371\275\345\340J>$\373 \276#\020\273=\253\326\000?Q`\203>p\342M>R\305->\n\270&?\016\320\303\275\026|\202\275!Z\257>\366\030\362\276\345oo=\002Cy=tY\233>\267\356C>U\315\244\276\027Y.\274\306\377\274><\335\334>w\021\264>N\265\276\275p \345>\361\263\260>^\022^>N\245\227:\276\\\224>\225G<>\325\304\233>\014\276\352<\365\314}\277\342]\224\276\206\310\244\276\247\223^=\025\014\205>\205i\213>>A\251\276\373\302\202\276\203\321\000\277\365\353P>\276\216^>\275T\333=40\342=\304\301Q<\315\2157=C\205\341=L\003e<\333-\245=-\326\355>P\371\225\276\351\360z=\025\344\241\275i9\356>e\351\0259fI<\277\315\265\255=kh\026\276hOF>\360\037Y=\336H$\277X\227\353=\272\356\203>v\346\010?\316\245\224>}5\212>Q\025\312=x8\322=n\371\302>u@\327\275Y\354u\276\006T\330=\337=;>X\002\202\275\210V4>f1\264=h\013\016\276\312\232b\276\303\342\212\275\014\274\336\276e\342\021\277\353\331\214\275\333\232F\276\322Nu>9\372\026\276BE\r>\030_\322\275X\217\355\274\217X\330=\237m\337\276\275\240\024>\225\371X\276\301K\272>\353\364\014\277\004\276$=\224\314>\275\326\365\003>\265\227\321\2752R\025\276i\217\245=9\333\013>\322\tX\276gx 
>\366}\263=\223e\356\276\350\352\356\275\374#z\275U!\014>Y\351\325\274\325\337z\276R\376_\2762\025?=\272\242\333\276\345.\261\276\261\213\250\276\003\264\244>\256\325\276\275\327\024D>D\225\037=\275=\363=\226\227\300\276a\314\027\276\316\0023\276\251\322\304\275\241\235\341\276\302:\245=\325l\013\275Ry\354\275\230\r\227>\204r\014\277Kx\265\275d\242S>}\026@>\221\"\257:\240\343\256\275+`]\275\371q\327\274/\277\216\276N\224\333=\356\225\330=2\332->\376\375(=>\253\"\276\233\255o=\n\270\206>\037^\r\277\242~3><\226\362=!\312\014?\277J\233\276\361M\204>\313%\211\275N\000d\276=\332I>\350\302\177\275`?1>\276\177\'\274\007\320\016>\273\244k<\254\211.>\254,\340\275]j\257>\'\017\223\276~\243H>\365\032R\275\346=\363>\3609\313>\226\335\014>.%l>\326\220\235>\352\313\256>\365\351\006?\301\302|=\365\343\261=g\263\332\275eVd>\033\315\347\275c\t\205\276\325\246\345\273[\020\320=\301\220\223>\357\201\207\276V`\005?\023?|\275\364\016\266>\226;\023\275F\225U>\300\271\217+Q\212<\034\364\t\276\331M\217>w\242y\276\035\233;>F\342^\272\2770\361\276\021\352\337\276\3318\210\276\274\016?>\367\212\241\276\221\350\313\274\212^&?3\303\344\275\2077\006\276>E\300>#\035\274=3.\367=\374\013/\274\266Re\276$v\237\276\345\333:=E|\255\276\231\255\035>P\232dM\256\300=\t7\303>L\207%\275\377h\017>\242\240H? \270\211>\n\355\035>_\0340>\210\017e<\254\242\354>}\232\336\276\214\216\030\277\311bR\275\322\367\273=>\3742>_9d=\277a\230>\253\225&=\321\277d>\327+\267\275\216\002,\276\222\025&=\334\370\334\276\343\306\021\277#\356\030>\211Sf\276\n\243\212\2766\000\016\276-\3421>\233\256\256\275\013\274\207=\014\365\037>\317\330\300>\235\024_>y\361\t?\351%\300\276\226>M\276\336\260\317\275c\211W=\352!\020>\345\263\307\276Uc\023?\006\352\225\276\032\371\213\276\221\201\236=\032\337\005=\334X3\277\255\312\200\276\262\010\233\276]a\275\275 i\303<>iS>k\237\340\275\276z\313=\020\245 
>Z\365s\275\203Z\216>r\316[\275\267\026\205\276\257\266\023>\365=*\276\014\247b>z\303\252\276$D\240<\301\2647\275\363T\222\275\\\213b=\241\235V>\372<\214>o\275%>n-\315\276\034\237\204\276\377e\275\275\310\214\007\276\261\025\240\276\315\362\211>\271\315\220\276S\360\372\276BI\\\276.\276\034\276\207\257\030=\002*\331\373{3>\357B\202>\224-\226\275q\024\350\275\3326\362\276\211v(\275n.\300\275Sc\232>\303\221(\276\252\355U>v\001\312\275\001\304W>\357\370\362\275\260w\205\276-\275\272>^\320\355\274\355F\027\2749\223\304>\007\322x\274C\322\266<\202R\252\275\030\260Z\273J\202\324=\023\3164\276\t[n\276\371\301\023=)\230\247>B;\217\275\234\303\207>\021/R\276a\314y\276H\013:>tS(>\r\3534\276\003\323\240=\270\"\270\275\270\315s\275_\361\216>\207\037\n>\n\013\337=\224\310\301\275\303\234\001>\n\024\346\274\261_\265>\364\357H\276:\031\215\276|&\311\3249\276\275#R\223\275\035t_\275\203\271\034\277\017\365\021\276d\324*>\324\231\222\274\301g\202=\210\337\370>\372{p>\371p\223\275\000o\231\275<\321\363\275\212\266\n\2762^\305\275\310z\"\275uk\223>\020\322\276>\002_)<5\033\001\277\301\247\021\276\342Z}>\321\020\221=\314W\210>\230\333\307>\232\r(\277\240p\\>0\027\341>\206\217\010\275Vp!\274\206\332b\275\014Ic\276y\246\373\276GM8=\255f\363\275aa7\275y\244;>&\332\r\277\025p\220\276\271\215\207>(\n\315>\251\370\217>F\2004>\251R\000?\257\014\227\275\325\025\303\273%\315\265\276\362\354\014\276\373\322j>t\251\276>\201\231\206>|\351\256>\000!\032\276g\332=>\003\320\027=\032!\213\003\234\007\275\267H\032<\244P\241\276L\200R?\325\022b=\341\006%\277\364\237\300\275\354H\366\276\314@\237>Q\336[\275\370K_\276\312\017\351\275\2143%\277S&0>L\030\335\275ZQ\254;\033M\274\275T\202|>\027\367Q\277\356?\301\276\024\321\253\276d1\177>\"\346\005=\007,\354=\007\202\006\276\352\273\343=b\232\225\276\",\010>gk<\276\017\334\362=\301?(\276\376\307f\276\323\265\212\276\354Y\214;\202\241\353>\235^\254\274?\331\022>\303\246\234\275\322\272\346<\342\366\316>*\351k>\207d!>\372X\256>0\206\017\276\350\030\250>\324\213\004>)\t\214\276\'c\242>M\345\377=\331I\206=\003\360 >Cn\325\275<\217f\275\317\t\231\276Xe\033=\364 \222<\237R\324>\332\226b>&\223R>G\301u\276\372tb>\346\327\036\274n\357\325\276\324J\313\276{J\234=i\251?>~\315Y\275d\350\262=+\267\257=\2569)\276\265|??!\n%\275{\0244>@\033\234\276\365+ 
\277\370\251\370=NF\246>\3735\004>I\013\000?\370P\264>\302\2020\276e\r\257\276\252\265\246>\263\352\243=\350\374\237\276\035\251\230=0}u>\200\003\025\277\334\346\321>)\336\276\276\\\224\255\276cI\212>\373\277o\276!\362%\276Lk\237>\036\004:\276\261V\325\276\016\320\036\276C\260\257>\221\275\275=u\030\206>A\302\022\2751Y\214\276\226\nR\275q*\357\275\017\026=>5\360\351\276Y\365\237>\347\211Q=B4\240\275\375\355\021>\007*P=\217\232\316\2755\205\353\275m\226\213>\334\277\333>\no\223>\021SI=8\321&\273e\023\230>\266x2\276`\365W>C\220\216>\036y\225\276:\000E\276\016C\351\275\235\010\202>\247\253\226=\373@\244\275\350\034\322\274ja\355\276O\313\271>\3330\004?\2236\230\276\030r\001?\002\r\207\274\330\034O\276(\034{=\320\257\260=\361\227\223\275N\236\232=KK\362<\341\322\005\276m*\205\276\351\032[\276\n\037\271;7z\304\276\007#\010<\260\022\373\273\273\263\034>n\315\257>\356\021\322>S2\211\276R\027N=\013\230\350=\220\264\037>f\334\017?\216Q\303\275X}\255>\320L\352\276ZP\026\276\245\360\322=n\377\245=o/\257\276\365\263\005<\273\342\245>\017\204\213\276;\256\220>\232\301S\277\200\356\210\274\240\025\t\277\037\371\202\276\372o%\276]T;\276*\3576\277\234n\220>{\304\312>\003\013\205\276\221j\017\276\230o\327>\356\3611\276\346h\010\276xOR>\372\3343>\233\3744\276\212S\226>\267\335\002\277\214\371\365\276\331`\255\275W\215O>\340<3\275\213\374a\276@\260\303=\323\241\300:/\n\212>\261\262\236\275y\245^\276\311\335J\276\322\360\365>\004\346Z>\322WI>:\242\212\276G\275\220\276W\200J\275\264\200\247\276[6w=\366>\237\276k\206\335\276\345\340]\276\352\255s\276\276\2506\2762\014d\276G_\252=yX\363\271\330\303\276M\243p\276\3647\206<\245v{\275\241\232<\275\334\204\252>\241\366\224=b*\r?\2469\231\276\312\005)\276j\225\323>+\366\362<\266\374\214\275\237\233\207\276lR\215=\216\026\244<\300I5>\332W(\276\377\331\025\276\374\376\246\2753\230*\276\3472\232\275\314\362\r\277>f8\277\264\336\026>z\250d\275\006\373y\275\337\001\202>\r{F\276\005\352\350>#\303\'\276\276R\236=\300\027\230=\356\000\361<\374\024w=\002\007f\276a\177\226\275\310\307>\276\3510|=b\031\354=\367\357\022=\256\357}<\177K\362\275\260\375n\275\312\220\250\276rt\236>LF\311\276\315\324\036\277\006\354\212\276\277T\003\2778\357\347\275\212\203\325\275\345\230x\274>^\217\276^\331?\276v\230\331>a\032\031>A\233\212\276\350\362g\276\205\001I>\001\016>\277\257$\232\276\277\346!>0\030\220\276n\271\237\276\300\316f\276\366\353\205\276S\001\340\275>g\230\276\013G\234\276\345\r\006\275`\375\332\275\013\361\263\276+(\007\277Df\263>M\310\">\223\"\001?M\013X\276N\270\270\275\027\244\217\274T\331\226\276\307\340|>b\241\271=\252\272\204\276\253\316\374\276 \035\327\276D\264\270\275\005\352\221\275c\326\273\276\026\355)\275\026\237\217>m\346\036\277\230\301\026\277\036y\"\276P\367\355=$n\216\276\263\213\033>1:!\276j\377g\276\335\026\023\277\343\336C>\264g\266>\202T\304<\241\362)\276\221.\346\276\265\004\027\276j8\206>#d\025\276\264\360.?\024\r\r?%(\342>\023\303\203\275wh2>\351\223\375<\263\370\247>\317\3732?\030\036\235n}K>N`z\276\212x+>\177WT<\210\007\261\276\024\244\231>\317\376\242=\256[\177>\3046\242>[\246\226\275\360\361\221>\260|9<\200\247i\276\220 
\345\276\364\036\204\275\212\213\270\276\021\022\366=)\251\005>\034\342s>\035\307*>\352\362\263\276\227\316\202\275K\272\223\276\232\270\276\276\363\305\010?\374\237\234=z\333\002\276T7\021=R\363\010>\336\025\232\275\355\235\357:p\330j>\237\262\331>ZKX>\212\247\333\275E\353\357\275\323\252o\276P\201!\276L\260\r\276#w\320>\265\247e=\203\300\021\276\270\263\371>\261\365\313\275<\0351>\335\371\023>m\213\017<\027\241\352>\216\230<>Q\020\277\275>S\317>\276h\334>\376\2054>\006\312\'>\267\230l\2763\375\007>)\205_>\004&V?\300,\202>\n\331g>\316j\014>\351\374-?\356n\363\274\027\035\321=\234\037#\277\222\230\251\275\302\250\346=^\200\241=49\214\275\323$\230\2764g\321>\264\314\005?O\375\257\275Yy#>Z\206\304\275\t\217\205>\224\021\007=\251\336\360=\275\372\205>\306\035\004?\272\210\354=\275\234\010\275\233Q>\275\2269\225\2750M+\276\331\002\303\276L%\030\276V\017\013\272h[\231<\004t\264\275\274u\335\2754\221\235\2751\253\246\276m(\351<\364\332*>\"\367\243>\302\254\223>Z\354\247=\340\311?>1>\033=\376t(>\234\357\306>\341]\000\275\246lY>\363%\317\275{i0>\205\244\311=\272\233\376=_Wa\276\273 \034>2\256\225;\006U\020\276\330YE<\272\177\260=\254\000\"\275\335\213#=]\363\000?\236h8\276\260k\242\2750\327q=p\211`=\343:\234\275\247_$\276\361\350\267\274\231\276Z>\264q\360<\nS6?\241\3466>\"\254s>f\273\214=\326\000\327>\010\007\312\276Wp\240=\345\221\223\275#x\311>\tt\211>_\000\037>\202\312\027=\304\353w>\004\360,>\024.\026\276\361\243\202>\315\321i\275HG\317<\250\226\311>\341\224\217>\222z\225\275\203\301\003?\350\372\314>\3144a>\327\274\220>\033\373\276>\255\025\267>XD\353\275\204\177\246\276Y\267\330=\265x$>\013\305\215>\270\243\327>\223\001\272\273\036\227\332\275t\245\254;\004n\222>`%\017=/\377\370>\223m\002>\343\360\206>\016\3722\276\3541j=4F\t>\007\027\300=Q\363\002\276\334\204\251>N\002\275>p\351\005?R\345\301\2768\211\264\275,\033\017>7\211\256\276s\201\310>\215>\010>\271N,\276\240A\267\276\351\224\211\276\035\234i\276_\2011\277\260\230\376=\217\305\234\276[\205\253\274\317\367\214=\037\315\022?\2308\245\276\375.\225\276\330\313\036=\024\313\326\274F\272V\276h\013\215=\246-L\276aN\026\276\034b\263\275/\\\262;\013y\264\276FO\266\276\237V#\275\036\373\014\275\r5D>m_\237>\355V\270\275\030\347\262\276\035I2\276a\207)\276\204\013\320\275\364(*\276\025\261\301\276\207\245\200>C\007V\276\201B\237=5\205\237\276\036\213\005>\242\262\005\275?\3222\276\213(\350;j!\177\276\nGf\276U\245\210\274+\273h\276\306o\234\276\355k\314=\210\263L\276w\025\355\275R\324\023>l\224j\276\0236c=_M/\276/\000\260>f\314F=\342\"\236\276\004\017j\275\3764\014\276\241\277\341=#1\246\276\207\300l\276\225\001>\276!x\301\276\0132\254>5\270\272\275!-q\272\260\267\026\276\261o\354>\246\247\252\276\027v\024>\243e\360=\355\016\213\276\347\231\302>T\203y>\222\ne=\006\025\334\275kM\222>}q\243\276\201\275\313>\265\255[\277\263\226\027\277\201\241f\275M5\317\276\360\322\014>\177\034\231\275\302\267\326\276w\367\037>\003X\025\276\037H\221>\225\301T\276P_8\274~\246\337\275\314\252\264>F\310.\276\364\304\273\276\214\346x=\306\265\020\275P\321E\275\203R6\274|3/=\250\272\231=<\275.\276\347\257e\276\302>\321<\230\3263\275\233H\023>q[:>\242nW<\321&,\275\033\377Z\276,\345\224>\371\337h\275o\254\006?T\3231\275O\266\336\275\311\235C\275h\205|>yj0>\314\345\025=\274\302w>\362 
\367=I\366;>\321\367\330\274\225\203\014\276\233.\027?\332+\200\276\201\256\313\274\010@\216\276!\266\225\276a\340\215\275\220\370\250\276\032\236R\275\033\301\013\276\213w\\\276\t\363\214\276[]\216\276\370\237\324\274\257\264\371=\030\005\267=\222\360\\=\353\374\336\2742\tc\276}\343\350\274g\345\351\275\344@\"\234-\262>;\234\234>X\001Z\276\334\026j>\243\325\216>\257\231\244\274\337o\027\276SJC=\032\367\014\275\273A\035\276Cp\206>\352\031\2418;\256_>s\333\257=\324\316\315>t\n\256=\214\343\213\276M\231\341<*\032W<\005\263)>\333\023\274\276\224\034\216\276\345\326\322\275\316\333\227<\201\032\227\276\342tL\275Z\355:\275\023[\023\2742\206O\276\317\3338>T\310\212>QK(\276H?\027?\211\251\032?;\314#\277b\016R=d`\343\276n\334\265>gh\255\276;\333\365\2734\263\031\276-\217\231\276\234\274I;\003\274a\276\331\017\244>\303\306[>\320\314]\275\320\262$\276D\270\365\2769/\247=AuV\276pm~>/.\036\276\211\023O\276\377\317\255\275N\236\275\276\'L\016>P\221\221\276\342O\226\276=_\250>\375t\270\276\220Y\022\275\233\236_\276hF\225\274K\216\234\276\251\235\252>\331\302N\276\301D\007?\232\235\202\274W\315\307\276\"\272C=\336\303\253>\n\334\257=\260\016\321=\201H\324=>\213\024\277\'36\277\375\233\264\276\327$\247>\037\260\014>\200\313{>\262\374\342>^\304\352\276?\214\t?\006B\203>\250\000\330\276\265Z\355\275\272\307\036\275;;\202>j\235\367\276F\013\264\275\017c\227>+\"2>\247OV>\332\245\266>\216y\223=\246\205\305\276\024\370(\277\236+p>O\023\206\276d\227\301\276}\310=\275bBD\276\351UI>\212e\036\275\303\006\013?be\224>Z\240\030?K\253\221\276\255\367\001\276\310\222\216\276\000\246\316\276\207y\022?\211>\r\277\332\214\262\276\212?>\277\r\007\220\276\364;\360\274\247\342\214>\236hy\274\203LO>\177\007\315<\212?\217>\263\255;>\t\264\014\277\331M\227\275\332\366\251>\343\340V\274\216\371\003\275\267k\241\276\033IE>\013\245\352=\303v\261>\323\246S\274d\377\215\276l\240\223\2769\013\n\276q\304\306>\340\355\204\275\355\034\235\276\234i\306>\243*\272\275\027\2769\276\34031=\020j\230\276\326\031\237>\032\010!\277\201_\242\275\276\363\351>k\241\215\275\t\266\216\276\367\272\325<\331\246E>\270\000\037?\005\246\334<&*A\274\351X\266=a\031\364\2752\250\362\2765O*>Mj\372\275\035\004U\276[?\362=\207\021\272\275\230\360\010\275\361c\000\277_7&\277\251\253\013\253\276$>\007(.\277zN\255\274\t\273E\276w\366~\276.\362\014I\331A>\377;\330\276<\315\035>\206Z\014\277F\n\221\275\213^\245\276\306\360\032\276Lx\200>5\257(\276\312s#\276jP\'\275\2546\377\275\010-2\275\265l\302>\225X\214>\304\223I\276\252:\247\276z\362\233\276\302M\260\276\352_\232\276\272R\357=&\247\257\275\356\323\271\274dw\t\276>\277\372\275Z\201\250>t\003\250\276\201\253\n\277\nn\"?\267\200\301=E]\241\276P\226i\276\3068C>j\032\307>\264T\355>\267-\024\276\301=3>\212~A>u\312\017\276L\004\032>\251\302\302\273&\361\341=\031\253\027\277\277\211\242\276\033F\212\276.z\373=:y\344>\0323!\276\2140\007>2]\032\274\003S\276\353X\363<\264\203\302\276\260\300\317=\224E\366=\216ye=I\222\247\275\002\316,\277l\3543\275$\220\025\276Qc\024=#&>>r\233\310>)\000\245\275\277\256\223\276\236^\267\275\250\255\272\2761\273\310/z\235\276\034\362\230\276]\362W=\200Cd>\243\276\361<\345\362\335=\2303\255\275\231\210\241\276\332\374\255=d\322&>#3\n?\2510\214\276=\276\370=\200\027\\\276\370,\361=L\022g\276\000d9\276\0312\327\275\200V\314>\202C\001>t\346\310\273\267\374\222\276w\224C>\036*r\275\235\342\253\276\215\343\203\275L\365\316>J\023\252>\030XO\275$f\025\277\271\024\256>\217\326\346\276\320\025z>\274y\343>\340\001\212>f\252Y\276\262\245\360>DO\277\275\357a\306\273_\002\235\275\2405\222>\304\301\347>\205\
[... binary payload omitted: the remainder of this hunk is octal-escaped byte data (apparently serialized tensor/weight content from a deleted protobuf model fixture removed along with the MO tooling); no human-readable content is recoverable from these lines ...]
034\276\215\327}=\200|\037>\345FS=\356\376\253=,o;?\035\215\275\276\267\273\205>\266\257}\2762#\003\276H\3737\276aIC>@r=\276\263\243q>^:\300\276\341\272\267\276l$>\276s\272\022>_M\275\276\242\330\256\276\254Iy>\001\354\027?\243\'x>\036\003\336\275\025\322\203>OY\304\276\254\331\345\27693%\275\341\016\252\275dp\344=\013\333\276=\353\223s\276\330m\227\276\277\351\030\276G\221\231>\246\357\300\275\014N\002\275O\351\204>m\3251\275\265\313\374\275\374\231\333>A\335\247\276!\370&\277\237\347p\276\n)\265\276{`\326>\347\373\327\276\014C\347\275\355\215/>\264\0237>\003\216\035>\350u \273_x\213\276A\242\331=]\232\n>o\273$>\233.4\275\266[\355\275\332)\225\276\264:\365=\3558\004\276\227\032/\2756\365\307\275\000\023V\276g\017\356=2\3577\275\337z\334\274}v\263\275U{:>(\364\342\276\030\360\026\276\026\323\200\276\2427\205\275\250\356~\2761Y\212\276\360\327\351\275\032\343\244\275\252q\230\276\202\3165=|\206\227\275\177z\032?\002\231\227\276[_\213\276\357\370{\274g\005\306>\006\000\207\2756\322q\276\267+\315\275%\030\214\274V\030\217\275\202\351\n?*\203\006\275\014\233\n>\204\370\022\277\372\3026\276\373J\321\276\204\027\247>l\330\236=&\336C>\301\337\221<\220&W=)\241\002?~\235\300\275\212\254\335=-[1=\332\323$\277M\177\026\276e5S\275\343\321\344=\202\343\024> /\221\275\005m\017=V\t\247\275\006Q\230\374c\234\275t\341\226\275\033U\263\275?C3=\023\261\014\277U\311\n>#aw<\307\241\230\276\014}\243\276zO5\276$\277G\276\330v(<\261\210\025>fO->f\t\354\275\340\366\005\275\004\251\031>\262\347\002=[\234\001>e\3349\276:\001N>\221\256i>y\032\234\275{\213\333\275\231\022\002>\005AH>H\345\035\274\310\335\317\275\332C\274\275d\030\363\275\270/p>\342\002\300>\034\\\236\276\310.\010>\325\234k>q+\304\275{\267\314>B\004\023\277\307w\017\276U\251\347\2755\232\206\273\230>1>\253p\245<\247\006\346\342p\200\275\260\261\236\276\233\t\340\274\203I\241=\256Q\270>\266$9\275l\376\255>\001\350\373\272~\252\204\275\263k\230\275\320U\314=Y\353\354\275\311\274\021>\352\025\021\276\363\244\345\275&\260\373\276,\277\356<\016O\375\274\211\352\211>\240\374\035>O\301\006\276C\034\332\275fE\035\276\204fk\277z\213\202=\236\344\332\274[\276\242=A\271V\276\366=\235=\227\243\340>n\004\034\276K\215\341\276\340\344\t\276O`P\275A\274\350=:3\241=\222)\321\274\223\236\255>\230y\210=\001r$\2752m\253>D\r\021?\000\310\354\275\251f\377;\363_e>\365\006\266\276\002\336\025>t\334\037\276\221:\036>3\334\357>0\374\364>\355\316\215\276\225\272N\276c\256\226>\222\034\272\275TU\210>\322\272\325\276\342=\363=\253\264\334\276\367\302\032\276hd\016>Cy\256=D\t\211\275 \252\006;\017\217\034?\272\337\313\275\266M\344>\275z\037\275\250\204/>)\217\224=P\251\255>\030\201M>\215Y\026\276)\260\342>gD\246=X\367\277=\213F\246\276\2745\004?GV\266>\270\363\260\276\326\377\200>!\003\025\276wu\230>\326\305\323\276j\" ?\344\334\337>\345\344$\274r\035\334=w\024m>\205 
\316\2763e\004?\037X\255>/Gp\275]\222\336\276}\270g>\217J\355=\301m\204>\243\360\243=\330C\255>\271n\277\276\007\321\254\276)>\254\275\277I\213=\237\232\240>r\237h\276\326\212\'=:\277\021\274\220D0\270\207\200\303=\003\304\024?\272\350J\276\016[>=\256.B\275\212<\366=\227\374\224\275\235{7?\330\215\200>G\204B\276R\200\200\276\350#\322>\212tI\276\277\277j>\241\360\006\276_\202\321\276\353\264\253<\270S\001>V\217\311\273\277\367\310>\001\002\226>\342\246\214;\032ik\275M\004\016>\310q>?\207\367\350>I\031g\276DAo>\305\013\260\276\245C\004=\037\027\370<\203`\230\276\037\254\213=)\316\345=c\326=\276e\320\202>xb\231>\356g\264>\310\210\231=\272\005\223\2765\362%\276Z~\352>j\202\304=\367\013\342>XJ\323\273G\310\223\276^\240\023\276\300i&\2757b\261>\240\010\033>=\264\013>\032bF>\321h\362>Y\226]\276A\t%\276\024\217\225\276\004\316~\275\314!\232\273\022\002\014>\373\204\013>\361\317\021\275O\357U\276#d@>\201\220\217\276xab\276\214\370\206>\314\207\'>\256=\200=GX-?\013\363Z>Y2\301\275M\033\006\276!\261\005?nXe\276\2201\032\276\370X&\277#\235\233\276P\376W\274\317\316\316\275\262u\313>7V\025\274&k\223\274|\376K>\322\371\317\274j\377\001>\225\342\224>\005\237I=\236 \306\275\307\333&>\'\343\212>\232\302\301\276I\351\211\276g\262\023=r\311\261=n\361\274>\226C%\275\337\203\216>P\233\031\276\304\261\232\276\336\323D\274\212\255 >\177\262|<\374\376\017\276\363\356*>M1\211\254\301\323\275B\235\001\276\265n\223\275\256dS\275j\365Q?]\260\270>\021M[?\273\266\213\276O-g\276a!\002?\272\246\362\275\031\374\253=\010=\235>-\031+\275S\344\301\276Z\221\352=\032)\n?\324Iq\276\025\356\203\276\262\376\211\275\345\r\206\274r\254\301\275.^/?\211\220\371>\020\210Y>\"U\022>\253\232S>&\024\241>\202)\254>mc\375=vjg\276\205\302g\2766\265\334\276\202kp>\3237\213\275.OO>\002\304\314\273\342Ny=O2\020\276\222\310\276>\\\000\t\276\274\360m\276\371\242(;G\247Q>\251e\377>\010\374\330>L\377f\276n*&?jG\022?\367\270\222>\361\346\203>\326\226\311\275\217M\333>\262\345w=\340T\005\276\321\303\302\274cj\230\275\027$K?)\033\243<\307\265\254\276\361\234\365=1#\361\276\236\331\310\276E\033\270\276\3504\336=\177\375\223>e\337\236<\031\233\014=\367\022\345=\231}\346\275\321q\371>\304B0>I\366/=k\214\'=Z\260+\275\007t/>]\030;\276\201\270c>\244V\375\276\331~r\275\023\227\027\275\236\'\264\276\006B\257>y\262\274\276b\324\264;\216C!>@\016\213\275\364\021\334=Q\364\230>\214\266R>p\274\342=\024\204\375=8\250\220<\205\021\025\276\312\313m>\260\237\r?u\252\206<\016\034;>\325\277q=\260\345\250\2760\\8\276\304\"\315\276\004\007\351>)P\244<@\264\214>0 \265=fWF=#\224\343=D\313>>\257\014\251>\023\267\334\276\016!\204\276\205B\212\276\353\273i\276\020\013\202>\026Z\210> 
\202\305\275\211X7>H\223\035>:\326`\275\001>\274\274\212\026\254\275IHa\275\307\353N=/=y\276\303,\342>9V\004\277\027K\030>\246\263\256>#\354d\276,>\010\276\331\000\252>>\213\224>\021\026\336>y\326o\276\330HL=\035\354\251=R\253\025\277\237B&\276\371\033\321>\031\247\004>\365\300\314\276\267\214\232>\364\006\245>B\303\016\276U\351\302\276\273\335;>\215\n`>G.!=\260\274\317\274\347\241>\240\325]>)\376\021\276\234\254\253>\010\013\240\275c&\013\276\336\n\321=\244Q\016>8\245%>jy\033>\317f\n>\312\336\331>a\323\337\275\343\315|=4\371\342>\3750\257=\221\020\021\277\024JT=\211B\272=\013\266\200\275\036\243\242=\235^\013>C\nJ=@4m>\241\360\313>\307\267\327=~]\031\276r\353\355\274]p\013?\037BW=|`\311\274\177\374\302\275\305\373\271>\254\206G=\n?\030\276\273Y\020\2766\2075>m\376\235>\363\032\212\276\302n\243>\007!\006>\237\351]>\257\222a\276s;6=\025\0218\273@H\243\276\023\376\3758\256\214\274\276`\"\034<\242\020\277>\244Nv>\376\313\035>f\301p>\022\311q=\034\207\213<\023n\316\276fj\257>\272\326\344\275\254\262#>\213\260\366\273\336\336\036\276k-\361\275S\177+\277#4B=^=\313>\027\310\365=\0276\213\276\216\334,>\202\320B\276m\251c\276z\022\025\277rL=\275\255\262\364\275\235w\262>+\242\325>]F\262\276v\021\213\276\206\352\336<\341\314\303>\374\0204\276\330\350\034\275\037\002\343\276 ,t>4\022\317\276\206\006\032\277\005\253\346\275\003 \007\277B\333\013\276\363\206n\276>Z\266\275\024}\312\276\372\004f\276\216G+=R2\036\276\037vB\276\'\240\357\276me`>O\316\245\276\272\201\250>e\275\250\276Pk\340;\210w\306\274\277~\003>\021pv>\347>\253<\207\246\362>\235\256\223>\341\365\264\275\325\367#\277E\252\332=\201J\333\276\004\310\204\277\036\274.>D \233\276\237\016\267\275\346i\213\276\341Ac\275In\207\276\361%\000\275DZ\025\2773\344\375<\374\035\224\2760\n0\276\036X\327\275\027)\224\276\363H\261\276\361\223\224\276\302\227\374>\0229\260=\302fG\276\306\337\013\277-\252\201=\324\033\255\276\307\366\375\275/\031\001\277\024\274\017?\317\377\237\275.\021\374=\331b\345;6\345\203>z\010\242>vQ\256\275\211\326\362\275\032f\370\275:hw>s\323\267=W@9\276\262\217\341\276\202\374=\275\302\304\224\276\025\247\026\276V7\240\275Q\370\347;\304#f\276Hpa\275\224\247\005<\327\315\n\277\273\303\202\275{\366\022\277\013\266\373=v\2439>\360{\230\276h}&>\325\345.=!\341$\276\201\016\270>\222\010\312\275\366\223j>\350\256\262=D\305n\276\361\343{\276\035\271\361\275\332\204O\276\266`\322>\2432\203\276u\017S\276\216\007?>\001\207\340=\261[\237=\017\270_\276\027\324S>X?C\277\260!\271\275s\215H\275\3702\307=\326\240\246;\301+\335>\tl9\277\326;\276\275\334\304/\2769\364\365=B\227\366\276\257\207\300\275\025\370\000\276\366\374\375\276\313\274\364\275\243\363\210\275\031|\367\273\233\265\024\277<\304\250\276{B\311\276\333d\311>+i\237=\274\261\004\276ugP<1:%>\330\3037>\025v@>\336\256\353=u\257\021<\353\310t\275+r\332\275B\022\246\276\257<\316\276133?\323\220\215\273\216\002\326\276\035\361&\277\222X\216\276\324Z\n\277I\010>\276\202\342\223>=\252\225>M\312\277\276\'--\276\354\371q\272d(\274\276\220i\242>\376\022j\2768.\"\276\311\372\002\275S|\217\275\356\2238\276\206\014T\276\033.\353<\000\213\007\276]\347\305\275ap\367\275k44\277\243\323\031\277*\027\004\276\325\300\004\277\302\234\020\277,\016\327\276P\271\031\277\326)\261\2769\305\202\276x\366\277\276\346\260d\276\224\313\205\276>\007\202\2766=\247\276\340\027\000\276j\330\336\276\326,-\2753P\221\275\220\344\221\276\254\271\025\277\266\026\260\276\216\240@\276\221\240\222>\345P\323\276\367M\001\276\353\230E\276\036\345\222\276\233[\225>9m\314\276\353\333A\275]I\310\276+c\346\276h{\216\276\326<&\277\247
?\033\277\253\321\017>G&\025\277\255j\000\277\300\017\301\276Q\205T\276\250\375\205\276c7f>\"\337\303\276\316\355h\276\2142\372\276\270\324\204\276#$\200\275\214\215\364\276\352\257!\277\366\004\256=L\203\275\276\316\n\355\276\270\327\004\277\337j\205\276\330\373\373\276\206i\303\276\316\342\330\276p\3647\277\341\275\244\275\222C\n\277g\013\336\276\032\355\025\277\225XF\276\314\271\350>\347A\330\276\022G\323\274\312U\231\276\311\360\221\276&\327\312\276\301dD\276\273\031\206>^\347\"\277P^\033\276;&\343\276(\301\204\276+\025-\277\356\345\342=\025\034\267\276*\321\304\276\222-\r=3\217\213\276G\236Y\277\251^o\276\234\214\321\275fx\361\276\r)\315\276\n\361\016\277\250\3610\276H%G\273\254s\361\2765}A\277\010\037\016>Hs\003?\361C+\276\364\307^=\017\007\332;*%l\276[p\356\276#b\225=\360\245\'\275t\343\230\276:m\244\276\352\2725\276\302,\344\275d\267\367\276\035\254\t\276_z\232\2758~\272\276%\2023\275\346\267\360\276\267\321O>#\244\343\276\3007e\276\352g;\275L\361\300<\n\307G\277q\033\211\276\356\240\225>\330S\272\275\005\222B=O\313\301>\253>\270\275e\\\357>\017\374\345\276\\\237\365\275\017\273\017\276>;T\275\025\205c>\351e\254\276\203 D\276<\341\005\276bR\265=\240\007[>\226\025\"=\347h\t>oD\216\276\027\217\022\276\223M\021\276O\224\242=\345\030\030>\230j\\\274\032\306 \277(t\362>\034\236v\276 \321\002\277i$\354\276\231\340\000>\313\"\256\276o;&\277\352\327m\276\371?\377=\244\005.>\274l]\277\263\336\213>\221(\033\277b\003L?\253x1?\277?\314\275\002\336\336\276p\322\232>\315\322\364\275\0371 \277o\025\r\277\030\375\245\276\241Gc>>\016\220\276~:\227\275\\-C\276\241+\265\274i5\324\276\313e\255\276\027\340\016\277\233\256H\276E\215k\276o\362\227\276\214\261\010?\327\233\364\276vs\031>\203\244\312\276\2544\247>l\310s\276;\320\240>\354\351\263=x\256\032\276\223\242\006\277\003\344\030\276\373\306b=+d\305>\233\370B>kY\255\275\244^\021\276=dO=\277n\244>Sq8>z\361\210\275\240\332H>\266\030\002\276G8\352>\002\002\257\274\305\343\007>\'\260X=\323\240\277\276\360^p\276\035\332\372>$\373\306\276u\313\005>\327Q\246>jR\313<\227\000\026\276\213\365\004\276tZI\273iLU\275\260h\013\273\013\351\271\276\231s\320\276lr\216=72\016>\233D\004?\211EH\276%\307\232\276\223\010\337\274\203Xt\274\357w\231\276h\333}>\017];\277\260\336\213>\311b\203\276\t\252\260>\225+C>\313\007\026\274\310\020f\276\034\344\226=\352t1>w\236\005\277\271\267.\276\215\016\326=\244\200\017\277\356\310\346\275 \300\316\276\317\006\214>\353\324X\276|@\224\276\016-\220\276c\2015\275P/\212>W\224\260=\235t\275\276\271\360\007\276O\373\241\276\366\014U?\t\272\203\276\263%\031\276\336H\203?\351\007\334=\376\354\246=\240\315\353=81\316\276\")\007\275\304G\264<\r,\3323:`\274\231\310\014>\320\361\301=}@\253>O\311\322>a\321\231>\352`\364\275\314c~=,\342\224\276Y\220\325\276\033\354\006\277^\013l;\345\257\342\276\244\300\251>y\351\335>XD\014\276)\236\002?e\241\031>\213\204\373\276KR\374\276\327Go\276\2066\220\275a\3028\276\252\035\241\276\242b\347\273tR\374<\3059(\275\365\035\317\276\271\210E\274o\031\311\276v\252|\27623Q;-\031\223\276LG\232\276\277\242\315>\020\027\303\276`[\246\275\3644\233>\216\371K\276\222\277Q\275 \254\213\275\231)\346\275\225t\253>S]C>G0\253\275\202\221\032>,\035^>7\352\333\275\235\014\323>\\\351\250>\226U\243>~\355\264\275\216\277\340\272\370\344\235=O\325Y>\362\204\337\275\232\244\330\274\363 
\256\276\222\024\233\276\333\3131\275>c\265\274b\224\321\275\266\037\320\275\376\243\221\276\357\253\263>JG\375\273F)<>\260\263\021\276Y\374\247\276\364\375@=\013\272Z=\235\337\346\276\250\217\006=\201\016\035>\205mg\276V\361)>\221)#\277\026\220s>\016t >\272l\037\276b#B\276\r\241C\275\305\365\206<4<\217>v\235\030>[\210\252\276\326\'\310=c\357\277>\255\313\306>\233H\213\276\013\213e<\']\377\274`\025\003\272\355\3468\276\002N\220>\306\266t>F\216\235=\344\311\353\275\307\022\310=S^Q=\333}\225\276\256\331?\276\320\364\204\276\243^\363\276\244\257\217\275s\017\023>\205 \212\276\022\260\201>\332#T>\246\361\237\275\364\256\351\275\350\274\220<*77\275\\\343\317<\231pM\276\270\211\205>\347\324\007\277k#\264\274\311\326\">+\306Y>tO\274>\332\262\034\276P\347z\276J\337\274\276t\253\226\276Ik\220\275\022Q\206=v\320\271\274\036\312\002\277\025?\232>K\032\232>\213\201\200>\345\240\231\274(\003\342>r\362\000\276\254\216@\277\017\200\262\276\305=\020>\257\361\240\276g4\221=]\301!>\217\203Q\276 -\'\2771\235s\275\r\273 >z\262\237\275LV\n\276ze\251\276\035_\022?v\027\310>\341\363\366>\362\020\207>\316\036\231\276r\311\230\276p\t\313\276n\271\245\276\270\261\217\276\223\337\234>\273\342:\276\324?@>\314\341\237<\"\321\370\275\237/L>I\316\324>\037N\254\275 r\302\275`\336\316\276D\336\211\276\023a_\276\216`\225\272\216^\315>\375B\300>\322\225\025\276P\246\362\275\312\213\246\276h\331\'\276\334\325\340=\314\244\026\276\002!\206\276\236\031m\276C\225\211>C\321\244=\211\361-\276\363\267\236=\237xR>\030\312+>\252\036{\275-\013\001?\354\206\203\276\037D\242\276\231\3766\276\360\370\224\275\2177q\274l\211%>\337\354\337\275n?H\276,z\241\276\272\207\000\277q\265=\276\302\347\345\275\300\002\253\275\226\010\236\276(a\366;\331\236\216\275\345\026\361\275\305\340a\273\357*\211\276\036U\317\276)M\261\275\263e\234\275\035\276t\264\266i>W\272\233=r\2362\276>\306I\276y\207w>\376\266\312\273\306:\223>R\256\031\276\337&\266\276\330,\031>~\376_>\364\215l>5~\275>K:\000=\223)\000>s\244\034\276^\005\317\274\005\310R> k\374\274l\271\263\275\031\302u\276\355\t\033\274{\2600\276\257&\267\276Qwa>\272,\234\276\314m\356=x1\325<\375\244\254\273\365\250\206\276;\217\233<\316\256\317\2752\341\237\276\344i\213\276\310\264\321\275\017?\325>Y\3528>\313H\3539\350\312V>\223Y\335\276g9#<\3113\t\276\0334\205>I\237\230=\224\271\033\274\354ZV=L\353\214>A/p>z?\013>u\335\344=)\024\306\276\346 \267>V\270\203>\326B\200\276gZA\276\233\2540=\337\270\213>\331\304\211>\202B\256=$|\023\2766Vm>\250\272\272>\343\321\032\276\246@\360D\320\023>\027!\267=x\324\231\2762z\253=\t\275\005\275=\351\n>\314\026\234\275\226\230\373=Y\356\275\2759O\210\276\351\356\333\276\206\343\n?_\320\200=\302&\327\275\343\254\001>\274\357p<\217-\235\275\314\323\312\274\211\"B>\203\310k>\261\351X\276\r(a\276z\205\305=\t\234\204<\256vQ\276m\337\005?\230\320+\275r\005\016?\2027d>\374X\217>\355\236\003?1\303\265\275\2215\007?8P\213>*\220\346>\223\350\341>\207xX>7\356\250=&a\223=Y\377^>\355\'\376=\354\330-?\210\177\352\274\347\017\341>A\332\213>\253&\036?t\343\013<\360\237,?ccj>_\352\360>\306\007H?\311b,>\337\026\227>\236\033\345>\313w\003\277\3108\025?X0\230=\203\r*=F\276\027?\312\034\000>\017\377\023>}\253p>[;\324\272\003\335\317>\370\302e\273\351\240\247> 
\371\227>+\037\341\275u\034\027??\006@?\236\341D?\273\233\242>\361\217u\276\007\273\304>\324\363\n>\370\224\351;\267\242\240=\305\002\340>\241\216\021\276\\\3002?\240Mv\276\377~n?#\247\025?\215/\343>\341\262\310>\363\223+?\322\303\010\277\351N\'=\315\316u\276\314\307W?\311L\312>\027\245.>\224\313a>k\244F\276m%\323\274\317\374\351\276\360\345\360>2\\\376>\026\367\334\274\251\305\260\276\233*{?t\352\302=\033\256\345\274\235&*>\231\211\337=\260\027#\276\240My\274\213\311\241>\275\3135>\r\013\373\276(\030Z?H\227\251>w\307\214>\000\233\001\275\253\363\360>\301j ?B\356\006>\n\361\331>\313\351\325\276\024\021\016>\372\'\342>\374O-\277\330\310\027\277MFA>\355E(\276\311=\274>*\025\226>\035\211\030\275\\\035\215>\202\006\226>\202\000*=\341\320\205>(@a>\257\275L>\027@\357\276\030j\201>A\212\306=\242(\245\275s\267^\276\307=\367>T\305\032\276u\272\362=\307yj>Y\t,?zD\241=\235\202\374>\367}\032>\342\213h\277\035s\360>\364\006%\277\002\372F\277\310Z`>\340()>\251\374\240\2757\337T>\327\254\241>\006E\030>\333\332\333\276ve\034\276v\372+\2769\375)>T\370:>\345c\024\275o\004\352=\342!\026\273\212\037\354<\364\031\252>>h\005?cT\353\275\232\021\373=\302\363\r\275\001\320\002>Pg\324\275\242\"\246>\036\367s\275\033\253\037?\214\334\002?_4\307>D\313\212\275\272\352\220\276\034\341\013\277\035$\\\276(\227\203=\201I\267>N\306\215>\021\352\371\276\213\234\323\276\210\214\252>%;\024?\324\022\253\276\272Rr\276b,a?\327\312\342<\367\365U?\276d\276\275\351\3504=k\237o\275\340\2036<\335\n\326=\373w\021?_5\n>h\340\355=\221lg\2749\356\031?r\375e>\242\006)\277Nm\243>\n\326:?\000r`\276\227/}\275\344\032\016?\371\272\302\276\331\215\372\276O\037\323>\250\037/>\2114\037\276\0175/>\267C{=\220SD\276\214n\010\273Q\312+>\222\034\204>\344\030\220<\033\001\213\276V\323>\274\305\353c;\331<\251>.\241\267>\255\034\316\275DF\245\275\342\267\372\276\210@\'=\376;\214\276\243<\n=\027\276\036\276\027b\220\276lJ\360>1N\224\274\377\332B>\206c4>\024\312&\276\013]\263>d5\223>1\345\214>\330\360\224\276\276\3270>\020-;>U\342\202=\037\244;?\262\347\207>R]\350\276\033\344\016>P4\351=\234\371\343>\204\021\354\275\343\246\257>\374\006\320>c\311\352=\310\346\314\275\360\240\363=\266\203u>\361d\261<\314K >r7\274>\273fN\276\013\213\217>\202\313\310>\265\013\340=K\035\000?\312\344\343\275\260\306|=\037,3?\342\207\241\276\367\240(>\222P\036\277\'\337\232>b\004\317\275$rI\275\022\302E>\310\346\016\276\377\361\326>\000\026\331:\'\301\353\275\356\244\304=\377V\224>\377%\035>\277l\305>\225\335\r=\256\272f\275\276\306\331>\310 S\275\222\345\235>i?O>\211g\007\276\355\370\255>mb\257\276\003Jx\274\316M\223>s\371&\276\200Q\026?\356I\005\275P\234\363\276\026\216\345=\273\2660\275\201/\241=\030\312o\275\355\315H>\007\373\262\276b\025z\275&\014 
>\253Y\304\275\010h\n\276_>\177>2\254b\365\274\344\354M>s\224\201>\323\376\232\276\223\337\325=\377\t\236\273\215\206+>\227\271\327\275@\347u\275\010\333C>.@\364\275\265\351\003\2760\263\030>h\037L\273\\\203\312\275\022\210\n\275/D\001\276\216\201\201><\214=\276s\321U=\n\274\034>\267\254.>\233T\027\275\314\223\232>\332\014K>\354\006\374;q\226Y>\310\302N\272\204P\375=\016\370a\276\2257\201\276+\236\236\276\247\334~\274#F\014=\1772\313\276zX\'>\030)\031>\337\021.>G\306\257=\0314\325=m]#\276K)\272=\216\002,>R\277%>r\003N>\274\367\230\273E:\'\276\375\030\334=\016,\346\275\274T\200>j\267\202\276\n5\"\276\230\332\263>\330Y\221;r\264I\275\261r\232>`h\267==|\206\276-\004`>ID\244\276!\230&\276\220\212\360\274\001]\006\276\233\321\255=\004\226\236>\352\024n>B\314\345\276\217\247\023>\314l\364\276FgW\276\324\275\004>7U@\275\000\245C=\017\260R\276\370\222\256>\235\n\010\276tP\265\276\\ma>\345U0\276\030\201\025>\325\341\320>\272%\226>\033yq>;\006\340\275r\027\346=2\245\324\2750\206-\274\343\033%>\2634\370\275\0249\201\276#e\363\274K\263R=\303\200\233\275\376\272\'\274\251hv\276\n\271\\\275\372\274\207>\016\311\322<*\236\307>\231\017}\276\345\252\225\275\255\343U>\220Tt<\344\361\202\276F\316y>t.\245\275\01626=\031\t#\276R\"\035<\324|]\276D\226\317=9\226\244\276v&\017=\230\357\361\275\315\375\276\276\t*\344>~\222\274\276\203\204\n\277\234\366\344\275P\0215<:\321\002\276\242\301{>\350\235\250>|\310\347\275X\210\221=\233\274\212\276^\244W>\307V\261\276\242\375\242\2761\367\303\276\317yB\275a=L=\366X\240\276\365-\204\276&\242\324\274d\340a\277~\'\240\276\033`\255\276KK\302\275S\340\304>\356h]\275n|\336<\013\316\013\276\261\307\270=\352\024\n\275\350\323\004>\315-\246=2\227\226>^\371N\277H\235\362\276\305\365\305\276\'\220\324>\270\376\203>\343\206\304>\335I\265\276\244\331\321\276I\335c>\264\215\026?q^M>\257\t,\276\260@1?K\035.>\237p\264=3\340\021\276G\227\020?\262\353\362<\235\250$>\364\326@>\037\2009\276\347\367\237\276\245\243$>&df\277\235\261O>h\252\337\275&\031{\276N\373\210>\001\302\222>\010\003\334=\232\273\271>\363\242\230>^\025\323\275\300\004[==\320\014\276R\023.\275-\334B\2767Us\276\343\231Q\276(\223\274=d\026\222>\201\036\304>\255=\023\275S9f\274 \304\031\276\261\343\236>Km\330\275\251\2218\276\271v\300>?\330\217\274\013\265{\274\222R\303\276\276\201\226\276\315n\307\2761u\000\275x\313\273=a\326\233>_\016\023\276\276\342\341=\257C\216\2763\272\336=\243\335\205\275\331\030Y\274iN\273>\254F\202\276,|\">c\023\344\274\204Ji\276\251\212\230\276\343eT>U\016\376\275\0207\004>\302\333~=qG\245\002O\337\274\211\030\261>`c\327=j\013\026\275\263\352\336\275\005[\037>\270\352)>\377J`\276E\3165\275\3646\216>\335\036\314\276\226\271\235=\242\242 
>I8;>\250\331P\276U\277\355\275<\227\031>|\341N\276\250\203\265=\001]P\275\022\036I>\345\033\010>+F\237><\375\017>\323\367\223\274%\037\205\276]\177\036\276\np\271\2759\014\346\275(\367\264\276\203\362\273\275\333n->\202t\253=\200|m=\314\314\216>X\033\245\276\242\304Q>\347\312\361=Ayi\276\324\207\037=\032\322\323>mi\265\274*\203\030=-\0036\276\253\245\327>\346JC\276\204\343\001=\207\215\001=\246\275p>\264\205\032\276\370\035p=\245\324\005\275\\\210\265\275?@\002\276PPv=n\320\223>\263\264N<\276w\241\276\317\350$<\341\354x\2767\344\274\276M\311\241\276\024z\256\275\361Y\271<\270~\201\276\371\262\274\275V\n\332\276\031`r=23a\277<\365\360\276F\3653\276\335\352\236\276\333R\200\276\375c\341\2768V\322\276\276f\022\276u\364\361>\352\202\203\276\246\265\177\276\'\312F>X\206\205\276.~\307\276\371\251W>\212\030\212>\227m(\2779\332\354\275\303J\013=r\317i\276\304=\204\276\274\023$\277T\027\006?\234~Y\2757_C\276\337?q\276\346p\304<\205;n\276P/c\276Y\362w>\250\034\034\277]\372\210\277+\267\303\275\206\250\233\275\330\032\027\277x\344\225\276\333M\334>\372gD\276-V\266\276m?\202>\016\367\322\276V<\245\276ef\200\276\010\244\236\276\260\315\201\276\214\276\031\277`\275\346\276q\344\241\276\224\324\267\276\026K\264\2760x\215\276D=\245<\277\204o\276I\257P\274#\326\213\276RP\312\276\324\353Y\276X%\372\275\245\332\303\276\214m\300\276\332\367X\276-\326\202\276\002r\252>\240\223\344\276\254S\261>\246\343\365\275\366\330\256>\202\2140\277h\311A=\311(\200\276\330\250\317>\350\366\211=Q7\356\275\033a\271\276\305#\033\273\234\237\003>MO+\277\017\n?\276\336su\275\220g\263\276\353\302\273\276U\207\325\276\201\"\320\275q\275V\276\262&\252=\250\\+\277\341\031V\276\t\020\025\276\331o\231\276\332\236r\276\276\245;\275\340Z\017\276\307u\233\275\221&1\276\376j:\277\316\270\026\277-\352^\276\301/\204=\033n\210\276\202\334\351\276\252\321\025\277\211\3673=\3126P>\325\313B>c\032\314=\215\345\023\276\214\221\036>\320\022\226\276q\214\325\276\372qr=\267\021A\276\003i\375\275H\206#\275Z4\200\276\325I\006\275\355\330\025\277R\302]>\360-\027\276\300\257x\276\335\335\034?WA}\275\331\307\036\276\2454U\276\214\275\226\276Uq\342\275&\226_=\266\036\334\276.\022\r\276\366\256\231=\271\027\227\276,\343^\276\202\340\032\276\214\230\333\275\271\317\004\275\236\246\227\274\372\354\203\276\026\327\217\276I\267\271\274U\215\367=\205\367\023=\376\332\326\276\275Yb\277\267\374\214\2766\244\252\276\215\253k\276CH\271=p\327\231=5c\252\276\244m\246\276aLV\276e\224\346=\215\005\023>\000\350+\276-\030\236\276\362\023\336;b\203\352>\232%\343<\251\301\235=\356\364\254\273\031\274\225\276$C\022\275\214\347\301>\027\323\306=\315\273v=\242\005#\276zZ\221\276\271!\321<\2461A>\251\335E;#\2536\276\003\020\270\276U\353\036\277\030g\225\276\206\'\360\276\035\024`\276}\362\206\276\343\035/>\303\037\324\276\312\335x\276\252\261[>\021o\023=f\223\272\276x\355\353\276\353\271\366\276\n\321u>\232\007\026\276\214\311\227>Q,\212\275\331^~>\265:n\276\342u\326;\274\211\347=LN\220>\253{v=f\276\227\275\361H\004>\271n\036\275\357\0041>^\203\347>U\342\222\275V\236s\275\342\223\\\276\'q\213\275\305c\276>u\340\270=\233ik>P\200j\276\362\327\r?\020\224\204>\255yS\275\371\031h\276\301>w\275\375\302\353\275\"\374\260\276\230\344t\276A\315%=\345\213\245\275\241\330\322> 
y|\356?\000=Na\217>\r\361m\275v\257\016\276\223\250H?\323\033\202\276u]A>\025\225W\276:\235[>\303w\226>\207\014\261>/\334\231\274\244ca\276&\261\320\276r`\355=7l\224\273p\201\303=b\223\257>\326\250\324>Z\253\376\2752\013\307>IS\026>x\377\031\276\235\234:\276Ea\014?\324Q$\276\023\266\201>\300\"g\276\354\001\240=P\322)\276H\267\211>\244v\205\276\206\364\260\276\221D\262>\314\214f>N\331\276;\303\311\037>b\330M\277\344\214\n\277\237\320\262>\314\253\212>\200\316\307>\226\215\241>b\330\214=\026\222\221>\254l\316<\346#\226=\362o\266=R\340\315>\316\032\313=\003\037\323>\034\234\272>l\214\262>\317\ty>\341\004\314=(\317\302\276\032\0007\276\267\327\265=\231\001\202\275\361\204p\2752\257\214>\365\275}>\351qI\276\355\255\230<\325\344!>ynW\276)M\003=w\211\230\276\340\241g<\357W\347\275\354\312$\276\225\177\211>&j0<\266\017\273>\323\340\321=\272\322G\276\223Z\307>\346J\365<*g\376=\206u\346=\244\337*>\031\303\323\006\2768\276Y%\247=P\271\343=\027\220H\276\312\255\365\274}\315\306\2749Xw>O\232\210>\0036\307=\305\036*\275\301m\234=l\202\300\275p\377\302>d\016\374\275\360t\303\276\217T\232\276\334w\236=\203D\004=h\274\037\276&\222\230\276)\352\026\274\326E\256;\312\017\023\276\244$I=\315V\255\275\002dI>\257+\356\275:\303\311=\312\243\232=#S\304=\203\270\231\276\364\347<=\366~\344\276L\370e;t\202\035=\243\326\225\276/\360\345\275EvX<\002\027\232\276\210\212\220\275\343C-\276\326\033\214\275o\214X\276\350\370U>\333R\001=\310\035\352>\236\224\366\275\016\257\034\274Vw\252\276\n\020\254\275\232\200\302\275\350A(>5\373\272<\317[;>\340n\262\276Y>\216=>\277\000?L\006n;\023T\316\275\205|\227>t9]\276\217\3437\276v\303\031>i:\200\275\223Z\271\275\202\324\225>^\007\201\275m\205\310=\004x\273=l\202\333>\367:\023\276+k\217\276\275.\237\276\232\t\275<\371 \013>\\\016\257\276\022\272\265\276A\002K>\237D\310=B\221\222\276v7#=\262\356h=C\210!\275?;\017>L\263\025\275\326\260\321>\003D#\277\003\020B\276\346\004\302\276\255\241\375\276\252\224Q>.T\352>\276;6?/\272\227\2756q\332=Tf\327>qRD>\233\226y>|\347k\276\333k~>O\375\367>\3302/\275\375\243\273=\332\237\265>]\213>\276?4}>W\200\200\276_\275\177\276D\3625>\251\252\032\276\307\251\321<\315`\310>T\246]>\003\317\245\275\246\312\257\276d\357B>,2\354>e\321Q\275\227\333\026==D\334>\231m\327\275\202\332\003\277\367\026\023\276\277\001\237=\256l\367\273\304\232\276\324\321\001\277\353\233\025>\re\014\276{\026\266\276qZ\313>\216\023<\277\206\242\346>\365p\321>3P\255=\375l\251>\304\302\221\276\334\362\237\276\310\220E\273\223\327\253=\213\270\243>\277\215\225\276\346e\014>\304\357V>(!N<\3678q>\035\340\301=\035-\342>\277z[\274\020\003X>\243\222\037\275\003\314\200\276\367\000K\275\260U\032\276\027q<\2767\r\200>\277!\240\276]\261w=Ef\231\274<\016\330=>[\310\276\\\325\242>T\366\261\275C\035\343=\177\"S=\331\210\224\276fv\"=w\261\033\275\031\211\201=p\027\236>\032\377(\274\"\367\201\276k\230\264<\317\270~\276\"\317\005=\245\276\236\275\003\312\300=G\253a>7m\030\274H\357c>7\332\032=\300T\022\276\303\276\273\2767\340y\276\031\026\367>3\335\371\275\n{\325\275\257\324r\275\367J\224\276\370\261\036>/\273K>\361\230\306\276y1U>\372\261a\276\016\341\243>8s\204>]\325\340=\326%\000=6_\226=d\245\\>;0\022\277\220M\260=6\275\r>\354\225\016>D\206t>\302\371p>\251mq>\325\224\007?r\321_\276\025J+>\274K4>\361;z<\351\247\313\274\355\346\035>\rH3>L\300~\276!7\366>\nd\246\276HVZ>}\313\275=F\333\032>\'\301Y\276M\277\206\275\252\267\372=g\373\205\276\313\032\310\273\274CH\276\027\350\267\276\3325\354>l_\014\276<\"\347\276\246\341?=m\032\306\276o\371\305\276mR\256\276\026\275\374\274\3331y>\224\361\324=\205,\204\
275\373\005\'>\353\301\247>\312d\236\275\240\371\227>\035\307\325\276P:\230\276\374\213\206\276\223e\235\276\227\242\246>a\202\325=\"Pi>\021\274\242\275lA\274>w\212\030\275\212\266\245>,\336P><6\004\275\203\221\014\276\340:\270\275;\257\242>dc,F/>\303\250\002\277\330:\345=\213\330\317=b\232\215>\3249&\275\022\320\301>\327\247\206>\270f\311>}\215%\276\013\254\000>7?%>62\315\276}\216\335>\025\242o>]\310F?\035k@?\232p\000\276\337p\270=\304[\331\275\3210\321>\331)\304\275M\367\003>\221s\237>Lh\214\274,o\237>\273\202\310=d\321\010\275\232\251\004\2761\'\310>\202|\230>D\217T\274\333W7>\277\342p>\301\256\311;m\350\326=\331q~=|\3667\276\345\350\242>k\307\257\275D\343\300>o\022}>\002\227\010?uu=?q)t>\036\323\204>g\212\365>P\017\240>CI\331\272\327\375\333>\230\343\215=1X\274=\000\301\037\275\232e\323>\027\303\242>\234\253\004\275!\263\217\276\246\324\026?\217\256O>\336\025\341>G\3132>\335\035\241>f\004G?\210\357\206\276\271\242a<\312\034\305\224\010\017;\300\276\351<\367\360\320>\213\376\177=#\037U\273\240\315\361>\257t\005?\006C\r?\203\303L>\324\034\264>\001\256H=\357\r\354=+\220P>@n\355>NZ\006?HW8>zo\331>\202\373\303<{\023\224>m\356\247\275\355f\310\275\326\035q\276\334] ?\037\200\272\273-v\200\275\273\236#<(\030\311\275\306C\035>>\347)>n\3427\274(\242K>\210(\\>\356 \213>\324\025\266>\203\216]\276e\310\252>\337$\231\276\2512\352=\225\205F>\255\r\333=,\023Y>\376\347T\276\351w =\350\227\245>\304LE>X6j>\264\345\214/\320\300\276,\313\035?\030M\240\276\327\371\024=7\r2\276&\000\303\276u\023\252\276\t\342\202\275\263\tV>m\031\030?WkT<\200F\333\274p,Q=>H\r\276[\0304>\'\330\006=\344dI>\226\356H=\207*\252\274\245\033\224>,\321d\276\217gC>\255\265\235>\225\205n>\276\254e\276\270\356\024>\340\201H\276\225r\252>\375_\326\275eL\243\274S&\000>j\023\200>|\035\203<\222\217\017\276\331Tf\275\354b\227\276/2\312>\351\203\224\275\312Rd\275\036\001Z>\0066T\275\006\234\324=\261Sr\275\002d\206\276Z\261\212\273t\020\026>%\226\305=\220<(>\265,\344=A\372.\276,kL\276\345\032.\275\roA\275\240\266\022\276G[\335>1\234B>\205\237%\274\233\254\007\2758OG\274[\372E>\364n\265\274D,\212\274QiB>\034\3752=\214w\307>.\244\246=\250\357\211\276\230\014\200>/\346\270>8@\002\2763a\022\276a\351\275>[\223\277\276\360e\375>\027@->J\226d\275^2D>?\344\233>T\220\271\276\206\025\"\276\360\"W\275\277\233\310=GB\272;\177\243g\276\213\037\225\275\307\352\230\276]\273\345\275\273j\310\275\340\272\242=\313o\017\277\303F\243>\021\034n>ZYm>\276x\006\277#?\204\276f\232\216\274O\350\232>R\227\217>\205\326}\276\355Y,\275\232_\364\276\371\274\023\275\304j\017>\027\302\250>P\224\005?{\331\307=!\300\005>\203@\024\277\262\254\365\276\020Lo>\230\340L\276)0\227<\233\177\347=7\341\262\275\342\251\t\2757\274\351\274\322=\272\276\024\016\223\276\324\316m<\331\205\244\275/q\344>\272l\264=\245\001\226\276Pt\335\273s\311\313=7Ts>\006%\362<\263\351\236>\217ND\276\227\341\204\276!7\371\274F5 
=\316\237~=\214\210\240>\345\037*\276M\204\002\277\300\311Q>MO\025\277yN\232\275\262R\374\276\365\211\251>\272z\016>\317\016\226\276fy\313>L\210\206\275M\264\235\273\265\3209\275k\245\256\276\324\030\247\275I^\357=\360X\037>\332\t\336\275`\023\274\276T\026\234\275/n\247=\335c\204\276\343\026#\275\361\343\210\275\230-m>\217\303-\276a0\"\276\326\t\t\276\035\rq\276pR\030\276\022m\205\"i!\276\324\352\335\276\035g\227>\366\177m>\323\320*?\372w\226>\177\357W>$\266\344=n\331\201=(\302\216\276j\303c\276y\255\276\276$\013\251=\247%\237\276\305\313\376;ihk>\356t\354\273\3458\316\276T,\021?;Y\235\276p\254\007\276\331N\234\275E\245\351=\372n+>Z\2346\2769\2312>`*\024?\002\212\262=\326\220\247>\367v\252\276\255A\">\213\335\247\275\204\357Y\275\376|\311\2727\266\020\274\347zf>\270\314\r\276\232\331\311=\312F,>\001/\203>j\034g=bc^>\335\336\020>nE\334\275Fh\313\276\327S\337>\027\247H=\370w\211\276\233o\226>o\225\220\275\375\246\304\276\377A<\276\203\343\332;\357]\243\276\034\253L>\213X\271\276\372\244)?\2577P>\177Dj\276S\320!\276B\370\366\276\264\257\265>\356\3030>H\300\326=z\346\n\277\230L\035;\325\002E>\005O >\013\341R>\234\201\027\276\334\233\374\275\357\331\220>B\377\311\275W\307\202\275\'\031\002\276\247\006&\276j\013\013>M\010\\\275\353\020@\276\t\373\021\273\367?M\275\261\254\210\276\024T\307=\342\240\263\274\253\303\227\274\354\256@\276\2651*\277rJ.\276C\315\000\276>\223;\276\240e\014>\210M8=\206\262\251\276\3144\263<\2538\027\276\216\322\366=H\227\364<\342\255$?|j\200\2768\022\260>\316f\270>o\211o\275p\253\243>`\332\307\275)\256\344\275h\323B>\030H\266=!\314F>+;\311>D\370\250\275v\235o\276\254\033`>\367kr\275\240`\201=\201(\350=\004&A\276E\374\201\275d3\275>\305\320\333>\346\3139\276\\o\260=AY\262=\024\354\013\330\275\340vA>;\246^\276\312\343y=n\034\014\276\347&\200=\177TR\274\224\266\026\276!J\301\276=\024\027\277\275\241\005\276\025\002G<~$\336=\250\211\300\276mY\274>\316\265\026?\305\025e\276\206\362\227\276\276\352\307\276\014\226x\276m\'\241=\216\344\275>\203\336\224>0\360\264>+\355x>\2126\314>!\320->\330^\201\276\307\371=>uj!\276\010\350\274\275\337\250%?Szk>\2774b=\255\321}\275\231~\236\275\220\267l>\241 2>\034\004Z\276\'d\327\275\265Zh\274\256\331^>\337:j>\rv\223>\035\014\020\277\222S\264\276J\224\206\276O\250\271=!\357\226\275(\263\223=\00537?\230\262\264=\306m*\276\236K\226;\347>\211\275\013V\224>\2356\202;\300\275o\275\302\267\235>F^@=\355wD\276w\300\335>\241\r\220>\000\025\202\276\022\305}>\315|\345\374\3500<%\301\266>\350R\006>\210\213\\<\257\322{>\261\317\337;1+\317=u\020+>\010L\252\274\267\373\255=\223\201\205=l\\\233=\220J8\276Ws\236\274\343N\214>\3519\300>/\354\243\276\272\"\026\276\202G\017=p\256\351=h6\037\275z+\305>;\2679>|@\214=\265\254\255>,\222q\276\306\205\"=\"\251u=\001b%>^\373o\275\305\273\200\273\2343\212>\230\364\200\274\361Y\"\275\004hf\276p\213\216\276z\001\203:y\335\320\275X\356\223>[i\005\276D\317;=#}\316>4J\313\275\017R\341>\226\020\250\276{\324\376\276_\374\243<\273\0235\276\177\001\207=\352\367\031\277\272G\247\276\210\315\240\275\332\370\000?\256`\244>K/2?\275\2229\276Be\210>\000 
\026>\020\244\233\276a\253\254>u\002\214\275\240\r#>\341\\\374>\315\270\353>a\275@\276\025x\206>A\252\343=\3352\261\274\361D\003=p[\005>L\310W>)\3513>\255\274\006\276K/-\276\2126\334\276\357\347*>\377\330\335\275\236\224\214\276\327?\265\276\205\350\202>\034;\351>=$\240>\331\264\242\276\232\003\341>OR\031?\256\"\202?{2H>\306\374\267=\357\364\276>\332\245\340\276,<\347\273\257\264o>@\014\350=\303\026\242>\235\254\260>L\322\210\013\000\261>y\034\217>\313\033\315>\026\327\321<\341\371O>J\014\272>\345\246p>\331\262\217>o\214\013>\220\\\242>\377\203:>\362\326\351>\340i\266>\241\245\265\275\"\234\366=\217\213\336\275\240\335m\276p\235[\276\330\3076>\037\035\364>\204\225N=*q\253\275\320\n\245\276|\022\257>\335\021\202\276#\263\017?W\211}?[G\262=.\360\360>\355\354\207\275zT\210=x?\344>\300\203\030?\364\266\244>\\\213\004?\352TM>\000\216q>,e\354>\301\177\252>k2f?O\302\230>:\026\307>\265\210C\276\004\221\237>I\177$?\001\326\002?\037\243\177\275\234\305\262>\032\375\257>u\337h\276\0200\314>\274d\376>\005!\005\276\205\261\250>\2655\325=\265\354O<\230\030\000\275t8\326\275\321[\006?\212\034\263=\266\303\246>D!\024?)\371\320\274\242\345\354>\021\376\336=l\362\321\275\'\017\003?\344\235\274>\321\266\032\275\204\244\334>q\306 >\212\037\307\275\243b\326>\274\203\226=\323%\217>l\027\354>\325\264\225=S\224\022\276\275b\327\275\017\210b\276\245:\255\276\'\217*\275\271\376\347>4\242\333=\246\350\204=\313I\226>\317&\270>l/\221>\373J\214>\003@L>\004\365\315=\034\205\000=V\314\335=\251\373D>\315\262\302\275\036\227.?\323\334\023?\211\350M>\022\n\267;\006^\024>\360bC>\010\276v>/&\233>(C\362\276*\344)=O_?\277\001\367\252\276\210\306Y=\002]\032>\005g\350>Y}\023\275+\230\254>\267\266/?\270\203n\275\2641\312>\360\202\344>xyS=\031\023\246=\242\337<>\005\014\022>\355\003>?\3368\352\274\245m\326<\3519%\276Pu\346=~\333\005\276\n\326\206>\324\210\225=\237\255\267>I\232\206>U&\025=\253L(\276\252\305\272>\234K;>\251\030<\273\207\244\246>\206\320\022\277\322\345\227\0219\024>\245\215\345>\262\335\247>[\261Q>\304\234\031?\220\351a>d\334\242>\276z\021\277kx(>\330J\273>\341\367\t\276y\270\037>\221\3746?:\376E\276\340\232\371\276E\350G>\377!\257>\205\030\241>\240t\023>O\231\361>)\200\226\276bfw\276\374\237\211>(\266\333=\251q\013\276g\013\037>\302\203w=\220\344\347=~\377\232>\235-\340>_\253\201\275\241\353a\276H\022\324;E\014\324\276;_\351\275:\356\244>\241\236U?7\360\364=\017\2129\276\006\374\324=\205B\212\275\305 _>\'\002\317>@\260\223>\206Z\006\277\204?\212==\032\260>\270cB=\210q\310>\360yIZl\247>\322q\027>\321\035\345=TG\201>V\222\263<\364\232\321=n\332\334\275 \347\"=\362\262\306>\373D\354\275\311B\007\276\030/\257=Py\241\273\036\233\316>\261\314\262=\333\0225\276sz\320\275\362\377N\276\244\331\367\274\374\212B\276\205X\201>K1\t??Yq>q\017K>\r\235\206>\003\321\013?\036\341\235>[\367\302>W\330\334>\006\002\257>\014\002\022\276\222kI\274\236\025\206>U\370\201\275>\207\323=\245\220\353>\0203\302\276\257\277\206\275\232\026\263\275\226\332E>\275d\020?\\\003-?\237/\032\275\207\351,>\377.\033?\274\032H\276\363*\006?\372\206U>\236\t\242>\373\3014=/\324\n\277#\217E=\377\211\236\276\tn\016\275\255\310\306\275C\321\204\276\230?\005\277\213\242+>\215\374\000>\342\202c>~S\035\276s\253\317=w3\370>E>\005?H\326\366<_\207:>\261\035\031?\362\332\322\275\204H\035=\246\363]\276z\357\025\275\332\006\230\276e\313\274=\364g\223=\002\377\331>D\031p>\265\225 ?\025o<>\030\310J=u\266f=\027\375\036\276UZ\030>\215\227\272=~(\225>`\337m:\222\330\307=\0379\335>a\241\245=6n\014\275 
`\274\275+\0200?\246\365\022\276\270#\336=I\020@\276\244\024\033>\270i4=Z\234)?=\322c>\257\321\272>ob\254=\260\316\213\275\217?\207\232\345D<\203P|=\270B\231\275^\370\376=\356\0026>m\317B=\241\245\301>H\326s\276\256\024Q\276-sT\275\314\325p\276\201\347l>\210\345\332>1Y\262>\311B\252<\315*\242<[\226_\273\344Pv\275\241\311\302>h|\261\276G.\001>\2477\247>M\026\263>6\322\215\275\325\260\223<\275\372\010> \005K\276\376\030\377\276\273\375\000>R#\215>\257N\222\275c[l>\216=\034\276\262Ll<\367f`>\216m\335=\327\3051\275\207T\'\276\353\331\017?\017\013\020>\022j=>\247\220\244>m\n\037?\2467\020>\001\007\365\275\345\0213>\177\234\320\276\221\265\002?8\253g>\020\2534>e\013\344>\032#\345\275\331\3375\276\2125\356\275\377\234\342=V\210}>{%W\275`\302\225\276\335o(\275\254\030\214=`\370\001>\330\254\000>1\344\215>\n\225 =\212K\232\276%\030j\273\326\022\260\275\322d2\275\244\3715<\\\214\345\276|\341J\277U\3028>\000\346\t\277iP\254=H\025!>\342(\006\277\003\267\270=\333m\301\276\314\314\034\276\365\301Y>\376/e>=A\335\275\272*}\276L\357%\276\367\205\274\276\315\r\\\276\260\370\263\274\010\013\202>ke\266>\271\247\357\276V\221\265\276\271\231\226\276\202^\014>\rw2\275\255\213\244\276j&\036\276lX(\276\317\273H=\306\373l>e\235F\276\303\n\376=\024\321^\276n\0163\276\371\366\354=\007\372\313=b\005\n\276\323\241\244=\262\337\335\276\244\216\327\275\217\020&>\2607/\2777\224\216\276%\237r\276\210\n\240>Z\270/>)\t\217>R\236\355=\\\351>\276\270\343\334\275\301\263\227\275\303\031\261\275R8\317\276\340]v\276R*\001\276\327\304Y\276\037\205C\277\264x\330\276\231nf\276!\215\276\274k\3735\276\271\237\026\276\214K\001>5Z9=\305I\237=\036_\366>;U\242>\tf\214=U\031\246=\303\276S\276\320M\016\277\312\362\004>?,7\277x\034!\277r5\241\276\241\275,>H\307\201\276q4\262\276\321:5\276:F\357=\220\364\334=\200-\020\276\036\344\177\276\3379\275\276RB\006\277\307?\203=^\002Y\276d\253\202\276\301\306\255>\327s\234\276%\3575>j^\317>\340\032A>\367\362/\275Pg/\277\272\352\363>g\226\230\276|t~=\263E\365>\222\354\372=\200\312\332\275\314\267$\276\205\2011\277\351\250\363><\326w\276\213p\225>\351\203\270>U[\315=\347\\\322\276\200\241\177\276O?\207>.uC\276\002)W\276\247\206B\276\375W}\275(A\212\276F\001\016\277\232\313\316=vN\213\276\207\037\226\2762\330\331\275\004\tf\276)\002\000\276\270\211\324=\022?\277=*\024\211\276\237\321\330>#\027\310>\210\323\304\275\214>p\274\312k\210\276\255\2057\275\2448\243\276W\364n\276\260^\213>\342\302\263\276\366\020\252>>\266\357=\016[\366\275\247\357\270\273\371:s\276\232TK\276\320^Q\274\224\325\224\275T\312B\276S\024 \276`<\234\275`|}\275[\267\302=n\335\255=\031\n\023=L4$>\014>u<\301\330\272\276\247d\216>\371\343E\275\211\223\033>D\254\360\2747;%\276\304\356\345=d\270\325\275\226\034\023=faG\276~\316\316>4\002\273>\364\303\347>T\351\246\000\263C>\253p\'?\303nK>\321\253\023>3\016m>G\254\212\275F1\245\274CMh>\303\177\025>0\3315>8\375\315\275\223\0177=\365\363\221\276%\266.<0^\274=\247\000\207\276R\234\256>J\347\025>)\203\233\275\3775#?\331,:\276%Vh\274.\3453>\320\003\032:\2265\251\275vl\275<\347\254\242\276\354\205\371\275\271\3370>\271W\370\275R\236\330<\352N5=%\262\013\276 
}\222\275\357\343d\275$\030\232\275\335\271\225\275\024\014\231=s\257\216>\302a>?\374\353\253>\016\243\360=\223\272v=\\q\276>E\365\261=\262y\t>\026c\021\276\205\246D>\3730\024>\025\237<>%\223\236<\330\275\206\275\240\021\255\276\201`(>\332\'\245>\374\240\226=NI\352<\022p\242\275$\242K=\254\217\325;\205\301\">\346\266\321>\372\215z=\r\035\004\276\335\260\355=\220G\221\276\221\377\320<,\216/\277\230\272\007\277e\2769=+^\241\275\3652\213\276\274t\300=Ms\321\275df\355=8D\243\276\004\253\254\275\2509\211\274\372\r\313\275\266l\365=\"\013\266>\035s\345=W\201\310\275+\273\341\276~$\225>\n\014\027>Q7\222>\030t\010>]5X\276\354$\275=\237\036V\276\001\351\243\272\033\262U\276\374v\225\274\027/\343<\023\217\300\276x\210\262\275\r\346\020>\246m@>\233\274\343=y\361\000>\327\356\361\276H\234\332\276\343\275\226>\310\337\304=\n\375\031>i=\352=\177>D>\366\030\374\275\265\004\236\275\360\373J>\372\206\222>\205\375\313\276\010\241\265<\344v\'\276\377\226g>O\267w>^\237\200\275*;\304>\032\344\014\277\016\361\203\276\373\032:>/)\304>\241\240\000>\251p\032\276]5\000>\352O!\276\031I^\276<\244\177=\223\202T<\370r\205\276\211Ri\275\\\257\r>A\003\255\275?\274`>d\010c\275\355\2732=\354\205\212\272\347[\352>\nq\212>\204C\244\276\2710Z\276]\242hz\374\260\276>\267\005\275*u\237>\257\232\212=X6m=(@\245\275\302\313\216\276\217\376\206\276y(c\276=\004!>\207L\225=B\342\000\274.f\346\275\204\377\220\276\272<\023>\307\252\262>\321\230\330\274\2512C>\003\221\013>\341\311n>,h\204\276tB\351\275\3151)>;\350W>\026Z\021\276\335\207G>\031\261>>\201\3450>\212\316\231>\330s\266>\246\361@=\336\366\r=\307\200\312>knW>\\\217\376\276h\313s\276\022\217\177\276\323\023\366;\247c\233\274lF\340=\315\177\303>\317\000\022=a\316\366=\364C\332=\377\317\245>\366\240\303=>!\231>\3219Z\276@\224\326\275\005\224\005\274\376\202\">`UH\2762M\263\2754\t\212\275\330\003T?>\025\017=\367w\247\274\216?~\276\325\307?\274s\245\004\276\237\351\300=\010\"\207\250O_\275\237Vq\276\264\345\232\276\351\216\356\276f\010\327\274^\236\367\275x\003\236\276\302\021\322\276\"a\315\276,\222\323\276\362\t\300=h\\\005\277s\010>\276\264o\215\275%\375\262=\266\222\214>\323\235\300=\017\022\361\275Q+.>p\345\204\276\351\'3\276:~\322\276i\211\306\276\377#f=\255i\277=7\001\254\276J\006\230\276\272\275\313<\264_\243>G\331Z>o.\320>\022\345\307\276UM\205>\272\321\213\276\261\351%\277M\207\035\275\230\005\341\275|F5\276\026\010\227=N\205\214\276\327\253==\225\007\247\276\362\253k\276\254(\023\275\273\356\302>_,\274>\263L[=\212\367C=\2170\220<\322\320O\277w\t`>z\325L>?.\210\276\364\365\313>Pk0\275\300\301\215>\245\263m=\242\231\352=\032\006,\275\343\361Q\275::G\277\336\2734=\243\341/\276\305\203\352\276\027tS=\205\223\352\276%\003\206\276\276\323o>\321hx=R\\\252>(w=>\344\007\331=\201\257&\276\323\315X\277Z\217\321\276\362\356#\275\243~7\276Q\342\277\276*\214\322\276\213\277\257\275y\216>\276q\203C=\321\332\360\2766O\366\276\004\210\344\275T\304}\276\352\323\"\275\215:\232>[3\010\274,y]\276\267\210\023\277\265\237\342\275\336i\251>J&Q\274\t\234k\275vl\226>6\200\272>\344\214I\275&K>\276\333<\261\276\2715J>\234\242\367=\327\225\341\276\326\2107=e\027\017\277\305\006e\276\371\261k=\370\252\321\275\375\315\203\276\206\223\211\275b<\222\276\364\340N\275\026|\320<\321/\251>\330\035\313<\002/?\276\242M\257\276\236\\\024>)\254Z=\265\276\377\275\354ih\275\324\030\225>\300>\201\275\331}}>&\221~<6\2332\276\2061\010\276F\245B>\272\251\255\276\010C\210\276\365\216\201\274\333\327K>28*>\302\252\201\276\310D\357=42c>\301wk\276C6[\277\360\024G\276*\036\351=\204P\240\275\344\"\340=\362(\251\276\024\344q=
\265\332\342\275x\301\023>E\203\256>9W\\>\332\252\224\274\201\'\003>lh\305>N\2553\276\277\364\275=xwY\276\206~\245=|\232Y\276\331\224\233>Z\232\024\276\214\377\252\275\311d-\276\325\224\235=\3729\326\2761\320b>q\270}\276\212\265\202\276-3\022\276\261qI\276+I/==+\227>\335\241\366>\363\253\206\276\224\"\021\276\220\344\215\276\257iQ\265\376\246<\'\347]<\337b\323=\271\267\317\276\206\250\350\276)0\310>Y\341N\275.0\001\276\3117\347>5\306\210>\255c\257\275\224\007Y\276\206\344Z=k\250T=\234){\275W\004\234>\014D\200\275\203\031\222>\327\007\357>\244-f\275A\362i\275\357@\'>\327\325\021>\353\373-\276\017P\255<\205\341\325>\374\361;>\373\260\236>Q8\000?\245\234\236>Q\332\t?\005\322)=s0I\276\323E\005?\231P\224=p\316T\276\341\177\271=T\361\353=E\021\325=\325J\033\277\301\331\301<\260\256\005\276\354fh>\264\372\350=\206{==\374n\201\272\223\016\336>\007Y\236=_\025\304>=;\315\276,\342\315\276\365\204B?\302\206\022?KZ-?\261\353\007?\353h\374>\213\360_\275U_\215><\017\254>\255\210\241>\024\016\262=\007\274A\277$\001\027>=\014N\276\225\207\307\275N\215\346\275B\341p>\243\326\233>o\307\242\276\370\230\250>~{\343=\325\337k\276\033\211\226=\035d(\276\244\221\251>\350\007\232\276\rgQ=\024\017\260\274X\246x\2767Qf=\266\353\253>\316\'%=hkR<\354O\340>\362\215\000\277\251\320g\275\002o\357<\230v\245\276w{\321=\223\275\327>\210\353\261\275\023\365\001?/!d\276~\311\027?\2375`=\252#\313\276\241\2605>@\375\340>\216\214\247\276\266\215-\277\nQ\022\276\263\024\206;\302F\207\276nL\030>\036\003\313\276*\326\240>\3262\026?\210aw\276\372\244\002?\253\340\244\276\302\016\021>\360\034S>\3079\363\276y\276C\275jR\023>\234}\264<\317\200?>\305\335\247:\364\260->\317%z\276\336\212\000>\310\253\027>\371V\026>{:\321\274T\244\317\271R\210\036\275$\256v\276\275\007\216>\000\204\234=\322\022\266\275\220\260\005>\313\311\205>*\352\325\276:\204\214>\345.8\276\021\210\247>:\247\017\276P\261\014\276\341\367\313=\237\366\341>T\202\\\275\256\311_;\217\232\021?n\275\027\276\025\343\224=Q\325\033\276\345G\023\275\351\266\003\276\376O\233\276\245v\370<{\224?>\221\035\305>X\337 
\276*\215\370>\247\324\006>\236.\t\274V\332{\274\330\326\315\276\014\361\230\276U)0>vG\243\276\252Li\275.O:\274\361\203\337<\362#\215\275vL0=;G\334>\353g\257\275\375\206\214=\3046\244>\244ca>\250\324&=oy\036>+*\350=\024\033\257\276\314*A\276\001\031\010>\341y\270\276\225NP\276\274\263H>\004-\'\276\332\345\377>\216\n\203>~\300\235>\305\202\262=8\331\350=\026\213\214<\035B\242\276\3607\004?f7\033>_\210\227\275>\357\332=S\271\327=U\3508<\204\223\234w\005W\276!\236~\2743\330\252\275\213\277\272=\253%\313\276t\003\377=\314\277w>\006IP=\337IM>[iH\276\272EB\274k\032\223\275\264)\032=\235\rO\274c\264_\276\323\322v>;\314@\276y\221\017\276\033}\241>\245\307\216\275\305\222\017\276\261\007;\275\321\246\325<\r\027.?\336U\214>\206\306X=\203E\n\276\336\247\261=x\356\320\276\201{\203\276\\6\357\276\253\252W<\326A\001\276=\337\312\276\260\224\214\275\000\343\022\277\005\340\263\275i:\302>\247r\255>\212\200\\\276\233J\314<\327\312~\276\370!|\275\230\245\267\273\016\242\263\275\215\367%?\357V@>\036\366\254\276+W\036>\210v\322\276\221\2504\276\244\262\237\273)\363\271\276r\306\033\276\013\306\322\276\201\026->Fq\255>\275/7\275\025t\303\276\330\223\366>\346\362/Q\031\330>\304\355\303>\235\213\373=Z1s\276\253\311\027\277K,S>\375\254\350<[\263\223\276\300SN\276<\370\222\276*\325\221\274\336\375\366=\231\213*\276p6\304=\302\206V=\270\250\321\275\245xm>\213\002b>\367D\346=\242{\032?\002\241\244\275\225\034/>\210\364&>\271\260\235=\036\2232\276:\326\343\276\360\037\332<\304@g>j\355\267>\273\371\307=\301\376\251>q\267\\=\345\241\307\276\034jW>\2118\006=\304(P\276E\021.\276\227\223\361<\243|\366>]`.>\231\276\312\275\307\014\206<\2017h\276FJ\\=\333(\334\276z+Q>;m\301=+\253\372\274\006\365\326\276\344\"\276\273\025\354*=*\365&>~\325\022>\375c\214\276\375*\344<*a3\275\006T3\276(\367\256>v\326-\276\010g\240\276@\262\376=+\344\256\276*\202\026>M\376\242\276\033\373\010\275\363\267\352>\310\"\232\276mS\237\276`\330\373<\317\017\234>A\213\340=\306F\200\275(\2208\276{#&>\226z\253\275\316/\262\275R\363\316=5\313`\276\205\217Q\275j\262\235>\207\017W=\266\014M\2765Zl\276>\306o>\0068c\275\230\265\235\275#\215\241\276\'\020*\276\261\375D\276\026\367z\276\006aI=\375\250\035\277\261\266\213;\251EM\276\300\277Y\276\326\323\360\276\204*D\276-\263\205\2761O>>P!\377\275M1\265=\230\325z>\371PM\276\276\331\313\276h\323\251\274\3060\347=3P\307>\352\334H\276\350W\363=\225% 
\277A\312:>\n\226\247\273mz\276>\376\341\337\275\035\232\302=\222\021{\276\222\376)\276\261\355&\276\360\247\031>P\307l>\037\307\033\275\343K\261\275\034\033\340\276#Z\222\275\317\343\354\275\200h\223=\271\233j>\354\301\225\274\036\317\214\274\367\202\307\276\210\024\204\276\220h\364\275\344\303\014=t\212\365>Ez\231=\326\214\007\274\361\021\032\275z\177\276\276U[\265\275\323\324D\276\3342->\336\027\031?\202\246k>q(\361=\357F\006>\341*n\276x\341V\2762\333B>\033M\261>\273\372\226>*<\221>\264\216\305\275\232\330\241\275T\244v>\214\255\016\277\261,\322\276E\240\243>m>S\276IC\375>#\261\025>v\362j=\3541\222\276\340\007#\276\341};?\375\343\\>q\315d>x#\371>\343t\307>@3\r<\304j\261\270\327S\345\275)\352\227>\t[\341>\304\222\202>\370\302#?G\343\004?\022\210.<\300\211\310>0\347\366>:?\270=Svp>x\022\310\276\024\315\027\2755+\231>\236\371\322\275d\315\270\276\316\241\222<\333\0237\275|\272\305\275Fn\201\276SSG\275\241\370\013\274]\t\202\2737\373\217\276\307i\374\275\002\266\224>\200.\245>\340\200,<\340\"\370\275\225Q\266<\rwW>y\272\344\276,%\246\276\330o\203>\235\241\036\276\351\005\274\276\302\010z=\376\014j\275\314\026!>\037\\\036\276\3242E>\350\331*>\324\253\216\275!\305:>\302\362/\276\rj\270>\316G\017\277j\023\220=7S\230=\344\206\250>\243\266\215>\235]\365\271`\013\026\276\007\243\250\275\217[t>_\311\274\273\3664\366\276I\221\364=9Pi\277K\244U>7\230O?Mq\257=\270\312\205\276\206\n\024\276\nP\212>\305zn>\202\252N\275\017ry\274^x\211\274\247D\206>Z\247#\274\346\376\302>69e>#E%\2753#\034\277\273\270\'\275\304BP>\030`\243=R\201^>\030\216\343\2743\324\204\275\357_\343<\212H\217= F\250=\006)Q\276\326B\273>\275\266\225\275\242\342\377>Y\345\010\277\375\223\366<\244\206\037\276\267\307\234\276X\002\275\276\242+\236\275af>>\004\327\252=\273\034#=\354\267\366\275M~&>\035\300\000=m\276U\276\n\010\027\277\204d\225\275%D\207\275H\233>\276T4\251=|\013\016\276\315\253\022\277\274\340\264>\267Z\357\275\212ZO>\244\361\314>%d\002\276u\210*>\261\273!>]\024\305\276D\341\341\2764`d>\267\231Q\276Ha\006>\005\001\372\276\251\260\314<\326\254P>CS6>\336\026\220\276%\270\262=6 K>\313\340\016\276\271\342\271=\347\301\246<\322\247\217\276\344\322\351\274 \204G>\353\'\033\276\323l\323=^\325[\277\335a\205>\244^\333\276\204\372\236\275\021\023V>\026vY\276\3125Q\276\277\316\206>\033\376\032\276\216\210\311>\313-\221>\305\r\003=\252y\310;e\252+>\001@j>\210\271\264\276\007N\317<1\326\332>Q\246\231>\2412\253\276\234n\r\275\341\237\017>\365\247\016\276\216\033\234\274\253\004\225\275\020\350\255\276ap\221=why=}n\356;\301#l\273+\037y\275\265s\021\276\204!e\276y\206\223\276;\367j=\367\274\333\276c\212\225\276fq\221\276\247\325|>\346\242\275\276<\362\237\275(\335X\276\337J\013\277\261\016\270\275\260q\005>lD\340<,\275\033>m\351.\276\016\374{\276\033\303\341\276\230NU>\333\240\272;\025q\n\275\331\342\314\276\276\336\276\275\331\255&\276,\345T\276\010\345Q?V\226\345\275\357W 
>T\354(>\333[\367\276S\217\215\275\332\216\021\276B\007\352\2759\252\203\274\301;\001\277\210\033\374\276\325\t\236\274-\002\246\276\366^\r=\232\224\014\277nb\220\276\275=\002\277\245\270r=\335\224\237\275U;\233\276\014\343\005>\014\247\221\276\266\2731\276\212T7\277\014\n\232\275pEe\276\007\203\310\275S\357\231\276\177\272\313\275\312\374\275\276\205\217}\275h\237\036\274-\337d\275$\374\255\276\371\334.\276g\007\213>`o\t\275\266K\033\276Lf\\>lf\376\274pJS\276@&\304\275\233e\371=\035\037\330\275L\202\220\276\353mK>\362\022k>\027\215\373>a\\m>u\355\214=\350{\024\277\317\203<\276\3316\267\275X\265\251\276\303\212h\276\225?\244\276\250\246T\275\202\206\244\276\022g}\275J\036\201=m\216\244\276\330R\356\276\220\223\331=\261\216[=G\223\205\276\275\\\365;}\322\271\276\252Q\317\275=\356\374\275\212\374\022>qg\305\275\362\343\003\270K\246\032><\203\361\276O\230\333;\222w\177\275\021\277\002\276\360X\364>\246\321\375=a\206\260\275n\326\205=\375\007\356\274R\006\312\275J\331\315\2766M\342>\211\001y\276\271\3771=\304p\371\276\241\315\312;n\2144\274\020\010\204=\311\010H>\326\344\242\275\3652\251\274]\001:\276\247\034\003=\350\317\246>\252\037\266\276\201`\025\276\310\361\331\274\344.\007\277\265\222\n\277\034k\024\276\212d\266>GHG>\311|\022\276\212t,\275\260_B\276gl\221;\375\206\312=x\026\222>wa\202=]\367\341=`\345/\276;\037\201\276\033v\333>\2218\311\274*\266l\276\332e^\276\213\251\323=\373(\376\276e\325\250\2755\010\206=\307x\037\275\222\257D\276\004g\035\276\326\362\211\276\231G/>X7-\274\027>\350\276\333\030s\276\346\212)=\006\365\363=\t\372>\275\266\221\223\275\316\355(=(N95h\302\276\315R\233=-\271b>_\200\032\277\326\230\362=H+\013>\016^.\276\031K\033\275a~R>\327\252\320\246>\201\276%\013\027\276\336\027\351>\333\207\275<\322\322\306\275\247be>\243F\364\276~\253\236\276\307go\276?B8\276\006l(=\222\243\304\276\265\211\343\275\026\302\352\275\270\032\217>c\234$>\0264\243\276AF\211\276Y\361\241\27652\243\276H\207\325=\302S\331\275\233\253D\275t-\025>\0024\241\275\314\227(\276\200\020F>\321\204\221>#\306\260\276\350v\321\276wt|\276Z\316\230\276\255v\257\276K \321=\"I\247=\316\023\353=\274~\214\276\007\347\273\27656\216\274\007\216\341=\342\207w>.\271B\277\001\347\370\276\031\007\333=\223\210\275\276\025`\313\276\341\316\034\276\322\275#=A\001\270\276\217\016\361>\235n\345>\321M\307\276I\224\262=b\330K\276\n\314\220\276[\036A:l\332\206>\237\345J\276Z\347\222\275\203v\354=N8F>\254\024\236\276\221\371\245\275\263\010Y\276\021~\317\276!\247|>\310\021\251>\325\203\256=Ys\225\276K\252`\276\357\301#\276\301\213\243\276!\211!>Z\310i=\030 
\024=4#Q\276\233\356\013\277`\221\215\276K\336\222>\201\270B\273\220f\255\276\307\333\252\275\003E\315\275\317\315\316\276*og=!\023Q\276\220\367N\275\220\306\027>3|E=3\252\347\276\220\\\221=\364\340\t\276Nq\363\274\202\206\320\276%E\020?C\251\375\276\327\363\024\277z\326\235\275\315\003\205\275\222\3726\276@,\306\274\217t(\277Mu\254\276\226\023\324>\324\234;\276|U\'\276a\334r>\304q\265=\243\335\316\275\263S\031\277\021\244\225\276\325\236\216\275\253?&>\217\343f>\303y\350\275\005\2042\276\001\027l\276\221\216\260>\313_+>\336(}=\214\220\003\276\242\007\235\276\376\270}\276\356\353\203\276@\326\274\276\202Y\216<\314\035\230\276\027]\262\276P\345a\276$c\037\276\333\252\017>\365\242!\276{G\337\353\233\270\275\334[p\276\270o\247\275\242<\201=\345k\374<\225)\020=\017\224\r\276\260\347\222\2760\317\210\276\025[\201\276\347\214\205>\\\206\237=&[\241\276>({\276\304W\365\275\231\027B\273\272ts>\021\277v>\366C`\276B\352\030>\020\346\033=\'\301\226\276\355\373?<\265>\264\275\265@\310\276R\203\265>~\232(\276\214\233)>qX\266>\367\035N=X\214p\276\206\330\304\276\363\272\274\276W\030\346\275\376\3325\276\354\204\017\277Z-\021\276\303\306\302=\360]J>?\371\232\276\267\253\306<\262\262\035>\311g\004\253\377\261>!\206\322=5V\310\276\243L\214>g\312\364\275)G\017>\354\025\376=\335\355z<\321\013\366=o\010\240=\222\313\336\276de\\>]\235p\275\373\250\n>\321\254\250\276\345\212\002>J\3004?\273d\230\276E\266\244>r\024\027\276\213\230\242\275lc\017\276\000xS\276d\221\352\274A/\262>il\216\2761j\312\276\370\202%\276\376\361\277\276\263\353\341\276v3\303>\236\240I>{\007\255\276\366\327f\276\017\264p\273@\254\006=\254\010>=i^8\276\220\354\374=\366\351\016\2762\352\231>o\031\007\276\326\n\212>\330:\374\275T`M\275K\356{\276\266L\037\277\\\241\365=\371r\320\275\231z\267\276!\nV?x\303\207\274\t\376\306>\205\203\234\276\036s\311=\311\255\345>\320>J\276_\267\300>\364\316\243\276\234|\264\275-\251\250\275\217-\334>Cq0=|#\345>\310j\323\275w||>\212\316\370\275\276\337\262;\260\310\216>\351o\221\275_o\235<:U\315>\204F\302\275l?|>V\313\323\274\216!\022>\270.5=8\267S\276\372\031\215>\342\241\207>\250\346\027\276\336D\235\276\315\035\013\277\327\361\233>\375\022\243\275x\002\364=\327\021o\276\340\232\001=Y-\222\275v\233>>\317\273.\276\264\010\316\275\010\022$\276\215\001\315\276\351P\274\276\343\216\202\274\376x\004?\253\177\365\274F\230c>\371\216\210\276\325*9>\007\251\376>\257\206\006\276#\340\320\275\241\020\022\276\330\302\340>\177\nV\276D\030M=\262u\304\275O\321q>[B\365<*\302?\276\304\276\260=m\352V>\201\355\231>\304\370)>\242\375G\275\270o\277=\320\001\217>S\273\273>3\216\365\274}7\342\275h\277$\275\372\213=>Q\243\232>\003\375\035>\370,\234\276\2151>>\034+)>\320Y\271>p\r\264\275\334\232\003\276\215\350\343\275I\331\024;\2273\207<\201\364\210>\302\312\000\275L\276\330\276(=-\274\250\306\322\276\216\306\023>,\034v\276\257gm\276\306\234(\276\377+\257=\004\334\364=\317\300L\2766/\240>!V\t\276#\362\177>\276Q\251=\305\001!>\262\365$>!G 
\277\256V\273>\240+\n\275\352\210\336>J\222\000\275\263\316\212\276\206]6\275\315s(>\220\313\326>\263\264x<\361\272/\276v\t\300\275y\232\351\276W\005\210=\263\213e>vq+\276\3669\245>t\221\351=\242\372\355=\274\374(>\337\001^>\213\343\276\276Gq\002\277\366lz<,\302\204<\201\034\274\275\264\264\260>x0\305\005\255\317\275\334\307\210\276_\r\365\274&k\272\274u\352\006?\220>\210\276\030\253\205\276\022q\247<\371\213{\275\372=\022\275\336{\216>\331\353\253>\300\031\211=+\307\352>9^\301=\231\033\310>\206h!\276\t\337\337>\317\265n>\370`]\276\231\203\266=\273\244\241\275{\177%>;\267\224\276@\356\244\274\265,\334=\0270\233>6\330\216\276U*\313\276;%\367=\030v\024\276\207\272\264\273 \267\222\276\020\016\322\276C\361\375>\2074\036\2757\017\226\276P\303\302>C\360\207\276\231Q\316\275\330\341\252=\335\320w>#X\006\277@\312\305=kT\355\274\030\320\272>\307]\032?\370\276\373\276\203%\306>\223\261\242\276\235g\224\276\001\307\r=\016\270\263>\2771-\276\n=\311\274\354\242\277>+\033\301\276-\024K>\370\307\351\276\301\251\021\277\342<\201\273G<\205\2760\n\331\276L\334\271\275\234\214\036<\2078\033\277\362\005\303=f\323\\>e=+\274A\203\335\276D}h\2761\334$=\300\027\371\276\224g@?O\323\305\276=\177\210\2762\354\022\277(\357\000\277o\371@\276\366w\334\273_1\226=\336\217\036\274`\350\306\276\225\341\314\276\314Z!\276\377N\017>\240G\210\276[\234\214\276\303y)>\007\267\374=\353\276\266\276\022\276N?\343\202u\276\034f\212>\024\323\232\276\322\010\007>\021\2115\276\257\354\034\277\213\260\212\275\233\200#=3\335\017\276\354]\266>\312\345\307\274\306\345\177\274\202\030^\276&\0056\276\025e\323>\265[\230\276\r\353\031\276\307(y\276\363\201\"\2759\372\016\276\326\244O\276\223\307\310=\030\023\276\274\222 \020?x\220\226>\204\347\004\276\2611*>\214\272\221\276/\2074\274\370\342|\275/z\254<\226\310\366\275\t<\336\276\021\3250>\377\344\237\276d\005\032>\006\031\341\276\224\r\327<,\t\331\275$5\360>\246\205\016?\037\026A\276+\010\t\277\363=\026>\226}{\276\231\200\310>\255\336\356\275\034\230\364=x\305\024\277\246\305^>/\341\221\276H\314\327\276\352*\230\276\211S\270\276]\237\221>\307\327\356\276\337yk=\222JP\276\246S\305=%\230\'\276\343\242\306\275\264\272j>\302\362\207>\214+\321\275\005hZ>\322{\260\276\241\017\036\276\331\227\263\275\356\213\001\274\312Mc>\027V\024>\372\357\210\275\345\254\t\276Ez >(\036\275\274\002\253\363\276hfW>\020~\203\275\344\020oB\215\304\276aj`>\213K\247\276C\233r\275B8\024\276Rj\242\276b\357\376\276t\'T\276\342\2511\276:\273\207>\235\021\"\276o\201f\276\010\201T\274\034I\036\2768\022\354=sb4\277F\344\177\276\206\254\013\277\376\277\000\277\205\034E>\010jp\276\335N\244=\017w\245\2754:\t\276\211d\275\276\245\207R\275\345e\001\276\032\224\365\275\317N\310\276\022bg\276@\004\226\276\315\344=\277\312\010\246\276\315\226\376\275\327\322\323>K}\347=\350p\277\276<\200\240>+a(\2766U\"\275M\330I?\221\206\333\2760D\"=\034j\204\276\271\312v>\007\346\235\275-\214I\276\241;\262\275 
@\254\275\247=C\276$\033\265<\n\320\031\277\334\256\013\277\227\035\301\276\207\305%\276\256\310\034?\275\314V\276\336\237\252=\353C\362\276\225]\034\276K\013\204\276\317Gl\277dK\244>QWg\275\3459w\276\371h\021\277\r\354\345\275r\304\241\276\274%\202\275\232\271D\277]\335\025>@\344\216<\221\326\241\276\210\211X\275%\212T<\243\247J>\247\230\346>J2\343>\017\233\341\273jD\251>-\336\233\276\024z~\276|\274d\276\366\332%>\267\2426\276\001B\317\275)M\017\276g\332&?0<\035?p\332\271>\035\341B\276\256C[<\227\305\340\275mAE\276\226\007\324\275C\300t\276\007\250\245\275D?\217\276\335\231\240\2757\\o>\313\2441\2761\312\213\276\033&4>\006!\205\230q\262\275\247\025r>]\332F\275C\003\004\276RN\\\276_aS?\266\217\231=+\372K>\303\264:\276#0\374\276L\221\215\017\266\033>`\274\246\276\203\024\303>+\347\226\276C\202|<\322\325q\276\3218T\276\033<|\276\214\303n>[s\261\276\031\032\310\275UI\311\275\010gf=\2757\\\276v\235\345<\376\304\'\276\033*\335\275\220\2569>z\301.\276\316*\203=\274\027\332>\344\376\202\275!\320\\?\3733\334<\340(\036>\2001?>\026\243\337<\215\033\233>,jd\275\302q\302\275G\n\177=\2060\353=a@\025=p\004 \275\211\270\220=\370\247\376\276\224-*\276l\006s\275\252\270\036>p:8>\300\264\002>a\030\246\276-\361\033\275\036A\027;\362U\024\276\262ik>c\247H>\304\233\002\275\246\007O\276\205s\260\276\277\336L\276N\241~=\021\331l\274\362\204i>\265dn\275\013\210\222<\004\344N>e\323|\275\030\347^>\254\2505>|w\324\276\237q\222\276\266\3156\276C\323#\277kEy\276\312\242T\276\326\326\375\274p\003\273>)I\023\277\237\232\221>\177D\036>\377[\214\276\301\216\006<\211\007\244>)\\\222\276&G\364\273\005\261\244>\037\206\212\276$\"\266<$\037\213>\312\331-\275\231\252\250\276\245u\017?t\215\306\276\2671\237=[\317!\275m\276V>\000\347\373\275\233\036\366\274\240;\211=\343\232\235=\260\213\210=N%\274>\014\r7>P]Z>\312G\275\275\203\362,\276*\263\202\276>O\211>\036\253\035>\232\016d>\323\357+>\245\2302=p\301\220\276C\253\270\276B\273\336\275y\r\314\2762\241o\2753\003\212>\025\377\364\275,?\222>C\262\025\275\332\365a?C\225\002>\037gu=l\236\332=^L\226>\220\037\317>\002\327a\2761\316\210\373\253\'>\262M5>\236\242\037>\230Ij\275\2766\n\2779\254N>\214\336_>\255\246\336>d\247\376>\341\232,>&W?\275\"N\343=~\274\036?\353\361{\275\330\254\373>\361\362==\266>\334\276\267\t\352\276x\214\251\276^|^?f\202\206>.\236\260=\\g\345>\000\254#>`\225\262>\3375\032?\205lc>\3022\210>\311\016?\275J\253\001?\364l\332>\243\231.\276b\243\315=:\322I>\304\227\256\274\343\354\277\275N\243\266>\241i%?\341*\271>\301\266\235>\366\311B?\305NO\276?\335\264=\001\334\007?\327\320V\275y\253\320\274Fsw>A\360e<:\275\000\275\316E\352\276\327\342\275>?\350~>\345\335\212\276\302\273\236>&2\301=\316\266\221>\002vs;\373%#>]^\241>\303s\016?\366MZ\275\256L\330>\004\\\007?\242\303\323\275\2454\311=\375\025^>.#e=\037\266\216>\253D\241>M\340\241>\355\211a\276\031A\004?\266\222\\=\324\331\274>\024\nB>N\264\017>\323\017\375=\311R\331>o\271\241=x\\U\276\200\027\350\275b<\332<\217\226\300\276,\033\001\277\001%\010\275\273ol>\202\327Z>\241\240p\276/`s>\257E\361\276H\205\262>\231\025\001\276>\302\034>6\236\205>Z\275b\274}\217\273=k\231\244\274\362x\256=>=\001>\177\016\031>:T\033=\322\223\214\276\270\n\260\275\257\246\226>\277\346\317>\276\037\250=\223\026\003?O\204y>-\263$\276\344t\320>\210\252^\274\337\357\"<\306\220\352=?\372\">\327l\303<5\3362>)\340\305=\365p\217>\227\\n\27665\001?\324|$?|\251y|Eh;T|\206>\366\225\006?m\212_\276\322\312\213>rAl\276\240\351\266=\330\006\303\273\345\270\256>tX\352=Y\300\310\275\335\203f\274\341\273I<\035\367\373;\203=\307>\363\376V\275\325\006\363<\252>\30
5>\324e\257\276\344Nu=g\375\363>D\332\222>U(\364=\324 Z>nQ\215=\006\262$>U\355W\276S\177\255\275D\213Y>V\023\304\276d\345\227=\262\350\252>h\016\346;\300Y\004\267\361\366\323>\217\353\223=\003\342\212\276\211\231\216=\323\220b=\035\0000?\034\356\357>dc\356=0\037\322\276\223\304\337>\372\313\317>\203m\021>\n\264\031>}E\212>\373\254\027\276O*<\276\351\224\361>\213\003o>\027C(\276\013= =\341\236Z=\016\275|>\317\255\270>*\275\200>g\362~>u\3643?#\224\251>\353\276\036?\366\255-?\374A\313>\036\205\310>\322\362\260>\002\322\320>\241*\307>U^\250>\262\341k>Hc\t?`\316\230>\251F7?\354\304\366<\377\360\226>j%\316>D\320\332>1\256\312>3\020\007?\203\214\335><,\030>\227\327\372>k\316\326>\261|\343>!\231\r>\026\263\240>\3573\363>mQ\367>\030\256$\276T\327\025??\212\324>\006i\357>\262\204\340>=\205\267>\312\364\370>~-\215>\227\306\334>a\243\274>\213\rE>\362)\033?\221-`>\233\253f=\037\353\245>?\333\353>\266B%>\3502\034?t\031&>\020\360\267>\363\0148?\334J\376>rE\010?-f\375=\022\3520?\202X\245>T\261\321>>,=>\325}\252>\013\263\026?\331\306\016?r\335\373>\nw\201>l\344+?\325\271\261\276\205\340\362>\023\245\204>]\210\364>\311~\247>\325\265\203>(\376[>\230\3420>\r\205\032?ei\235>l\347\305>\344#\210>\270=\360>\340D\031?\302=\372>\217\025\261>\255pm>\257\037\300>v\355\277>\274i\035\274\216M;?\216\306\314>D\246\347>\007\240\244>\304\255\031?S\031\010>\323\271\320>&K\321>N_\016\277\343\376P\276*\330^>\332Bq>\346dE>\341\252h>Q\223\360>\021Z\364\2755\242\336>\361\362\203>#\252\304>\351\345\036>6\327\212>\347\272\215\274\027\032\003?\302\230\026<\024+\213>]\213/\277\232@T?\357\247\305>\021=\003?w\017\341>\226#\247>\220\346L\276K\233\016?H?\204>lj,\277\342\025c>1\365\275\276\221\255E\277:T\201=A\020d>\003\217\002?\231~\327>@]\035?\354\245\"?xm\r\276\326\265\256\274\366\364\266>\206\014\001\273\013\235,?#\362[\276\006\315\331>\264\3779\275\256\2473?\322`\365>{\266\224>/\337\'\277\002\276 \276ct\010;\357Rq?\213\271\001\276\002\020\233>\032G\267>\201\250\035?\007\371\321>\025~\217>\332\347R>\246\324x=\326\371J\276\236;\020\277\261\276\037>\002\230\303>\266\270-?6w\253\276ml\006\277\366\345\260=!\212@>\237\2073\277\237u\036=\334P\231>g*R>\254\\\026?\0042\305>\266\240\340>\r\247\310>Pm\274>\212_#\277\342\347\271>\332\2316>\325\205\243=\365Hy>\231\006\"?R\212}>\006y\322\276\223\321\342>i\310\205>\"\253m>\226.\304\276\025\323\326>Q\n\273\276M\026\242\276\033]\335>\245\322\361>\022$Q\276\377\233\000?B\024\020?\177\007\004\276\300\373\213\274\366\211\341\274\364\270$<\223\227\t>\333\027\257>u\211\254\275\344\320K\276x\326\345\276\306\"\224>\242\231\364>\001\321\310=\363@Q\277,\214\037>a\213\303=\315y\265\276\235\304*\276a\037\247\276\201\232\013>\020\231&\276\214\365i\276\247~%>A\264\002?\002\r5\276\311T\366=C\2725=2\030\360>\264\262\244>E\376\372>\254\202\214>T\224V\276\261\236\273>\315\246\231>\213\004F\276\362\233\243>\350B]\275\336\364\000? 
1\342\276\364\'Q=\t\266\006>\315\307\276=\274\023M\276\204\212->\016\355\213\276\312\003\357\275\364\005\034>\232\273\303\275e\177\037>\310s5=\354\200\264=j@\277>\214o\362\276\246\321c><\307\237\275\307\017D>0\274\010\276\265\014\230\276\275k\321\275\341\235\223\276\005\025\253>\2169\021\276\246\250\361>.\2120>\036\317\316\275\004\374\337>dh7;\032\201F><\351\352\274\223z\312>\332\343]=\321\023\025>\320\307\034>\0218\300=\240\226s>\314\2161=\3469\322>\354\377@>Q\252\341>\221\261\260={.\333\276\334\030\303\273\254\006\242\276\216m\205>,y\362\275\370#\260\275Cc\233>\013D\346>_\3548\276\337\311\374>m\273\202>\225\n\327>\214\004\271=\374\216F>\211[9>\335U\277=\340\313M>\250_&>\3500\300<\307q5\276\236:\201>b,\210>%\201A\276\032J\243=\304B\301\276\310\245\246\276\344\345R>\010\211\022>\217\363\032<\300\330$\276C\025g=}\256U\276!l\241\276\252=7\276\2131\304=\262\341\244\276\3114a=\3225\272>\275\345\254>RA\250>B\321\223=\016\014\033>\336\340\"\275\246\301\214>\276\022\315<\242^\370\275(X\337=P\237j\276\025!x>]4\272\275\350r|\275\357\006O=\366\016\223=\310\246\242=\223,t>\361\300q>;\324\234>\237\004\023\276\313\325e>P\340\201>\247&\237<\373i\376=\034a\200>I\034O=\246\256\222=Hl\232>..\235>\234\005\215>\322\247\211>i\256\014=\377v\252=\236\341\363\275\006{W\276:\266\253\274t\265\331\275\306\237\236\276\3253f=\373W\273=\306+\235\275\324e\260>\277\277\221=\211\241\250=\302L\325=\377\030\325>\343\207f\276\213\270\010\273\3556\001\2767A\340=a\275 <\002\020\204\275\254\030\t>\006mY>\373\351C>\351\215\316=\017\261\026\276\361\337I>\244\000\207\276\r\244!?\005op=L\301\225>\347\027\252>A!\201>a\367;=\027C\222\275F\231G>\025\330\256=G\006\200>\306W\375\276\255\323\270\276\337Z\337=\342\346\004\274\204\023\371=O\024\036\276\005\003\214\275\210\255\320\275\264\014\234\276\177\216\014\277\002l\357\275d\010\003\275\370\033Z>\362\230\217>\272\321%\276\300_\245=\275\217\007\277\370<\321\275\310gx\275\301\303\264\275K\330\331\273\335KE>J\342R\276\346\322\264>\301\274\346=\337\002\002>\3042e>c\342 
\275\311\312\310\276\3462_\276\261e\210=\323\004\354\276Vg\325\276\357\351\226>[\215Y\275t$\261>\213\355*\277\031\302\273=N\327W\277$\250\312\276\367\016\223>m*e=\251\234\226=\023\204\027\277\260\036V?;\"\264\274\336\253\237\275\220\266\251>\310\201\274\275Dt\361\276\266Mk\276O\334\200>\246\316\030>J\313l\275\"8\221>\216\311\016\277\2426\024\276.M\304\276(\3476=\364\375\347\276\314\267\355\274m\336\254>\254\361\224\274\010\\\253>B\250\233>\374\207\264>a1\243\275$<\305\275\357\256\233=\270\302!?R\036\201\275.\235R\336\033\037\276\364\013\227\2769\n\264<\013\213\333\275\361\335;><\267\350=\373:3?\263\342\240\275\253g\250\276\315\277\">+\037\360=Y\222\270\275\261\034B\276\013R_\276\317\271\t?\017\267\312>q=\345\274,\220\234:g0s\274=8\025\276]:\">\251\277>=\362\354\333\2746\376\315>\253\316\266\274nJ\007?o\211\332\2755\341\243=\353=W\275D\036y\275\314g\242>v\203\245=e\322Y\274\352F\217\275\001u4g~\313\276\305hN>\331\272\326\2764~\021\275\303D\315\276a\324l>\223X\210=\336R\343>+\017\034\276\027t\203=\000=D\276\265\215\212<\377\267U\275Yx\027>\356\361\362\266\n\374>\331W\204>\215=\250>\263\307%\274\270JA\276f\222\336>ZUv>u\223\360\276\001RQ=\"D\223\275\333R\300=\007\032K\276A/A>C\305\201>Rc\302\276K\367\352\275\324\344\237\275\353J}>\034t\346=O\325;>\"\241\">_\277\316=\356\201\237<\275\350\304>\321\225\030<\341\020\244>\017S\203\276\253\322/>6\027\000\276\017\213\232\275\330\247\006>\376O\030>\000\216\276\276a_\016\276\324\\{\276\020\340\203=v\344I\276o\251\003?\363\364\005<\246]\260\274\016@\245>cL\362\276\3002\237>%\327\334=\342{B=\016*\203=\341k{=\022\202\006\276pPU\275q\226k\276\264A\202\276\363\022\232\275)x;\276\266~\031\277)6/\276\"\367-\276\335\311\357>\327f\351\274\t6\216=\3243\361\275\030\337\230=`\342\316=\254\345I>\003\332\221>\306\017\273>\253\350J?\330\251\n>\357*J\276\326\223\346=yk\273>E\023\362>(\217(?\236\031\026\277\265\277B>\277q\034\275\\G\230<4\201\231>t\246\346\274\302\331#?C\020\373>c\330\206=\037\367\010?\272\311\351=\031u\235>\316\366\372\275\'}\004?\305\246\002\2762\372\260\276\327\014\026\276\206OZ>\271\341m\275\"\261\336>\352\361\334\276\253\315+?z\214\032>e\353\240>\224\260\203> \346(>s\001\322>\302\027\256>\027$5>\022\033\235\276\030MI\276\271\363\203?\251\242\313=BVR=V\231\206=\016\022\375=\317\233^\275\022\313\277=\316\030\337>C+\205?\\u\314=\203i\242>k\233\361>\330\222\210=#x\030?\351\034\251>\036\312\023>\037\233\262\276O\262->\303\034&\275\210dw?\241\"\362>\265\330\000?\224\312~>\333\334\226>\357\366\347<\224\335\371>.ah=$\366\017?_\364\330>5\025\357\276_\310\317>+\032\350=\242J\234>4\006Q\275\337\277k\275\3205J?+\260M>\217\256\320>g\215\310>\350\0265\2765Y\261>\204\322\030>p\303\301\275\201\271\215\276\314:(>\322\010?>zr\020\276b$\330=\246G\034=$\311~>n\241\014>>3\036\277xX\273\276\224p\214>V\374E\276@\232\006?<\231&>C\323\305>\200\206\234>\370W\262>M\026\277>\253\272\220\276\036[t>{\324\371>\313\325\257\274\267\356\235>0\000\272\276\334[\211< \263+\277\323\334\037>\022Gs\276\\\010\316<\335c>>G\337\261>\0106\372\275\215\343\030=\\\321\211;BM\356\276\274\245\323\275\234\221\220\276m6\377\276\363\233J=\007T<>\303hb=\334\235\310>,\333 
=\201\273\334=\236\251\231\2743!\005\276\341<\366<\320\361\'\276s5B=\032\215T\276\032DI>\032\260\362\273\267\336\206\275Z\323\013>\304\317,\276\375\261y>\331?\207;\330#H>\365\212\267=\220\301\212\276e\022]\276\246DJ\275!\371\347<\"\272\211\276\3600S>\346\343\021=\031J\372>\031\274\333=\007\310\214;\374\347\230\274w(F\276\240.\220>\2169\022\276\305\022\260\276cG{>!%\032\276\207Ph\276\216a\324=\217\277\340=\376=\022?\0065\202>\361\267\271=\357\306\367=\314?\374=\275z\221\275\311\0238>\320\037\032=[\346\200>3T\242=\237\3554>\276\217\377>!\342\236<\002\300M\276\356\223\211>+\326\275>\373\251\334\274\245\353\326>qU$<\026`_\276\260\3173\276?\003\202\276q (\276F\304b\277\334\257\341\274\005!\021\276\337\204j\276\310\021\024\276\335AB\275\031z\216<\341\327\r?\251(\037\276>\364]>q\000\206\276\365\352\323>k\023\233<\317\"\316\276\203\311\241>\373\215 >\250\032\347\274~\342@>\303\324\243>\234\276\322>J\313`\276\336\221\t\275\266*\261\274\030dM>\202n\246\274,\3356?\373H\022?\033\013\241>\276p&\2753z\205\276c\244@=\010\262p\276\346[\333>\267r\256>`\005\304>\263\'\376\275\217\020\211\276\340\306\241\275\'\371H>\002PQ\275\34073\275\322\275,?+0\335>\373\313\334>\201WB=7v\"?\370\322`\274?\203\t?\354\010\261\2768\306\">\200\032l>\317\316\336=\265\3526\277\200\251\027?\345-\351\276\272G\026>U\007\312=\324q\314=\2202\260\276\027f\342\276nW&\276H\342\030?\205\270\313=\353\375\014\277\016\240\321=l\202\224>$\3115?\266\301\254\276\006\350+>\004\363\212>\025p\336\275\373\267\270=\032N\255\276\2416\010>\206\363?>5u6>\337D\232\275\254\225\013\277\353T\241\276\233\362\262\276\037\002\313>P\2148>\256\245\217\273\2546\222=;\227\003?\355\211\002?\225\212\270\276\3262\330>\021T\031?g\025x\276exc\275\343\274\313>\022\306>?jB\322>\336T\003>\254\313\204=\344\273\250\276\320d\217\276\354\031\245\276G\277\326\275\243\261\235\275\367\023F<\244\311\215>\217\303S>ua\241<\244\375\334\275hpY\276\235,+\276\235@\311<\036\311\034\276Q\210\010?&\000\221\275\367 \353>\315uB\276\003\023e\275S\357\312\274\213\211%\276\"\307D>\312\340A>\240\000\225>\004Z\343<\"\211\034=\215d3>\270\036\205>\261\346\235<\252\204\004\276\036}\237\276\303?~\276 CB\276\304\274\375\275\205]$;\233\343\310\276\207\266\233\275O\207\001\276\034[[\276\266|\251=\342\372Q>\362\022\272\276L\267\274>2\224L\276\276\352\255>\255\343l>!\312\r>\252\300 \001<\033\305\236=/\334\027>b\313\027\2767\314\265<\324\336\037\276\252\2661>nF\355>M(H>\273>\\\275[\300\214>\033\261\232>\242a\030=\t\333\">\376\247q>\225\240\274>\242*\243>8\304\233=\304\323\213=\231\353\377=aZ\246\274\016\032\227\275\277\221\233=\210\364\355=MZ\r?\276\306J>k\2645\276r^\242<\225\301\277\275\217\250\266>\370\365\376=\367\005\210>\247\340@>\265\250\360>\211J\253=\334\244\246>\262\020p\275\373\206\311>\201o\365\276\226\253\366=;\245_>I\341\256>\230\031t\2762\355\224\376=\373\333\262\273g\315\345\276" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/gates/kernel/read" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/gates/kernel" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/gates/bias" - op: "Const" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - dim { - size: 192 - } - } - tensor_content: 
"\356\370_?\204:8?\247\313\331>\007\277\007\317/?\260\231\013?\253\343\316>\230\021\n?2\252\030?G\n\023?g\325/?>_\036?\352\3620?\336n\036?\233\000\316>\003\0140?\214\310\301>\254\274\\?\315\247\004?\201\344=?\347\027\006?\343\006\005?\371\305\330>\307\250!?\3775_?\200\223\035?R\224\002?\024\362\t?P\256>?I@/?\002L\002?\323f\n?\337\215\205?`k\313>\010X\031?`\002\010?\323\247\027?\037\321\003?e\242\377>n\3349?|\366\332>7s\004?\334\313&?2\020\317>\326\317;?\352\365]?\210\271\037$Q?\225\177*?\0162\247>\332~\314>k\276\320>\r\3479?+R\304>\223s\037?\313k\355>Z#\025?\207\320\361>\302\t\315>+\276\362>\313s\357>N\3179?w\346\270>\361\275\245?\250\374\017?&\223#?Y\032\341>J\252\n?\265\265@?a\376~?\252VK?#*\344>\314qD?V\324)?\340d#?\031\021\016?\312\360\252>\221m\n?.d.?R7*?\251\332\014?\353\357\026?\304mw?\376}\327>\333\346\t?uw\275>\001\317/?\370\376\311>\263\027T?\326U\017?\253\332>?\245\177\276?\347\335\272?\241\344>?\n\313E?\227\364\211?\246\342C?\327\\\000?\361\312\224?\221\244L?\361\202R?C\3612?\350hR?\341\362w?5\367\225?e$\341>\026\312\242?\'s ?\037\336\322?\357\025??\304\370S?<\255\003?\276\365k?\353\2366?\337n\241?>\344F?\353\220\375>\340\367\367?\312\330\014?\261}\330?y*\341?%\243E?\251\321^?\232.\304>\354L2?\020%9?\316L\360>\220\202\262?\246\224\236?%\2578?\035\371\216?\271&\023?s7\240?xy]?*y\227?~F\306>\001\374*?@\016\033?\214\310\313?\306\247\317?+wn?\317CD>\300\024\224?\277\370\000?0\314\313>\2619\334>\032\367\217?h@\177?\343\001^?\236WE?}P\277?|V\257?\376+N?@|M?\346\224\356>\227;\263?\244\036\361?\275\3079?$\027f?l\214\317?m\007\225?W\nu?{\226h?\030\327\236>\347x\010?Y\266B?\323\232??#\307\221?pc\337?\346\231k?y\215V?\n\227\350>\254\006R?\230\203\037?\203\355U?\020n\242?u\320n?\030`l?0x\010?rl\276?\037\240\023?\346~\270?\301\220(?\312\340\005?\322\205\"?kz\315?\215\301>?" 
- } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/gates/bias/read" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/gates/bias" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/candidate/kernel" - op: "Const" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - dim { - size: 192 - } - dim { - size: 96 - } - } - tensor_content: "\333c\221=\267\377\030= \017\004\2767\010F>b\322\227\2733QP\275f\375g\276\334\023\006\2772\236\224>-\374\227\275\212\311!>l\302#\277\250Q\245>-\325\252>x\212\342=\327\032h\276\177\301U\276\377\315?>\320E\376<\346:^\275\252:\245\276.cy\276\202=\246\276\275\236\256>.\236\211\273\276\031\006?\221\363T\2761\244\324\276_\200\301\276\242z5>\212_n>\177\227\245<\232L\247=\373Q\300\276\216\006\016=\361<\252>\24567>\266.;?\271u\013\276l\264\354>q\324\205=K\233/\276&\261\023\277\204X\223>C?\354\276&\325X\276\2519\350>\231n\244\2763\271\027\276dj\022>&\302\324=\312!7>czp>\322\036\032>\240\254\350\271\327A\200\276-|o\276\344\364\271\276\375\266\366=\216\204\243=\376\244\004\276\366\370\216>3\n\n\274\0260P>\277#\004\277\3410\322>\315\026H=t\274\223\276\376\255\255\276T\324\211\275\357If=\312\257\355\273\321F\277\275\214\034\245=\253\025\016\275\200\255\370>\030P\001?\215\227\253=\302K\226>\250{\201\275x\331]>q\275\304\275\266\310\227>A\203\262\275\253YL\276\203\204 \276\346\2054>`\235\200>\t-\230\275;\330\201\275:\253\202>>\1778\277WR\221\275\361.\262\013\0278\276K4\035\275\3661\233\276\335\265\177>R\376\000>\356n;\275\322\372;=\350\'R\275hFz\276j\335n>}\316\275\276\202\r\201\275\016jC>i!H>-\023@>\222_r\275\017\326\250>\264\245\305\275\264\273\363=\236!\205>\225\234?\275\004I\032\276\\l\000\273\315\002\230=\373\333:>\034\026q\276D\300\001>\250\010\272\276\264\316\314\276\024\255L\276\352\230\222\276\016\264\233>\316\253\266\275\036\225/>g\276\023;\315\245 \277\036\025\000>\235\177\261\276~l`\276\374\203\235>o\r\001>\036\367\312\275\201\226$>\262\236\231\276?\374\246=\302`\333\275\232\307\032=\r\241\210\275m\306\033>\344\3358\276\204\375\326=Q\326\027\277\305\210m<\031\215\030\276^\367\223=P\311\253>\346\311b\277\335\\\357\276\246\361\026\276g\200_\276\342\237\007\276\313\\x>\n\335?\275\013F\005?\032\202r>K\261D\336\235\227<\3669\240>\370c\214>\354\352\217>\305\236\230>\260[\326>\026 \224>\225\036\333\276\275\205\337\276\317\325>\273\235/q=\017\373\266\2764\025\257>\244T\237=\261\274\n\276\356\214\245:*\252\233\276\237\305\306\274\224E\237\276\204b\364>\244dD\276\247:\301\276A\004\r\275\336V\332>\307Vb=o\375%>\356M#>\306\216\344\275b\343\262>\261L\020?\373fI>\343\216\017=\265\347\361>\224T\277=\3069\263>\207\225\242>0\000;\276d\035\206\276\026\'\302>\361\r\334<\241\263+\276\201\251\212>D\322\317\275\221\377\307\276<0\275\273%\323O=\006\236|\276,\256P>G\360\341\276A\211\226\276\3401\225>\023k\t>\320i<=\003\261&=\331U2\276\365z\324=\212\013\243>\346\234\354\274\260\331\213=\023\214&\277\301[\037>\035\227\324>\217\310\204>\365\303\023>k\326\254\275\224\2018\275\332\r\246\274\316\336\233>\032\360\014\276p\212b\2765D\301>/tq>H\217\363\275\362\0227\276\210\037\374<.K\'\277\023:\003?\331\210\340\2757\367\213\276)\203\275=\2039\311>\317e\224>\244]\203=\006\270\207\276\356`\344\274\006\336b> 
\233\271\27607\027\276$V\362=\223\325:\276Iy#\274\335\245m>a#\252>T\0366>\366\264\'>\202q\004>\251\262\226\275~-\212>Y\030\n?}pp>\231:\021>\\\370\022\276\326\366\013?&T\236\276\321\032\r?\323\204\263\275\024|\022\276L\375\357>\203\215\232\276\277\n\270\276\206;\000\274O\245\004>qZ\365\275P3K\275\301;\256\276\002 K=\376\261\026=\333 \330=E[\231=.M\013?5sz\275\231\324\203=\217Ok<\220\034]=\333\365&<\341\336\312>\036\264\020?\273\240\305\275P\235\001?\026\355\n\276\340G~\275\233\343\200>\335\277\220\276I\277\007>\202P\222\276\347\036\017?\254\314\\\275tk*\277\310:\035\277\277\355\375=\334\342A\275\323\030\225>y\002:>[V\230\276_\262O>\251\323\336>yo?\275\342\350*\277\362\315\212>SW\364\275\034\213\265;\242\000\005?\r\227\314>\315\035\324=\204\364\205>Sd\203>Z\316.\276\251\374\242\275\322\352\002?\3435\240\275\032\003\260>\275\243\242\276\317\177\354\276\260\377e\276( *\275\332\311\030\276\235\344\266>\250\230\024>J\344\336>\356\356I>\275\316\215\276:\370\227\276\002\232\t\277R\274\357=f\036\003\276-\353V>0\263-\275\36735\277\2321\305\275,.\207\275\204\320\334>?\215H>\004\010\001>\243t\242=\350\352\232><\300\324=a`\320\276\363\215\222wS{\2764\n6\277*\226\033>\231\371\263\276g\"l>\307\371\326\276P\0343\275\2550\353\275U\366%\277\"\257\220=\207\245\237\276S\311\020?\022\277\332>\376=\255\274\177\021S>T\235\315\275\023eA>\353P\244>\205\320_>\265\355\310\275%V\014=%Y\253\275\347\365P?\327;\312>\004D\237>v\263\275>\256\226{>\335\007\371\276\200\202\206\276G\343\305\276\300d\302=\005#\225>\310z\217\275\025#\247>\270\227\361<\374\234\023=:V\001>y\263\n>\034\nQ\276j\336\327\275\355}T>\343 \310\276\300\325:>\210\301t\275m8\272\27643\205=\036?\204\276\315\306[\274k\\\026\277\361B\251=\370]\343=9\257\252\276\353<\317\276\003\311Y\274\216\025\316\276\'y\027\277\036\232\231>7~E>\205\226\017=\020n\022\2778\277\217\276\016%!\277k\003\331=\272\330~>]j \276H\350^\275\327\210\227\275.!\020\2751A\007\276\206\312\213\275E\276\201\274\037\007u\276\001\220\247>\202\276\025\277\341\253\236\274\033H\002\277\010\313\027>\244&P\276\016\336\305\275l_$\276\262\r\245\276\333\265\007\276y\372=\276~`\021\276\303p 
?\034\240\205>]_j<\024\210S>L\313q\276O\362u\276\007\233\004?\250>e\275t\022\201>\206*\240\275\001\326V>S\216\203=\304V\335\275\372}\312=\320J\244\275\201\3465>\307C\247\276\363\274\213=\244\265T\275#\215\'>~;\311\275P\360\262=\215\317\363=$\200d>\037!\227>kx~\276\337\311\245\274\356\255Y>\325\352\301>\036\350\346\276\225\333\\=\020\301\007\276\254\307\343>\227@\030>?\007\004=\263X\347=Y\231\001\277\345]\n>\2363\223=J-\313\273\t\024\201\276AE\275>\257I\010\277\224Z\024>\367\256\215<\313\245\251<\237:{>\261Pp\276\347\026\345\276r\315\377>5\3212?\362\2532>S{\'\276=\203\224\276C\305\223>\230|\240\275$0\024\277\221\023\355=\1778+\273\233\023\243=\331\223\000?}\304]\276\233T\277\275Z\224\264\275i\332\315\275\327\000\254=\265y\232\275\032+\311\275\213A\031\277\37564\276o\025\000?\214f\216=]\3771>\343t\230\276R>\300=\225\021\010\276z\"\304\274\3348\247\276\307\231\376\275^a\243=\222\360\004>\320o\017\276U\357\215\275c\200\225\276j\216R\276\202\235T>ps\277>\0267\014\275\354\315\024\276i\001\362=I*\217>\334i\"\277\244>q?\000I\300\275\346\265\207>f\177\231\276\307|\377\274\315e\217>zj\005\276^\366\032>\335\024k=\206\0340>\231x\370=\3618_\274\252\321\214>\320w[>\341\262\001=\342\002P=e\235z\276\361\230\257\2767\264\202\276\331U%>\263{\213\276\300\322/>\366\001\200\273\035y\253=\014\034\356\276*\3358>\241\261\017?\204\014\002>r\316\211\276\216\221\027?\357\267T\276\200n\216\276\304(\201>i\022\227>\300N\265\276\235_\200\276\312b4\277\371\n2\276\246\333\314\275\0139*\275Teb\276U\035\311\276\n_\203>\324\277\022?\337y\254=6\247\203>\212\t\271\276\307{\035\277\300\025\314\276\247\327\240>\350\234.>\360\350\200\275\274\266\336\276\243MB\277\026\317\226\276\272C\221=\214\033\205>\365\026H\276D7F\275\312\371$>\314\304~>9\237y\276,\264\037\275\206\244\266=\267\240!\276\361\311\302>-\255\013>\020d%>\017Jd\276\326\247\001>Xc$>\251\244\'\276\014\334\317\275\334# \277\320\031\327\276-\331\265\276\2372\n>\";\214\276\362-\242\276rjd>+o\212=Ts\210>D5G\276T\n\205\276\377>\177\274\323>Q\274\023\023C>\265\273$\276\'\316\227\274\320\346\246=\317A\240\276\204\305k>\335\013\265\276\030\t9\276\264\244\030=\277\263\377=\003\206\334\274z\323\255\276\357f<\277Ol\313=\033\034\337)N=\241iz>=\233\024?7\335\311=\235\236\225<5AS\275\2270\312>V\270\'>\265\3215\275.%\321\2747\237\211\276\177}\036\276\332;T\276\027\005\024>\007\036\327=\360SP>J\007\252\276\327\232\341>@GP>c\3229\275\3033.;\376\371\345\276\361\334\333\275o\263\220>\360\330n>\027\306\260\276\331/\032>\"e?\276\325\327\262\276\017\276\221>\325n.?\026\331\277\275\347\002b?\327\013\3039<\262\005\277\372:\237>\271k\255\275)\\\242\276$C\035\276\323\275f\276\250\204o\276\312\232W>6\022\217\276\345\357l\276\0004\n?\017\231\030?\327\274\305<\330\353@\277\331\326\023>\333\2477\276s\004\035<\372\336l\276\265n\213\276?\302u>\311|1>\3016\360\276^\270h\276\377\005\345\273}\242\353\276\n\307\210\274\222#\337\274\213\264b>\266\207\032=\\\324\246\275\316oJ\276\374\3624>\220\213\360>o\313\367\274\230\314\342\2750H-\275\317Tq?\332\272\243\276\"\274\024>G\307\321>\243\253S?\"\336[\276\330\247\'?\200=\231>\2060\330\276\326\376#\276l*\224=]\010\252\2761n\222\276;\243\266\276\016\t\213\275\346~\246\275j\211\t\276\2300\303\2764s 
=E\363\264>\357\373\326\274\305\202\273>\357\201\230\275\276\264d>\337\370E\2763x\370>\261b\345\275@\316@>$w\371\275\022\314\306={\337\006>C\377r\276U\035\202\276b\243B\275\335u\304\276n\201\330=\325\n\002?\232nZ\276\214i\275>\352;z=T\255J\2767oW\276\203\254\200>\222\221\026?\020\007~\276\013d\367<\230F+>\352\004\227=U)\233\276\231\321\257\376.\370\276$LZ>\220f\013>\247>\266\275M\321\336>\362F\262>_\262\016\276\315A\267\275\261\3300\276\347\363\010\277*n\234>\\\214~>)\232\255\276k\245T>\355:\247=\312\0167\275\360<\274=\010\214x>\322\314\320\276\257\347\324\276D\316\277\274Vrc\276.?1>\006\255\\>\206Fb\275\031l\374>\274\017\372>e\230\025\277\341\340\226>\244\007\320\276\340\372\213\276\003_\034>[\302\260>\245\226%>g\004K\276\254\014x>\324\353^>\243l\017>\347\374\202\275\313\316\'>\371\023\206\350\345\276h\275\007\276\327\251\272\275\244\030P>d\215V>\363F/\276\030\332\244\276\347<\277\276\036+P\276\013\362\312=\nY\215\275S^\014>\202v\346w7\363\276]$\222\276\332Z\024\275tb,<\363#\350>\000\373@=\302\303\271\276T#\273\276n{Z\276}\361B\276\204F\317=\256\220\256\274\334\263\'=\345\317\370\276\344\375\t?u\230P\275:Y\246\276}\2572\276\033\204 \276j\252\300>\372\360\213>\360\310\224\274\352\227)>\"\273\225=\275\257\001\276\371z.\275q\364W>G\241\202= \037\325=\320b\013\277\2578\252\2760\005\205>f\256\367\275!D\006\274l\271\t=\234\365\234=/\334\245<\t W=\262\017\305\276\252\371->\346G\013\277\225-_?\240\375\305=\276\231\230\275\374\t;\277 J\034\276\315\001\203\276\215\371~>PQ\246\275\372\260\323>J\374\343>>\246\374\273s \241>\010\353\026\276\242\020\236>u[\324\275\330\0322\276\327\271\t\277\322\375(\276|\220\260\276#\217\223\274\371O>\276\014J>\276\321\213 >r \262>\350\032O\277\354:\235\276?\322\026\276\033\351\314\275\357\260\275\276s+\231=\026\210\010>\303\345\204\275\034\026\311>}\367u\276\3747\274\276\342\177b>\177\274\027>\245Jv\276\376\365;\276W;\212<.\007H\277A\247t>\251\016\346>\022\370|>\217I5\276\271^ ?\211\007\373\275\350\351\026>\350#\002\277\206\332\270\305\025N\276\262\206\241>id\200\274\n\211\341<\252\225\n?M\224\306\276\244\272e>Z<\331<9\353:\277\374\n\362\275\303\307\237\275\303\352\177\276\222\0045\276\202\361K\276\235+^\276\307u\006?\251\327\304=\276\221s>\037\320\303\275\266U\243\275\037Y\033\276}6\357<\204F\217=\010\372\206\276a\300\202\276g\327\205=\363\225%>H}w>C\034\020>\372\332^>\322\356\201>\017y-\277\364\371k\276@\326\256>\'\377\256=\000@Y?\220\270\n\276\336c\225>\030).\274I\234E\276\024\341\361<\375\266\010\277Z\251\257>\242cW\276\334.\000>c\317\\\275\3537B\276\235\016\210>\310\240\270>\227\026\211=l\2643?\2628%\276\341W\276>\036\035G\276mH 
\276\177TZ\273\337\014\034>\336\022\021\276\033\320\272\276\331%@>\021\026g\275y\030\210\275@\217\276>\277\3458=\350.\262>\300\332x\276\312\275(\276\021\233\211\2768\303\343\275\332\004\035\275b\341\260>%p2>\250/2=S>\233>\2052t\275\354+\201\274\201v\204>\222\225\177\276\226*\217=L\302\302>\234I\233\276:\311\316\276)\375\217<\376]\226>\271\256c\275Qq\321=\\|\216\276t\036\006<\352\261\370\275\005k\374\274\r+@=\263\3323\274\264\327n\276\375\020\226=\357,9>Qm\247\275\265m\202>\314\334\035\276\377j\204>Y\264\322\2744\277\367\276|\016\016\276\035\262\314\275R\233\"\275\301l\r?n\'\362>\253\202\337=X\246\272=\243\246$>\t\324w>\342\t\020>\272\254a\275\205<\213<\363x\210>D\017X?0\002\017\277\031W9\276P\344\322\276dX\361\275\251\341\254>W\350\335\275+\177\203>\371%\213=\003q\004\275\r\303]\2767\316\316\276\346!\274\274\037\033\006?~\0056\276\036\233\'>\335\355i\275\004tG>\370\345\234>s\304\314\275\225\263\315<\263\256\247\275\024\377\376\276\277\356\014>\006\t\243\276\252\201k>\357\226\023\273\035\243\267>=\036\r>\t\301\316\275,.\020=\005\022\374>f[\037\277\306|\216>\215\247\353\275\327\231M=$\330\303<\035\355~\275\273\016\215\276\301\025\225<\000\262\371\275\244\320\215>s\324\r\277\263K\373\276*pW>\305\265\010\276\323\350_\276:\352\311=\376\226\034>\305\323S>\231\265\253\276\326\025x>\004\346\230>\347\005\234>\245b#\276W\356\237=-\367\006\277\275\222\371\275f\376\377\2767\"\n>.e\356\276v*\325\276\307V\256>\177\323\366<\347S\242\275\325\305==6\336)\277sh\250>\260\216\351\275D\\\337=\227JG\276\260\224\217=\250q\333=I\2559=|\014\373>\225\014\001\2761\325\271>,\3429>\320\275\326\276\242\255/\276\235\034\250=x\320M=s\227\240=\205\250\233>-\272\330>9\206H\276B\355\310>\023\265\005\274\021\246\002\277i\371\t\276\010\357\242>i\361\016\276I\'\r>\332\372\300\276v\255\334\275\236\177Q\275\215\224\261>\000\376=>-\253\351>\333)\215\275\025\240Q\276\330iP\276\366\221\335\276\341\001U\276gl\311\276f\230\377\275v/6>\\\322\355>\0365_\276\204\373\331\276b\255\317\276\232o\221\222\345\203>+\220b>KN\312\276|\026\300<\254\330\252>D\306\005\277\372\330Y=\202\241\\\276SG$\271$\230\331\276\357\233\004>\271\014\014\277JAV\276\252\206\210\276mFW\277\255\">\275A-\">\032q\246>\225\264\255>n\223\005\276\311\230\r>\201F\220>\364\026\216\275\261\321]>\246<\270\276\214\2223\277\003\360\t\277F\305O\273` \325\276=\013\022\276\3369$\275`\311\317=-2\021\2767E\330>\337D\233\276\227s\013>N\r\003\274\324\366\031\276\3501\316=e\223\270\274\375\220\261\276\250\211w>\253\212\311>\202\337\033\276\214h<\275\nw\240\275c\276\264\276\345\036\362=\343\225\247=*\007\026\277\026\213\027\276iEd=\375<]\276\351\'\016?\262 
\226>\253\372u>7K\215\276\322\263d>\024\222\314\276\025\356\251>\222\005\327\275\354\001\007\277Z#@>=Je>t\210\275>H\305\243<^b\007>\326E\177\276\'\365\257>\362\211\340\2757\257$\2765\017\335\276\316\365\322>\321\n\203>u\335\014>L\013\357\276\373Q\201>a-\251=\t\306\307>s\334\342\274\340\025\265\274}\323[\275\0354\">\217J\345=\277H^\276Sc\032\276\371\026T?\312\270>\276\274\234\246>O\274$?\t\327\356\274u\266\242>\205\320\013\277\323[\310\276\352\274\036;`\334\214=\261!\000<\314\236\220\276\2018\277\275\022I{\275\355G.\275\234\037\306=\271\226\230\275\246\215\332>K\370\220\275bt\242>\tIZ\276\261/\250\2767\366C\276\352[\202\276\352Tt>\374\343\270\275W\271\"\277\344\221\007\277\277\230L>HE@\276\263\034\351>\362Q\247\276\001z\367\276=\r|>1\206\251\274\021R\260>\rJh>R\200\020\276l\231s>\331\342\256>V\263\262\276\263\322\225=)c\t\277\250\214\001\276#5\256>\'=c>\324\373S\276\273\352U>v\n\037>\216\212\025\275f\336\202\276\251gj=l;\026?\005U\243\275K1\302>\253fN>\000o\017\274h\237\023?G\374Y\276O\020\227>@\252\324\275&\035\324=\375\230\306=\226\312\273>8\350\">\010\\\021\274c\255 =Vq\267\276\257\323\215\276\356yo=\247\035\002?>\242\207\276\303\270\262>\003\242\202\276J\027\005?\212?\235\274\362\346\016=ub\263>\265\2654>\036i&\276\0337\266\276\t\245[<\224\016\020\275\354\353\000\275\343\024\214=\237\334\302>\234\265\223\274\251\273\245\276\215*\266>#\321L4\277\027&]m\221\331>\267D\021\277A\363\027?\2028\315>\326e\">QT\374=t\207\314\276\224\342\307>\035f4>\001\307I?T\233\257\276\234\0208?\322\013\262=\242\220\001\277.\226\235=\217Hc\276\252\203Y=(\257\364\275\336\026\274=\371\325R\277#\213\220>\271\322\346=\247k\207>l\327\313=j\317\352\274\230\245\273\275?\215\017>\027\353\243\276\000\253\256\276\341\255\036=\257\364\036>\013\032\265\276;\030\222>3\326L>\200\335\351>\301jO\2760\316\373\314\233\321=\273\215\324>#i\346=[\250]\276\310\211\027=\211\rP>\244_<\276R\312\030\276k\026\215>J\010\271>\206\331E=j]T\276\353\243\274>\217\354\006>%\245O\275\033\\\030\275\245\217J\275\266T\241\274\311q\233>\261\350\212=\276\324E\276\365\016\013=-\210\262vR\253>mG6=\215\3502>\203\220\000\277\214\260=\2769\201\013\276p\304\227\2766\220\224>\357\206\r\277CE\210=HA\205\276%=\020?]\306\215>\217\370\267\276\215G\030\276\340\256\313\276\035\211\362\274\225\312\242\2764<\203>\rfx\275@\322\227>\263@\215\275\352\020Z>Y\242u>T.\016\276\203\352\332=\025\001\245\2753|\233>&\014\364\274\256\270\200\276\244\263\003\276\252\005\017>Yc0\277\365\231s\275\344$\230>\036H3\277$N\314>Q\376 >\241n\315>aj\334>=\000\260\276OJ\253\275\353\370+?\356\313\252\276\264\023\216\275\302\223\014>\212\334S\276h\034Z>I\003\030\276\374\300\230\276\345]\026?5\351\264\276\220I\021\276\326\377\241>\320=u\276\267o\243\276\234%)?\375\017\331>\224\264\307\276O\313\206\276\233\242\373\275\203\324C>\357\351\373\276%=\020>\347\232\312\275C\031\350\275v\334\225\276\263\300\374\276cr\303\272g\260\243=\347T\377b\261\t>`\205t\276\215\243f\276\375\367!>\313^\202\276aDS>\202\232c\276Vq\027\277\017\201\336>\343S\215\276\200\333C\276\231\362.\276\351\254\270>+|\020\276\313\213\210>\204\002\226<\372-!\277\332m|>\273\327\025>\226\\\366=\264\273\256\2750\313\007< 
\323-\277p\026}\275\343\206\373\275\360\244}>\315\037\243\276\207\333\212\275mY+<\261V\225>\346\256\205=kw\327\276\201\004Z\276kIp>\236\377\353=$\260I>\320\370U\275*\001q>\237\300H\276\271\353\337)\013\247>Y5\307<\207\362\031?\311|\303\275c7W\2461\370<{\370\245\273\375\336&\276-\"\335\2764\205M>?\305\377\276\222\311\220>\224V\027\273K\325\223>\361\2134>\371+\243\276\362\266\206\274\272\255\010?{c\003\276e\010\263=\235\376\214\276\204\233\346\275\302\302X\273Eq\276\275\374\t<>\305e\237>Z\211\374\275\217\271\214=\204\301P\276\314\005\330\275\334B\242=@E\n?\261\247:>\361&\211>\t \212>tA\331>Ev\034>ZV\360=\356\210\250>\371\235\276\275\325W\016>\377\262\232\276\342\204\334=\225z\312>\312{s\275\321@v\276S\n\256\276\342\336\205\276\304\300\233\276v]5>\023\277^\276\246\301\013\276Xb\035\275\227\300\356\275r\346r=!\234\022>\267\304\225\2767\216\266>\010\222\266\275\220\010\271>0\317*\276\\:\'\276\010\311!\277-\253[\276l\375\324>\320wY:R\276\233;\226\214\262>\266^\372\274\014\270l\275\n\340\232=\377\326\360>\363\235\314=\334l\366=\002W\307\276x\345\340\275\224}\344\276\337.p\274\177\361\234>\001\027\254\276\3220\022\274\345\253\201>\0173\210>\266XH>\304\337\216\275\032/\020=\010\272;={#\252\276\275\024\346\2748\333\030\276$\361\243\276\3236\014>b@\377\276\257\265\352\276XP\232\276(\326q=R&8\276gb\247\275U%\260>L\027\341=\016\310[>\351\221M=\225\232b\275\330\036N>L\247\336\275\221\374\201>J!\241>\004\3133\276\t\271\211>\323\316\342\274\324a9\274\334$G9\024\205\272\274\3036\207=\350k]\276\0103\353\275\216{u\276\265\372d=\3533k\276|\304U>\233+\'\277{I\321<\036[\244\275\221\337\347\276\213\231 \276\275\350W?\024\244\311>\221\341\016\277\344\352\033?Be\331=m^\271\273\342\235\362=@\244\307\276\327cu\276\264P\'\275L\356\021?\035Wd\2762j\037=\262!\227\276\353o\033?\204\346\010\276\372\007H>\3264\317>\327V\242:\313~\233=\014}\222;\345\022\024>\2234\037>G,\017\277\302\242\310>\244\324\207\274(\304\317>c\000]\276\245\201\024>\300q\222=W\355\303=\223\274\000=U+\220>\334\245\t\276\'37>N)z\275\305\366\232\276\301}8<\3731\312>r\213\270>P\2401?\001\216\252\275\336L \276\300Ba\276\374\212D?\0252\245\276\017\204\254\276\272\226E>\266\254\273\276\010\253\225\275\217+\t\276\335Y\250\274\"}l>W\020w\276\370\'\263<\337%\276=Q/\304\2767w\233\276\026\032-=\232]2>\214\367\001\277\361d\301<\330\033\242=j\3735\277Re\266=\370\324\313\276\311`\230=\340O\005\276\340\315\210\275O\267p\274oq\300>l\305\315\275g\034E\2765\204\016\276\364\221\336\275\271\245\370=\375D\353>\020\236\027\277\304\325G>\204\357\222\275s\003\240=\314\347\014\275\252\217\017\277\362_\202>\027x_>\332\272\207>^<4>\2254C\276\322\334n\276\241\323\366=\000\030\241>\3009\344\276G\025.\275\274\220\260\276(\032\233>\242\ra?A\007\n\276P\235,\276\332Xh\276\004\2327\276\225r\324>\265\250\206>P\231\031\277\322E\246\274\331E\244\276\200Oy>cb\221\275\223O\007\276!\016\210\276e\000\330\276\217\364\201>b_\002\277Se \276@k\024>x\233\207>\333q\221\276\370&\221>I\323!?\026\312Z>\270\000\276\276\267\203\370\274.\202\330<1%\003?K\311B>\372\334\320\276\306ng\275\330\323y\276Ns\256;\361\335\010=M\324B\275r)M>\230\343J\274\024\376\311=\233\000\331\276\226fl>\016\266\247\276?:l>\021N\003\277|\2002>\275\355\027>\0322w>)\377\022\274\373\362\344\275\301\224\200\275\024\307\357\274\023\353\313\2762_\311\276\250\360\342>\n\340\201=\324\354v\276\242{\231>\314\261\023>\307Z\316>\030*J=\037\'G\276D.\001=\037\227\354\276 
8\005\277Zx0\276\261\217\266<3\352\342\276\323\324\333\276**\266\276^\355,>GI\352=i\025\354=\265.\213>\020\234{>\327T\316\276\'\206\010>\240\035V=T\002\374>\334\177\341>T\207\305\276\214F,\274\243-\n> f\213\275\227V\267>\237\343\252\276\261P\256\276z8\306=\3219\307\276M\306\010>6L\252>E\265\000\277\366\215\352=\037\031\036>r\017\267\276VX\347\276\374\337y\274\374\2055>\352\302\316<\244W\224\275y\302t=:r\322\275zym\276\034\354u>ZaO\276.>*\277\271\216\030\274\266\266w\276\325\302\316>\250\223\332\276\017\336\235>}Lf>R\375f\275\031#C\27629\316!\317\031?\357\261i\276\0009|\276\274\204\006>\273\337\245>\030\210\003\277\351,\210=\300\263\032>n\275\223>u\215 ?\226u\322\276\326\027Z\275\353\004\276\276\337\324$=`T\024\277\362\202\200>\343\362\034>\374\315\372\276N\265\227\276\247\317;\276!f-\276#\376\301\275\224\270\205\275\211^\213>Qe\374;X0\020>\366D\326\274\327\262\217>\265+\n\274\234\333\t\277`\237\214\275\"\346\233>\303e<>k\252p>h\263\t\2742\036\266=\026\3772>V\\\020=\355\023n=\237 \023\274\322\256\227>\240\251->\257\373!\277\276\006\270\276\371\226,>>\264S\276\001\311\024=f\225x>\217x\254<\320\211\313>\257-\333>\317\002\344\274\236\320m>\260)_?\274b\226>\242\261\241\274\221\342\r\275\263L\266\275\266*\020>\267\3103>\261\307\215\276\343\364b<\236<\034?\375\305\231\276\204-\322>#D\202\276d=\311\276\244\307\013?\272\2721;\000\260\202\276\311\314\301;\213\227\234\276b\245n>\315\026\013>\372\314\273=\362\374\237>\227\363\033<\323\017\n\276\331\264@\277\n\256\021\275\014Q\344=\222\032G\276\270\rx\277,\301v\276a@\251\275\032\002\000?w\312o\275t\001\242\276{\306\002=%\230s>\324?\261=\250\021\253\275\302\026x>O/^\276h\346\260\275\311\365-?\307\314\371=u\343\272\276q\025x>\234\t>\276\201\201\270\276\376;\223\275\335\364\271\030\206o\275\322\n\010\277@(\364>s\371\022>\'\323A\275N\344~\276\324\310\006<>\270\305\275\271A\343\276r\345B\276a\010\306\275\257\203\'\276\"\365\311>>\300\261=\243\306%>N\267\233\2760\340\036>Y\213\027\275\\\361\240>\332\315\210\276\036\007)>h\\D\276\251\270\032\277\363T\312\275\337:-\277\261c\273\275\377*\337\273dU\350>\031(\326=|\304O?O\314\271\275\027\352\247\274b\016Z=\331\"\021<{\036\220\276\343\325\220>\345\r\037\276Lx\224\276\331\243;\276\n}\014\275X~\256=\"\263?>\370\371Y\276\227>\270\275\353-\244>|\330G>\242@y>c\253\331\274F`m\276J\316\216\276\265Wv\276\223\022y\276l\361w\276a\266\031?\374\tE<#\351\221=\300IC>Wy\340=\2430\325\276\035\215)\275h\363\022?\326\335\016>Cu\221\2768\201N>\"\364\255\275\271>;\276\026A9>{\320\030\274w?\366>%Y\273\274z\035\257=\237^S\275~\313\332=\242\016\364\276\006\337\017=\304\263\223\276\336R\'\277i\232\251>\243\010\216\276\337\026\'>\371DJ>z\333z>C\204\236\275\242#\300;/\r\227\276\213\035\270\275\032c\210>\361e\253\274L`\001?\006\312\375\274\253\t\336=\020\331\225\276\231\205\202\276G\275\330\276\005\2170<1\256\243=D\274U\276\367\330C\274\266\325\246\275%\027\277\276Z\360\224Z\353K\276k\'|\276![;\275\246s\342>\276fT<\002\216\244\276DR\244<\303\342\022=\237\210\357\276;\306:>\312\275\231\276=\251.>,}\315=\272B0>9\250t\276\311|e\275\340\212\240\275\325\225\227\275\t\264a>\334\014%\276\316\005\233>\006\350$\275I@\350\275\307IO\276`;\206>!\254\240=\200oG?\037`\316\276\221 \223\274@n\260\276 9T>/\261\320<\276\245\233\276\023%\203>\005 
\">|\322\336>\350\362\302\276yZI>\177\263A\276\363\n\216\276LN}\276\376\306Y>\266(\213=X\275\001\276%\000Q>\266\024_=\222e\260>t\n$\277Sv\036>\307\321\231>\316\352\300=j\026I>\372u\027>\264OQ>la.\276v\266A\276\025\t\250\276\306\0268?\310t\227\276C\252\275\276D\362\375\276\034\363\234>&\335\000\277\231\212\221=^uv\275\371\350X\276\203HC>\266\324e>\272\334\023\276!WB\276\'\363\333\276)\231\023>VPF\276\030\325\017\276T\020\224>\261\017\316>@\353\017>\325\032\017\276o?)\276\303\272%>\034\221\323>*\255\217\274\205.h>s-\005>=\220\360=\361H\202>D\033<\277\027z\033\274OD2\277\323j@\276\257\254\325=\177\323\210\274\273\221\321\275\216\221\031\275\266f\025\277\301\337X\276]\276\256\276j\247\317\276\337\261\230\276u\257I\276\356\022\223>\224+><\336\025\324>\355\202\257\276\245\277\334\276\343\365\207?vTr>\357\016\261>\021[\352\275\210\235\226>\023\252\244\2753\262\225\276G#\031?\025m\214>=\264\243\275\326\242\205\276\353\357\210>\030\001t\277I(C?D\205 \270\217[\313=\234;\002?\\\223\210\2750\264\005\277\\Z\273>}\373\256=u\324\342\276\277\204\360=P\342K\276N\203A\275\037\275\324>\354\341m\274\335BI\276p\031\254\276BV\315\276W&V>\311\235\372\276\343\256\001?\264\255\034\276Xc\243\274eQ\240=R5\220\276\r\273\220\275\231Z;>\037_)>n\034E\276#G\037\276\024\023Y>\341\356b\276\034\030\020\276\265\034\366>\321\233\215\276/F\206;\262\334\010\276pF\227>\267\003+\275\010R\036\275\276F\021>\316s\363<\203\332\\>vb\220\276\320@\264\276|\265]\276sdr\276[\244\217\276\213\354\246>\231\014\000\276\350\301\213=G\355\251>\020\353%\276Hpl\274\317\333d=y8\251\276\346\232\346>\rhM>\004\302\036\277KE\264\276d$\n\277\250p\004\277\214V\204>\260\342\224\276\202\345/>Q\325\305=\010\255K>\305\374\263>i5T=\257~\310\275\227\226\250\276\003\226Q\276\217{\372\275e\272!>[\211N\276\350$\317\275|\351\347\275\024@m>\230\245d\276\301\023\221\276\202\363\214>\204\026q\275\002\311\255>\351\020\027\276\266\\\003?s\311t\2751J\360\276\240\332\033\277|IY>rr\305\276\324M\014\277\314)\235>g\266\213\2769\320k>f\221\271>\2649r>\311\303\217>f.\235=b\245\303>\3549z\275P\356\204\276\213\261\216\276&\270V>m\344!\276\345\224g>\266Q\263\275\2721\301\276Q,\247>\267c\315>>\031\216\276\2200\375<\374\036>\275!\376n\276\206\271,>\261x\016\276|\333\023\277i\'\251>\t<\237>\363\340\323\276\010\314Y\000{\240>\302\313Q=\347\243\335\276\0321#>@N6\276X\300\344\276\244\004\'>\022{a=\266@\324\275a\t\032\276N\266\'\276?\177a\276\255H\212\276Ja\017\276\207\270\026>\344\207\242=>\335\001=\r\013\020\277\023\020d>2\t\265\275\250\302\021?\323\033!?\032N\3639\373FN\276\377\230\275\275\327\236\373>\035\020\204\274}\231\034>Z\3671\277\017Co\275B\r\321\275\252\024\201\276\352\346\';\252c\372\275\021[]>\245\200\317\275<\014\t\276RK\367\276\006\364\005\277\362\032x\274\2210\241;\220\034\341<\211\032\320+n\345\273\2175\270=\207\274V\276\037\227\010\276s\2047\275\2156\236>\253\246\246\276Bi\311\276\321\261\000\276\3151->\275}`\276\007\324T>g\332\213\275+\244o>\355\312\'\274\036F\312\275\222\267\277\275c\235\215=W\334\257>\177\326\366\275\320\213\016>\2432\257\2762}~\276V\265\205>\373\000\202\276\223\334,>\244\374\233>\370\254\255>_\214}=R%`=\376\0377\276\036f\310>\331\312\'\276`lW>4\213\":\014\036\300\276\321\314C\275\354\3502>wp\201\276\223#\010\276\r\314\325\275\234\221\231>\030:?\276*\354\206=\022\327\246\276\226\215\206>\334\031\264\276\022\032\250\276k\224z\273\332\013|\276n\225x\276\351\331\205\274\302\314\250\276\355\236\327=\261w\200>8/\251>\t\022\205\275v@\036>\326\224\001\277s;\001>\0204\330\2762\355\264>\226\253H\276\356\354\255=N}\213>\245\307\205\2750\216\253\276^L\205>\3
14o\266\276\032\343\365=V\263\257>!y!\275\277/\020\276?L\276=Q\240\007\277\3776\251>4\332=>\317\240,\277\223M\227>\210^\267>\326\327\355\275\373\214%=\366D\006\276l\021\020\277\017\355\026\277\323+\252\275\312\240\004\277\010\236\200\276?\301\222\276\344\323\032\275\327U\020?\346\267u<\353c\341\274\003_!\276\311\231_=`\177\201>a\250\217>\315\251\036>\001\027\351=\230\na\271\300M\000\275\"\215\224\275s\226\247=\302\004\022\276\020\244\215\276\303\266\016\276\305\322\274=\203\002\027\276\2163\354=\343C\261>g\213\367>\231<\014\277\337R\325>\376\222\205\275\001\3576\275\211\247\233=\017>k\276\264\334.\276\032\363\316>\nL\213>\t\377\222>\327H\014>\242j\313=\272\257\201=}\275d\276\'\257\212\275\227E\026\277W\027\302>w49>\004\332\027\276\325\004\270\276\363\217\242\276\203\315\246<\223\026\225>\314\035\007\277c\347\365>\302L\017\275\203,\276\275\024`p\276O\374)\276\205\271T>\203\347\237>\231\245\271:~$\206>\236\t\333>\002+\031\276\273L\257>G\361v=\246\360\200\276\324AC=\225+\037> \222\220=|\310\215\274<\217\245\276\220\373\245\276\340\332&>\007x\">o\313\362\276\255\260\213>\2420\200\276\3441\275\276\276\"\243>\037\317\">{\321\001=\n\273\327=\240iN>n\312\352>P\273\026\276\017\247\007>\212ZX\276V\257\r\276\213\032\264>{\312\220\276\371\231\214=4\023#\276\021\224!>\'J4\2764\220\031\276q? \276\241\312\246=h\325\342=`\252\307=\2508\232>\305\002\007\276\267\233\210\276\3246\366>:\341\367\275\235\"\033\275\360\2400\276\227\177W\276/\032>?\221=\204=\260|\006\277\233G$?N\364b\276M\304\235<_f\032\276\251\255\t\276\317\353K\277dw\310\276P\245/>u\024\276\275\344\245w>N\230\215\276\226\373\203\275\263\206\244=R\373e>RM\367=\357\036\r\2761\212\242\276CN\232\275\206\031;?s=\024?\340\374\277>\251\236\331\275Z\361\312>\346\205\313:y\003\302>/\017N=\302\335\302\275\'\313\272\276\211\365\000\276I\357\022\276\244m\226>\330\"\004=q\237\320\276\331\026\007>\020\024\201\276\310\037\345<\354\322\r>\377\257?\276\030\323\374\273\262=\276>\236\272\370=\312&\244\276\315\324\007?O\364\r\276\240+O<\010\370\235>\004\032\237=8\224\035\276\315\356N>,w\247\276\'\036\237>u\260&\276\355\232\302\276 o\274\276a16?;u\226\275\340]\035\277\"\231\\\274[7\233\276\247\337\324>B\025\306>pl?\277\344g@>HM\021\276\200\276\273\276\034\317\370>\266\374\215=ek\206\276\241\370\232>y\305\007\277S\362{\026\301\275\001\2368<\375\016t>\372\212m\276+\226\237>\376\211\205\276\321\331$>\263y\036?\371/\336\275\372\313s>Q\217L\276\206\273\331\276\275\"\233\276$k!?v\344<\276s}\222>\2379G\276\274m\317=rv\201\276\302c\031\276a 
[octal-escaped binary payload of a deleted file in this diff (serialized tensor/weight bytes); the raw byte content is omitted here as it is not meaningful in text form]
{\275\241z->!\226\302>\n\273D>\353\003\253\276\325D\344=\335\336\376>\004\357u\275g\002\254=\301\'\247\276\242\242U>\314>\314\276\363\032\342\275\376\225\032=2\324\210\275\033$\341>|\220\363\275\332\351\212>\230X\370=7!J\276\322\222u\276]*g\275.@F>\027\374\204>\344\203\013>Er\020>&\365\024?\023\352W\2760R\345>\332\215\317\275m\214\341\276\211\270V\276H\321\274\276d\361\320\276\363l\355= \210)=:\n\267\275w\267\253\276\257W*\277 \265\242=\203\204P\275U\340\210>\203\271)\276\0354_\276#s\237\275Qw=\275\276p\312>\345\006I\276(\037\263\276\371\364\035\276V\213\004\276\261\305\351\276o?;?\246\313\230\276\331\354\337\275\016\344\273\2757\226r>N\357\242>\356\241\007\276!\375,>\271\374l>\264\204\371\275j4\203\276.\0211>\301u\277\273\367\021\360>R\376\010>\023\314\270\276_EJ\276l\310\370=\005\236\335>\231\005\262>\324i\277\276\251\366\270\275\352Hc>\000\240\363\276M\302\334\275_\030\032\276.\024\205\276$Z2>\005i\262>ry\244=z\204\243\2769\2069>\335\212\274>\030I\270>6+\252>\222/\327=\031\326\023e\000W>\233\303\021\277x#\251> L\237\275^\257\010\276\216\345\311>\025nA\276\217,\366<\265\034\014\276\357\321\325\274X\366\013>$\n\264\276zo\276\27508\007>\361\020\203\276\022\204R=\350\214\3628\374q\r\276.\303n\275/\325\307\275&\245\222=\007\272\330\275\277\237x>:d)\275\206\357\206>\342\265\\\2756\375\317>\340\326\260\276N\304\006?&\353\031\276\345\273\026\276\007{\256\276?\245\244>$\222\331\276\250oK\275a\305\322\276\242~\232\275\304\023\025\276\200W\225\276\345\032\260>$8\013\275]T`>\364\006\215\276\310;]>\365\'\244>\013ON>2g@\2752\347\321\276\243?\244\276\270%,\2761\0354>\303\003\220\276\375Y\200>\322\241/?}\310\226=\366\310\255>\'\374\n?P\347\220\276\237i\350<\004\216\'\276\264\222H\276k\243\2209\\\365O\275\350\177\274\275?\311\255>\263\372~>;\215y>\216\276\213\275\025\310\355>\355\004\035>f^\004\276\261T\203>Y\346\214\276\312V/?f}\327<[\340\300=\262U\001\276\227\000\032?\304#q\276\222HM\277]\367V\277\346n\300\275;\213\272\276\361\233\364\276b@\333=Y\220\t?Qv\213=\370y|>(\340/\275\036\201\217>OJ\250\276b\256\226=\344\347\031\276\263v\201\276\300\"\276=Z\303\334=\226GO>\270<\362:\376\317\t>\252\363G\207\245\256>\204\007X>E\333\013\276$\345\257=\266\265X>\005\201I>\253\215\227>D\273\221\276\001>\256\275\233\270\365\272=\nz=\306\304\037\273+P\350\275^\244.\276L\r\374=\252|3\276\031\240\265\276uh\204\273I\300\\=\335i!>j\035\242=\374\017\271>\376\226\225>\360l\000=\360\322\262\276\322I->\027\030\337>\254\361*\276IEM>0\344g\2761\240%\275e)\273=\305R\256\276\r\"^>\222\243 \276\274\303:>\'\031<>\017@\201>\241PD>\246*\334\276(!Q>A\306\213\275\265\325\322=\317\374p>z\376*\276\2307\275\274!fR\277\321\325\226=\014\200\026=\022>\301\275\212\331\027=L\331n>\342\322u\276\304\263\037>*\203\241\275\024!\276=\017Q\\\276R\200\343=\021\311\323>.\t\024>\034D;\275\232!K>\222[\276\275\t\275\361\275$2r>\020\2551\275\202\004\371>\375\376\207<\370\260\323\276\240 
\n\277[\256\010\277T\303\375>l\t\376\276?t\250\276\034[\240\2769\321I=.\206\357>1l\341\273{\307\034\276\263\3548\276o9\351=\311Yt>\\\273(?\034\253\036\274\347\324N\276\375u.?\016\002\013\277\035\276\216\276\3159\022\276\244%\\\2767\223\002\276\336\346\261>Wf\231=>\3559>13K>l\365#:DD\021>\230!\212=\"2\324\276\342\346I\276+\300\241>\327\264\345=\353\367\n>c\270\374>*5\204>5\016\351\274\321|\031>\024z\244=\250\321=>g\016l>\300\310r\275\364WB>T\230\341=v\317K\277\322\266Q>\017\226\273>S\363\257>\250\223+>;\371\376\275\317\001?>$/\277\276\365\236\356\275\325K\307\276\232\276&=\273\237Y\276\230\t\005\276\031vj\276\246V\224>L\025z>p\035\322<\210\234\217>J\003\002>\273\020\301>]\214\264\276~fp\2771\373\366\275\337~h>\rfY\275\017|\232=4\362\013\307\372\303\275\307\256\363\275\0350\355=\214Y\241\2762\343\016\275\275L\217=fA\211>\336\036u>Qf\261\276\353\251\234=Z\212a<\'x\030\276\375=\200<\031\310\216\276\257\222S\2773N\302>\245$\022\277\267\214\247>G#\017>\031\365\234;\032,\340\275(\025\036>/\321q\274\214#\354=F\265k\276\nA\333>\365 e\275b\016\034>\372\251\343\276\034\000\261\276AtR\276,X\304\276\030\317\245\275\303l\022=7\2311>\307\003\266>\377\023g>\252\363\017\276\330\316I\276N3\205=B\023G\276\215\3646?:L\220>\017\207\002=\211\340\204\275\276\000\356\276\313\235\372=\231\257g=\230\337\200\276\246\306\\\276\356.\226>\364/\227\276\355m\252\276\277*\201\276\262\0346=#\232\344\274\347\276\314\275\234\336\331\276d|\007\276\251\223^>\302y\363\275\302\372G\276Yv\225\276\313I\332>\345*f>\272\356\024\276\023\330\363>\273m\243\276s\316\n>\351\301\277\276\3735F=\032h\340>\236\221#\277\361P:?\371f\177>\352\017\003\276\016=\217\276I#|=X*\213\275\2410\004\276\025\221<\276\250\352\346\275\347<+\277=V\273>\276\025\302>1i\331\275\017\244\240\276st\212\273K3\333>\017x0\276\330F\220\275\021|T\276]\317\220\276s\304\201\275\244\305\331\275\001\2743\275\004\276\240\276W\241\214=X\023\357\275\244\305\202>,\334\306>\305g\025\275\t\354\351\274\243\303\242>\353$\005?\336\246\364\276\324\340\322>\305\310\016\277#\243\353\276\347\307\340>)+!>\000\314L>\221\250>>\022(\257\276Bi^\276\267\310\321>\025\347-\276\276\017_=\302\341\023\277\322tu\276\rf\025>\374\326\361>\220y\'>\323g\344\276b\310\241>\t\263\031>\271)w>0@I\276\270\303\366\274A.\006?:\311\005>:)\210>\\\232K=\014\233E\373W\010?p%\350=x\306Y\276\200\036\261\275\261\366\020\274\244\364K>x\2337=\265\333X>v\020f\276\300K;\275\214\365S\274\332u\245>&\236\206>\260l.\274w\307\036\276\301\343\346\272u\340Q>\3208\271\276\362}K\273;)\026?\232*{\276\033\336\037\276i\301:>t\225m\275Q]\271;X\026\214\274\206\221\022\274\'\225\t>\302\245\200>\320~\035\276\353\257\031\276\273B\030\276[\204M>E\026\355=B\203\257\276\306\001\264>`\314\n\275:\2400\274\221~\365>g\206\300>\031\0012>\255}\206\276\340\210J>\341?\246\276X\305\223>\017Vg>D\r2\277\261\020C>o\354~>\364A\373\275\272_\241>\025\306\235\275\313\200_\275\325\233a\2779i\374<\262\221)\2771\321b=\024\017,\276\233\270\177>Z[\207\276\257\305\245\276`\t\022<\222\203%=VAk\276\024\226\033\2768os\275\216\265\301\276\221\300\\>x\307\225>b\236\022=\245R*<\376l?>\220\376\325\275\322\334\251\276a\326\005=\325A\206\275v_\037\277*p\226<\202\323\300>(\3101\276\212\226\203>\032\320#\276\027)\005?\210T$>>\307_\273\341\315\301\276b\336\323>\3314\031;\353\236}=5\337\220>R\370\266>>7h\276\2257\245>d\254\001=:#b?\247(\267=\221\202\247\276\304A\340\275\225[6\276\255\261D=\225]\221>=\033\314\276l\r\255\276.\030\201>\2548\302<\310\036\215\275=\271\230>d\2359?\271\233\017>\246\322\021>\0272D\277\0166t\276>\237-\276 
.\227\315=E\027f\276?wY=\236\301\303\275\357j(>}1C\275\032|\272=e\265\001>\030\000|=\302\352\004?\"\"\017\275\013\030A<\206-L\276\325\020\241\276i\203\300;h\277\361\275\252dg>\241\177\263=\205\335\267>\006\026s>\013\037\374=\237W\237\276u\331Q\276~)2\276\025\314 \276i\257\203>v(\312=\323\211\326\276\020\000[\276{U\206\276\371\377)>\237\272\014\277\t\263\232>i\265\215\276\215\245\262>\005\204,\277\324U]\275\235\233\320\0175\013\276\220GW;l\376\231<\036\357\274\275~\026\321<\0020\360\275x\251\253\275\372\266\261=\253| \276\014\005\220\276\207E\321>\257\225\\\276\236\3779\276v\216\366\t!\316\273\017\373\300>\315\337;>\021.+\276\377V\323=,E\204=\351\023#=\267\030s\276\254^\271=\030\305m=\017\'M\274\332\250\t\276\264L\030\276\004\032\370\276\373\266\353>K\213\223\273\346\253\371\275\363\242j\272\351\330D>4{\322\274\261\016c\276\2716\275\276o\224R\276SR\002>6{\306\275\365\364\234\275\346X\205\276\316\024\312\276\304\3277\277\013[\025\276z{W\275Gn\271\276x\356\336>X\331=>\220E\200\276Y\370/\276(\333\023\277\256\177\314>\370\304\262>E\317@\276\346\310\226\276\242\237\360>\317\254*>\024\350\031\275lK\341\2751\232x>\261A\t>\262K^\2760\202\017\275\231\351\364\276k\235\212\276t\312\271>\325),>\032\023\276>\342C\337\276\n*\014>\350\215#>\210+\017\276h\366\377<\245ro>\002\260\000\277\274\303\266\273L\233\224\275\302Q\214<7\362\250>Q\355\234\274\210S<\275\177`\221>\272$\367>\352\333>\276\340\345\325<\210\367\226\275q!\023>.9\340\276[\025\313=\037R\234\276\367q\373\276U\001\023\276+$\010\275L\270+\276]A!>st\004>W\333\254>\360\030/=\030\3022\276\037\363Q>\204L\027>$l\214=\377\252\217\276\252yi<6\324\223\276\373+n>\271\371\213\276\375g\260\276e\030\000=\334\271\212\276<\274\320\276\031\271\224\276\022\t{\276\240Y\006\277\2477\023>^\340\365> \023\317\274{4\207>?-\300=\347\302\250\276c\203L\276T\204\010>\264\246o>\304\243\305>\2340~\277\334JC\270\204P(\275;\346\032\276hU\347\274\211P.>\024\037\'>\225\t`=\244\242\">\"pM\275\321\302\222\274-lu>\373\361\003>v\300 
>W\344\017?\017\256\200>\262\233\212\274O\234\\\276\353$\003\275\252&\336=\032\206^>\326\316v\276O\342*\276t\354\212\276d\001\334\274\314c\236>I!w=w|\267<\357p\026\275\247X\320\276\206\343\263\275/[\237>\353s\255\276\371\342\017\276\367oe\276\032[\377=[\312\350=\024\000\276\275X59\276\205\201.\275\010\352\252>)\0359\277~\315\212>\311\341;?\340\362\003>\0007\364\275\022a\275\275#\340F\276\367\r\013>\211\0311>\030\3616\276\002l\271>\276R\257\275z\217\003\277\340P\277;$\247\000?\376An\2769\260!\276\324\355v\276j\330G\276\002\225\270\275B\234\n>\247\262\026\275\342\306\033>\004\325\240=\nf\r?\212d\347=?\363e>+\322\201\276\363\2211>P\r?\275k\357\014=\352\222P=\276\033j>\216\244\212\276\226R\356>\275(\311>/\340@>p\003\025?\337\026.\276\302B\031?\035\356\201\275\333B\004\276*RN>\323\334\360\276M\003K\276\336\240\005\274\216\372\346>({\005?~Y\335\276d\302\244>\250\306\315\276\231<\363=\374\030\004>\223\265\262\276\225A\216\276T\352n>4)=\276Q\352\337=\221\300\036\276\353a\371>\0245W>\344\306\326>d\372\252=k\373a\274\253\n/\276S\021\321>\201w\014\277\025`\366>[8\202\276\244\327\010\277\202\353:\276\363\033B\275#\275\355\276\000%\256\276\213\212\026=h\255H\276\320\224\002>`\264v\276\216\356\023?\320\n8\275V\256\243\275\273T\274;\254U\302=g\233\225\276P\256\324\275\304\204(?\267\343\272\276C\243\250\275{x\021?\000\365\236>\301z\201>\3233h\276\001g\251=\222\3256\276\330\356\033>\343\027\245>`\335\n\277\272/\213\276$\241\036\276\345\202@=\025\006V;-\377\004>\325\014\343\276\2448\272\276-\332\302\276\177\236e>\363;h\276\311-y\276{D\035\275s\212J>\027\3202?c/\010\276\0201\215\276f$\376\276\236\016\252<{J\257>\336\356\251\274\312\320\223>I\253\026\277\314\345\211>z\310\307\275!}c\276n\217\037>Y\021\335\275\373\246\250>\207\222\361\276\225d\246<0^5\275\214\n\372\276\263\3562\277l\313\276\327\236i>\377\347\313>\3746\360=w\354]\274ZY\321>\312y=\277\240\324R>\006K=\276\357\2266=\267=\t\276\236\247\345;\233\236\220\276\261d\005\276\345\357}\274$)#?\020\216\322\276\231\324\313=\236\n\253=\261\360\226\275\314\0005\276\322\'\305\274l\207\"\277[4(>\'\252\207\275C\271G\276\351\027\206>\332\024\217\276;=\216\275l\\->\231\250e>1\003\276=\2476#\276\205\2367>\361@\232\276\357\\4\276\276\014\316\275\271 \000?\364vq\276\207\233\026\276d\213(>\320\236T=\362p\275\274+E\325\274\253|7\276\220\377\3729\377O>[8\035\276/q\212<\035\211\342>S\026\216>Z\374\241=\021=\217>\0062|>\233\350\346>j&\224\276\002\231\003\275F\276!\277:\360\031>\007\363\210>\362\320&?\220S\345\276\330\361\200\276\307[\231\276us\025<\276U\342\275\210R\235\276{V\030>\227\025\324=\2717a?;\006\271\276H;*\276^T\023=\332\352k>$a7>YJr>\226\035\226\276Y\201\201\276\377\330/>h\n#\277\341,\250\276v\272\023=0h\370\275\255X\301<\367/\277=M\206p>\005\374\354>p\321\371\275\344q\333\274\264T\026>\226\336\266>\005,\023\276\0139&\275W\031\036\276`[\016>Q\375{\276\327\220\"\276\253L\037>\016N\251\276\254\232\255>f\235\231>\000T\205>\335\262\276\276\277\327\024>\311\271_\275bP\335>R\221D=\227\223F\276\\\357a>#5B\276\355\035\226\2757\030\252\276X%A\276\266\233K>\257J\022\276\267\350}=}\273\007\274\316v\'>\224*\322>C\226<>8\376\231>\210\264\\>\330\263\027?B@\263\276\016\225\355<\013\257\236\276\322$\315>\014ZE>\37524\276\347\253\361\276 \235\311\276\2429\235>}\020\224\275\241\000\203>\242j\031\277K\025\332\276\027#\013>q\360\250\276\331\376\212\276 I~\276\221Ix=\022\032\002>r\320\322\276&\240\346<\024\335\225\276\026\374\244=f\3071\275\265d\307\275X\213\344>\\ m=\206Ri>\022 
\017>\271>[>M\367\000=\3651\343\276~Z\302\274\307\265\276>\002\240\275\276\177\030\360\275\010X[>\007\340\252=\334\322\246=mM\017>i{\274\276\0028V\276\272y<\276\006\367~\276\225\027\002>\326\373\320=\017\352\262>C\270@=\232\302\241\2763+\234>6J\311;|z\215>/\026 \276F\335\221>Z\372\211Hy\206>\346_\303>\332e\351\275\360P\256>Km\207>\3718n\276\361\020\375=\332\307\263=!v}=\334\005!\276_\370\275=\253\332\320\275\310\314\331\274\233\024\354\276b\005\352=\360\2157\276\211H\326\275Wn\347=\301\223\206\276q\212[>\347\0015\276R\316\263\275\221D0=\001mJ\2761\265\014\277\r\235\343\275\331\273\252<\234\274w>qW\265\273\353c\374\275\352PJ>G\261\201>\222z\230\275\241\372V>J\343n<\336\000$\276\026eE\275\361\336Z\276\177\307i\275\326\'-\276\235;\214\2757g\255>2\233\206\276\214\270\363\275\241\025\225;\t\214,\276\360\270\202\276\306Vyi9\313\275\343\321l=\250\276?\273\200\034\364\274\352#\266\275\231\321\034?\304\350\255>k<\222\275\333\357&>\322\n7>\331\264r\276\020\211\211>\311pf>8$\026>|n\"\276\205\243E\276\200\336\356\2763\377\315>\316v\302\276\2160\252>\347C\352=6\227\002>\034.=\276\360\207:>n\337g=S\372\212>\342\215\365\276\236\004\340>\211\351T\276\036\302m>\0061\265\276 \n\260>St\232=\273c\r>\246\000\255\276w\270\245\276$=C>\363\310Z\276\026\261\253>\\k$>O\234\036>PDF<\305\336\003>\303r\221=\353\"\236\276\225\331A\276\377\207\253\276h\232W>\351\335Z\273\013&9>(g\340>\325~\226\276\332\367\000=\224 \004>\362\257N\276B&\333\276\303\375\275>\273\243e>\335B\304\275\303\335~>\"\333#?\324\014\202\276\034\220\201>\032\035\\\276\347\014\274\276\030G\r?\333n\020?~\271\\\275z\321W>\tu\204\276\256\036y\276\356\0213\276P8\201\275N1y\276\254\260\201>*\326\241=\220y\'=\000\234\000?\334z\361\275\274\326\307>o\257\255>j\353\270=*\223M>\013\214:=\226\377|\276/\245y\276\345\241\'\276\007\247\220>6\371\247\276_\371\324>\371m\364\276\276W\354\276h\262Q>^M2>&\033m\276*\266\303>4W\002>\034\300\304\276\233\335X\276\303i\223\276\322\331\224>]\340?\276\254\206\200=\245\304k\276y\324G>\231\272\254=O\307\243=E\001\314>\265\316\017?\025\214\210>8\276C\276\240s4=\375\203\307>\325\327\207=\016\327\304\275\317\003\234\275r\366%\273m\276\314>i \267\276lB\352\276\310\211\014?K*&>\200W\n\2777\360\325\276^\305\016\277\245U\234>\023<\316\275\211p\332\276\036/\272\276-,\244\276\362I\034>\330|C=\227\031\036>\213\221\230\274=uj>\302\307\255>\010M\302\275\221\352\036\277\357\206\337>;\302N\276\377n\321\275e\245K=\315h\305=e\306i>2\201r>\002\221u>d\200y\276\373\320\035\275\314\010\223\276\315\324\320\274d\253C\2760\177%\275 RF>\315\'\230=\r\250\336=-\354z=\3320\n\276\272+=\276j\247\340\276\230K\336>G\237q=\037\021q\276\2746\023\277\270\267F\276\241\353\020>\377\027\364\275\206\332h>~\266\325>\317\257\233>\023\213`=\2164v>\200\316Y>\300\250\210>\031e\321>\222\273Q>\340\303m\276\361\327R=\230\014\333\275[cf\276\257S\"\276\032%\267\275\330_[\275\370\366\204\275\267\272\225\276\353S\247\274E\002\227\021#\311>\307\222&\275\300K\306\276\365*@=cB\320=\200_\032\274\340p\260\275\223D(\276&\340F\276\347\265\274\276\203\312w\276\250v\251=\255\316\242\274R\225\376\276\313\344\302>\246\2641>y\300y>\313BA\273;\024p\276\231\337\363\275\032\373\366\274\377l\006\277<\224\210>}D\247>Z\243\350=`\tt>\346+j=\304\022\035?\375\305\251>\000\203\257\276\\h\327>\372\247\003>\265\357R\276\224\237\325<\211\245m=\320in\2767\002M\274\021d\353>\233\227Z\276\215\233\270\276y\361\245\276\333\367(\276\250 
5\276\026\003^:\330\317x>\244\215\244>\211\343;\277\026\260\350\276\326\275\373=\322]\216\274\334rd>\272YG=\365\226\270>\027\232-\276\251#\314>\323\375\212>6\213\274\276{>&\276\337\177\017?`\343\005?Z\3464\276!\001\370=\337xY\275\264-\004\277\276\273\210>`\341\001?8B\273\276\344\203\034>\253\250\230>&\277\230\276\205\024\323=\203Y7\276Z\242\256=\270s\n?@\025\005\277\370\010&>YL\332\274\200s\273\2768\026\303\275|\250/>\204\241\343\275\257\276\001\277\273mx>G\227\263\275\177\016\020<\336M\242\276\014\350R\276,\266\312\276s\373\242>a\014\205\276\257\243\303>xJ$>\276\206\027\277\211\265#\277\322;)\275#e\217>\212\305k>\001X\234>\020Q\324=K\231\000?\224\tc=}o\361\276(\'\020\276*\346\250>\260\004;>\026T\214=q\242\325>\0311R=z\273\374\275\252\360\257\275Hb\263\276i.8>\361\335x<\325\003\016?\271\003\032\276\005\250o>(\017\376>\014S\037\276N\324\202\276\275\277\211>5r\372>\340A\222\276\350h\377=Q\343R\276\250.\212= \214\313<\316,\'=\255l\200\276\254\212z\276\215\350\247\275q+\216\276]\346A>C\020O=\370\265\326\276/\241\270=M\310\314\276\211\210d>\234\230\"\274R\257}\276\347\223#\276r\330*\276fC\272>\373P4>;=\037=@\037\332\276Exc>\201X\007?@\270\035?\376\234H?l+\342=H\210\352\276\'*\223>:\231\345\275\304\227\371\27321\001\276t\220\'>\302\304\364=\2566\234\275\303+\210>RC\005>\227f1\275H0\326=\265Ib>2\270\227=\3678x>\323\343r\276V\313\242\276\314\000\n\276[\306`>\332\301p>\233\035\307=\263\233\264>\251\363\310=\377\005~\276\305y\035\276Ji\276\274c\344\037\276\206\022J\276\332\271>\275\306\247\270\274\355\203h=\360e!\276w\244\016>+Q\360=\340\336z\276\277\335\245=!\022\016\277\213\312\251\276r \036>\205\371\213\276e\243\372\275X\323H\274l\362\250>\245\215\023>t5\213\275\207\240\303=aU\242\275\361:\252\276\002\235\226\276\375.\010>/\353\276\275\367\367k>[\353\036>K\303\227\276p7\272\275\\$\203\276\035\004\t=\312\017`\276\260\212\335=\211S\275=\217\351\303\276\307\262\271\273c\276\343\276\0344\227\276\007\301\252\276\326\253(\277\231\250\255=\226\243k\275\013m\225\273\002\203v\275\t\3162>\212\307\013\276\t\346q\275\\;\037=\302Y\177=V\267\237>\257\276\201>\203v\217\276\037\037\305\276\304\n\003=iU\312=\232\267[>}\007(\275~>\025=\r\347\016\276\270I\200\276\224Oq>\320\305\360\275p\343F\2775\021&>\340\030\221\275_)\263>[-\341=L\013\347\275;\227\271\275\335Q\251\276\026\202\211\276\024}\3542\276{J\250=\234\344@>\205\"\242\275*o\363\276TJ\016\276\301\326\360=5\256\222\275\261A\306\2767F\371\276\273F\304=C\032\307\2760*\255\276l\257X>\02216\275\2619V=\021%J=\206\025C\273\263K\377=\302\233.<\342\335\t\277T#_=x\036\330\276\034\342\204>l9\206>\314\253\243=r\342\221>\324:*\276\003W\213\275T\244\244\273\353\223:\276#\307a>\324\016\235=a\245\253\275\357U\234\276\307`\216=\021\256A=Q\\4\276\013^M>\002\353\267<\2365`>\367\"\304\276\362ta\276\023\000\">]\345\351=\\\367\364<\3067\246\273\016\335\321\276\n\255\000\275V\035\252=\324\315\007\275\327\252\302>o\217\213\275JP5\277\261\243\273\276<\233\253>\023v\363>@\022\361\276\3253\200>\022+\245=\031\241\272=\270\271\222>\304\033\233>\216\344\222\276F.\242=\306@I\276:#0\276\324Ji\275\2312<\274S+\221\276Us\016\276K\332\205>\205]\035>W>z\275F6\303\276\321\037\243>\351\311\326\274\321F\355\276\250\257\361\275\225i\224>v\275\224\276 
y\312>kZ\366\275\t^z>\267;\231\276\005s\">[T\216\276\271\242B\275\273\034\006?\004e\016\276D\017&>\242\023\250>9\323\206>E\215\321<\343\252\024?\026T\376=\301\013\205=yXk\275IC\016\276g\317\256\276*E\212\276\216:h=\240\361\027>0U\332<\236\243M\275>\372\321\275\227W\r\277\361\t(>\223\375s\276p\342\214\272?\226\233\2755\211\177>\267<\370=D}\272=\nZ,>\222\203\215>\013\202\303\276\212s\307=\234\227\250>\370\2068\276\332\365\300=)\016\346\276\344b<>\313\220\215\276u{~=B\250]\2767\371\356\274=\203\002>E\275\313>\020\0011>o*j\274\001\217\223\274\010\022_\275-\240\211\276\367\345\314=\363-\314=\206\333#\276\3113n\275\334\000\254\276c\376\311\275\311\330\233\272\016\307r>A\274\"\276\226\2678\276\\/\'\276^\000\343\274s\026\310\275\332\032\335\276\016\237B>\240$\256>\265D\206\276\233\201\230\276w \027?\205\230,>\354\262\230>\373_\350=A\276~\273\332\343p\275\332\277\204>\272\3147\276,\033\321=\247\232\004\277\205\272\376>\374e[=\347\251\270=3\2313\276^S\270>a2\\>\345M\005>\260=z\276\347\300t>\262\032-=Q\024\275\276G\204\013\275\332\223H\276<\177\205>J\347d\276\333\337P=!5\307>>\220Z\276\205T\300=|\"\204>\222\2250>\371\214\263\276c^>>\201\344=>;%V\274\"\325\014>\345\214\356\276\235\2710\277\243X#\276\324\372x\276\243\260`=K\232\216\275\245+\327\2761)\222\276\375\277p=dbx>mr@?3W:>5SP\276\345\273w\275\325\035\035\276\026I:\276n5\352\276\353;\"\276\236\367#=\322U\244\276\337Df\275\211a\237=\376B\256\254.\307\276\271\006\322=;L\221\276\225B\023?}=\265=\304\242!\275\363?@?:\227\243>\211[O>x\310\371=\263\251}\273\311\307\025\276\306\250%>V+!\276\275\311\233\274x4\n\276\202|p<\303\250\213:\337\307??\373=5>\307\376\033\277\'\006l\276\221\333$\2774l9\276\366\255\337\276+_\260>aB\303=\177\345\354\275\307\322\023=\307\366 \277\376\003\306\275\247\370s\276J\370\245\276\224\213\272\272\345\251\240>`\347%>\201\257\300>\276Q}>\310e{>J\237\t?B\334\234\276V\312\253>\310\037A>?\366w\275.\216\n>\216\303\227\276\260\250\205\275\002\376\310>X\244H\276\326\350\024>j>\376>A\345\352\275\322M\216\276\260\373|>\014\242\352>3&O\276d\372\001=\013c@\277\234\030\364;qi\037>w\336%\276\256\205\250>\036\340\300=\026\366n>(\345\352\276!\313\225\276\037\313\014\276\260\026\245\274\033\311\"\276L\334\324\276\210\026\030\277\212\222G>\330\3065\274\226\212b>\240\202\t\276]\016i\275\357\020\027<\020&\367=\270\n,\276\210\034\002>1\025,\275\324\212\314>/2\352=\323\027\330>\241\240\010\277a\331\204\276\273\240!?\rm\265\276\002_\307>4\244\225>j\373\264\276\261\024\237\275{\212\273=\315\341\017?K\224\372>\270HH\276\213\030|\276\221\031\345\276\315\364\251\276\223`F\275\241\235&\275\346\251\020\275\362\370\327\275\262_1>\367\232\264\276y\267\016\276\250H5\276\365\246\352>\374SO\2766\001\371=6\013\250\276\266\313\235==\265\244<3\303G>A\305.\277\371\247\211>\324\276\341\276\321\320\206\276\241\306\204\272!\244:>RL4>\321(\264>m\324\271\276\215\370\213=\305\337\212>\377\364\272>\016h\326\276%iy\277xM\351<\274\271\327\275\234\033\356\276K4\247\275\373\231\005>\222;\032\277\0035\254>\273\224N\276\r\332\003?\242f\002\276\341\206\353\276\317D\360<\0007A\276\344\365;9\261\376$=x\355\321\275)\203\304\276z\2770\2774\r\323\276-\016_\275OD?\276e\331\266>E\223\371=C\254\003=\262\347l\274\'\371\253>\234\320\303=\r+\267>~v\002>o\001}\276:\307\344>\212\213\261>\262Q:\276c\314\350\274i)\342=\307*\277\276\255.*\276E\035^\274\210\001\265=\350^\311\276\231\177T\275\332!i\274\232\362)\276\345u\214<5o\037>\005\337Q\276\246J\315\275\2035\226\276>]\004\276I\253(\276\rEK=:w\314=+m\265>2\303\227\276\324\346d\276\271\223\212>gT\362=\347.\374\274\243\231\366\275\030\351/?\332
\363\245>\021i\243\276\221I\221\276\334\370->\213\276\270\275)\305C=\004;\314=\234\221V\350\3001\276\240\023y=N\\\r>^6/=\334\367\211\276\016\0002\276\004\207Y\275\017\203\212\276\014\347\247=F5\347=g\206\310>\226\355\300\274\267C(>;n\205<\302\236\215=\023c\322\276d\230\032\276\032\365\352\276\332<\260=\036\344\347\271]\253\027?\351\312\021\276\320\004\303<\307\212\000>\241,\035\275n\013L>\244do\276\367\232\324>\n\334H=\321c\332=6%\025\276\305\246\202\273oG\217=\332c\247>\305\310\001\277\270\261\347>\365\207\304>\343\370\336\275\346\253\204\276\225\205\331\276L\201\027\275GG@>\260\236\240<\323L\030>\360e!?\014\211n=F\367\314>\320LA\276\267h#\274>\024\244\274-M\033>\030a\003>\313[\005\277/\232\213>\323\226P\275\230\362\207\276\346af>\3310(\276\312&\202>\264\274\024>|\335\312\276W\031;=oWc=S\246\004\277\310}.\276\177\346}>\2771\255\276Jq\262=\203\025\233\276,\263\246\275\325\021\022\276\264\201\037\2761+\350\276Y+\241\276l\203\252=Kd\343>42\233>\353y\033>\037\233\234\276\315\301\222>\337\346\230\275\345\211\021?\214\376_=\002\3275\276\000\336X\276\212#Y>h\013\374\276\\1s>Nh9=\254\364\224=\n\304\302>\0107\221\275\324\353\210=\354\203\002>\033\031\n\276\021K\355>\360\367x\276\337P\204\276\252\275\t\276\034KQ?f\027)=\343\017\376\2762~\206\276\272\213`\276\017\276\374\276;*?>\023\254\223>c\363\022\277\215ij\276\014W\010\277,\032\275\274\201\356}>4\261{=&S\272\276\303\203Y\273o\\\006\276\034\366\336\276!\024@\276\265\021\317\275e\303\022\276\025\300\300>#\225\310\275\243S\033\276\321T\267\276\311\321\222>\203\264\037\276\302\242\000?C\325\212\276\216(\355>z\026%>\254=+?\336\300\367\305\303_>\311\2602\276\317|\237\276\177\240\265\275\321\346\215\276\005Fy\276\017\016\367\274\363]t\276\246\243I>4\234\216\274=\353w>\277\370\245\276\322\351(\276\256\010\237\276\247\344\004>HS\001>\305\022\214>\333\221\206\276\350\202\314>\244.\262\275\323\262\310\275$\216\376>\220RS\276w:&\276\231\324E>T\310\362\276\220\327\006>\314\037\354\275\357+V>\263\275\212\276\341\273\275=O\356\334\275\212\235\t>\227{R>\231\177\350\274u\221\035\276\353\210%\276D\264\220\2756\213\345>\262*\202?^\2115>4\266\201\276\342\013+\277\320\220\336;\344\204b\276h\006\361\276\241\014[\276\246\033\253>\366\217\374=\217\270o> ;\025<\252dp\276PD\313\276\351h\253=;\206\376=`\300\257>G\253\207\276Z\203\336>\331\3755\277W\033\276\275\270\r\257\2766c\235\275]b\352>@\005\210>\257a\340>\341\262\332>\240\025\320>\215K1>\231\310\007=\320d\252\274cV\345\274U\242\032?\371\0356\276\210\260\354=\320t`\275\2642\225\276\347\260\036\277\364*\367>\316N\024>\005!\237\276\223tz>\032\261\250\276\356~\031\276\036p`<\271*\353\275&\303\307=\361\302\225>\205T\227\276j\002\272>\265\375\n\336\253\027=\266\327\">\337O(>\247l?\276TVf\275\3444\243\275\317\372a\275k\257w\275g\261\366\273{<\315\274\r\216\241\274\310\034\\\276\370)\313=\321:\224\273\254\307\367>\024Sc\276\275LA\276\r\323\304\276\373\200\357\275g\333 
>\302D*\276\272{\320\276\326\317O>(OU\275\2726\234>g\371\201=c\371\343>\017\243O\276\337\341\271=\361\3229\276\276\247\306=\304;\365\275AF\225>\224\014\212>\243\245\376>\300o\212=\342\333\271\274\025\362\256\276\030\362\211<*\370\270>\377\300\325\275\207\263\310\275\363\273\256>\231<\216>^\207\356\275\026\372\001\277\2627\361>\232\326\367=\350\023\020\276|\313D>Q\257\000?\314YS>A\230\206<\3450\014\275\350\273\241\276|\217\t\275\037\265\275>f0\006\276\345\341\037=\310\211\322\275\367\374\245>\006S\354>\354b\376\275\201\253-?\331\353\025\276\027p\251\276\217*\231>2\337\003\277\302<\361\276hr\226\276\323\244\'\276\271\271\350\275\346R\320>t\005\353\275-\337\277\275{\r\330>\016U\363\275\373\032\203\275\256\327\207>\343\215\033\275<\374N>\351_c=\230\210\311\275>\206\r\276k\243\236>\0319\330=\335\001\013>\203\r\200>y\332\316\275\312\363\331>\331\346\274>\273u\226>\032y\300>7\234\016=.\330\027=\014\313\003\274\256\036\274X\253\364=}T\323=OVf\276\031\037C\277n>9\275cV\313\275b\265\331>\222t\232>\301l\316>M\t\342\275\237\261G=\212\241\026>r\2343=\273cq=;\360\250\276\177:\272\276\220U\243\276{\237\n>\017\230\004\274\"A\213>l\007x\275\001\216=>\335:\'>D\274H\273\001\"\263=\357F\220\250!\371\274-\302w\276:z\203==7\266\276\354`\312>\343\251\255>>i\254>\332\243=>\222\246\310\276\356\\\222\276\372\347v\2768\035\177\276\2137(\276E\211r=\341\356\034\276X>\004>$\230\345\276\327R\203\275\3666\274>$\307Z=)\000\244>C\317\273>g\230\t\275\305.\331=Zz\211>\"\363W\276$#\023?\235\317g>j\013\003>\354i\r?cw,\277\n\243\266\275n\377\212>\340\216\241\276\"~\371=\262Bs=\351^\222>ys@\276\243Q\">\271S\212\275X\267\206\276`\000\003=\363\034D=SCn\274\330\035P?\347Y\016\276\255\014\034\2762\010\325\276\206\263\220>K\201_\275\004\330\306>\034\363\347\274\321#p\275x$m>a\002\205\276\265\3302\276\347\r\312\276\260\335\265\276=\350M\275\316p\217\276kX\215>1\262\345=3\315\323\275\335\304\312\273|\236T\276\232\223\025\275\224\257\335\275m\266\033>v\246\311\276r\017+?{\002}>A\317\273\276\2406\301>j\2325\277\372\325\n>\017\244\326\275h\023v>7\242U\275\336\213:\2765\256\235\276n\345\251=[*T>\236,\224>\312\240\333>\244\257\212=!\215\254\276\233\220\232\275!\322\255\276l\020F?\023\323/=8\014S=\247\024l\276\343\360\302\276\2508i>\311\350\241=C\350\271>\\\366\303;\010\236\200\276ep\337\275\227\347\021\276\275\245\373\276F\3271>\206g\212\276N~\254\2762|\026>\002\013>c;\361\276\257=/\277\306\366\344\274\304\375\341\275\361r\021>8\033\371\274\346b9>\367\340\316>W\353\251\275\366M\322\276\035\246$>~\217|\276I\246\342=\016\022\273\276\220Ro>\204Ma>\236\214\336\276\006\nt>:A\213>\203P\001>/\220\031\276\340*\214=b\343\036\276\261\245)\276\033\214d\276|7\232>?\265\253\276;$\367=\241\221\330\276 ~n\276\204\247\257\276\234cg\276\2150\247\275\257\254^\2762-%?t.D>K#\236>]\033\017\277\023f\021>\001\016}\276s\230K?z\371\270=\204\007\244\276\025k\034?T\t\022\275\221\2720\276\302\241\212\275l\024\027>\261\303\274\276#\023#\275 \356L?0\350\256\276@\2279\2769\335@=\355\205\027\277Z\n\007?\263o\210\2765pi<\325\026\032\276@E[\273\341\310\333\275\361\363D\276\312@\353>v\242\020\276\316\016\314=\202\370\252=\004\311K\275\376\233\207\276A\261\217?\3365\007\276\3242\307\275*\251\022?\336\271&?\372n\350>C\237\025>\031\277\201\276G\306\267\276\324\313\377=\367}7\276Cu\213<\356Q\355>\327\005c>Lx\001\277\304\224>\275\364\262\260>\224Z\020?C\351\255\276\334\260\370=\335\365\245\276\216\213\356=<0f\276\261\374N?\306\215!=\212\320\367\274\317l=\275\2236\253=`\260\006\274\216\346\235=[Z\001\276\375\200U>\210\004 
=s\364K>,\r\216=m\251B\275r\035j\274\3356\336>\210\"7>\020\361\243\275\356\330O\276\201\013\367\276d\247^>!4,>\246\202\037\275Oi{\276r\361`=$b}\275\273K\327\276\352\2428>\200\260\\>\n\243O>\306\371o\276M\272U\276~Y\342\276\003\272\204>w\360\335\275\203\340\013\276\235Y\260\275\252\345\207>\362\375\212>\220=\032\277\023=\316=\343\377\001\277|ov\221\202\247\276\203\231\326\275\205\363\327\276\246v\001\276d9\276\276\376\235\006\276\014\320\"\276B\026\312=\307\344-\275\365\306\246<\323k\\\2760\016\310=xd\352\276\375\361\351\276\364r/\276f\267K=\321\262\037\276\313\246\363\275\307\014p>\002\3302\277\002\351V\276\313\361\226>\033q*\276\0253q\275m\307\334\275\035\217\251=\263\304?=T\305#\276W\213\362=\003\250i\276\337l\205=\243\377z\2765\tk=\0363\004\277\257\014\353\275\357h\027\277\343Y\274=\200\304\205=\357\036\372\274\252\0208>\272/\r>\345\343\221>\344\362\257\275\002\010\243\276\331\027\017\276Q\201\177\276\256\362\000?\032^\330>_Q\372\275\325\316\355\275\302\351\216=\001\305`\276FA\377>b\241{\276\210\324\252\276\226g\332\275\236\353\315=\372b\317\275\255e\t?\004\274\313\275 \003{>\024l\351\276\026y<>d\3024?\204\272\277>\006z\307>\033\260)\276\357X\362>c\327\256\274\006\276\233>3c\364\276La\254>\r\302\263\276\035\313\220\275\370\223\021\275\020b7=\355\254\004>\311{n\276\373}t\276\274\2677>\251/\001?\340\212\223>\262\277\273>\020v=?\311\350\016?T\271\203>F\237\222>\302I*>\235e4\276\334b\240>0\334T>\313\3559\274\023\340M>&\340\r=\322\252\026\2760\317\211\276\233v\r\276\002\251\300\275\220\177\312;\270!\360\255D\321\276\332\377\003\277\343\336\360>%xU>\301O\t?4>-\277\270|\322>\314Z\265\276\360V\002\276\031\335\273\276\361\371P>\363c\206=\274.\263;\302\311\032?\214\243w\275y\200]\275\362\215\317=\363D\030=bHt>\372\270/\276\005b\225=@\3728>\326\265\303\276\006\360\324\276r\321\247\275$\226\216\275\251\335\203>\206\207:>\233\216\357=R\007\n\277\374_\263\276\271\n\004\276\2020\221\276\177\233\267>\362\205\t\277}\317\216>\346\255\365=e\227\203\276\224%\332\276\300\266\203>\256\3057\276n\232\316\275\003\375\274\276o4\252=\033\322\310\276\006\307;?\241lX\277L\234\321>x\227\320\275\021\020>\276\251\002\270\276\364t\320\274)Md<\342\222\350=od\317=qZ\325\276\341\242\202=\006\312\215\275\311V\013>\016m\023>\232\220\003>z,\021>\265\206\375>i(\237=\2219\007>\374\033\340\274\354W\265>Q\333,>\330U\264\276A\220\310\364\205\234\272#\243\t>\3209`\274jSm=\364\222\036\275\247\236\200\275L\376\267>\316\354\020>\261ZB>w\374V=\224\253\261\274r!\307\276+\030\371\273tK\226\276^\311\"\276\"\276\371\276\246\270\256\275jpO>\245\232\213\276\027:\373>\204\356q>6\204\303>/cL\276HwP\276w\220v\274\354\304\000=*L4\276u\355C>\250\033Z>\'\247\372\276E\365\226=Qq$>\277s\207>\237\234\226=:\234\023\276\004\305>=\211\215\245>\024\266\374;\307Ej=U?K>\260\307\240\275\362\325\276\275\376\376-\276\017p\">\323}<\276+\220s\276r\216\370=\200\036\367=^\013\014>\340`\322\276\334\323\274\275\300{g\276\254W\346\275\214O(>\372\304\261\276\313j\r=N\326\221>\034\005\271;\\]R>\323B\345>\3061!\277\216\313+\276\322^6>O\303x\2764\330^=\250\tx\275\201f\252\275\317\220\"=K@;>\310;.\276\334\254\373>D\322F\277\251uW\276\366\032@\276\300\177}\276bSw\276\006\303p=\233\315\030=\014\366\302\275\tX\022\277\240\246\223=\363\377\366>\025\342\030=.a5\2768)\244\275\023\326\257\276\035\r\350\276>i\273\275\213)\255<\256q\020\276=[l\275\275\2275=4W\276>^\205\216\276\024\324\214>\273\234\013\276N\020$\276tO\355<\354\303>\276\025\315\031\275$4\234\276\244\n\010?\276dh\277\000\001$?\257O\016>\222a&\276\323R\004\276\344Uc?\255\237\213=\355\376z>p\027C\275f#/\276\304C\023\2
76\306\0253?65i\276\236\005\341\275t%A\276\2511\343>\271_\231\276\023\213\316\276B\353S=\254\367\234>\232\336\024\274(=O>\340M\300>\217\364\364=\333\"\312>\267L\037>\215-\320\275TDL> C\002\035\213==\021`\211>\035\363U\275[\222\341=W\371\317\276W\276\334<\275\273`>.)\247\276o\030?>\314\273\203>\240\014\227> \326\262=(\307\335>\245f\013\274\025\362\273\274>\204\305>f\2709=f\365\306\275\274\311\013>\273\307\225=\302[}>f&\'\276\216\3505;\3733\336\276\314\016\014>c\355\233\276-q\256>\206\023F=\304y\314\276\300\010\247>1\221\217>\306\301n>\200q\301\276)D\227\276Rk\253\275\277\014\244>\301\020y>\320\332\343>\206\325\240\276\207\230\371\276\277p\217\276\360\214\022\276gc\227>>0{>0\301!>\250\305G\277\033g\226\275/BV\275^\360\204>@;\341>\213\004\371>\276fU>\310\306@\276\312\270\356\276\325R\\>\025\366[=F\275\247\276 \324+?\277M\346\274\037\365\226>\216rB>A\023\266\276/\'M\277\360\323\221\273\233\\\335\275\347]\030>\376%\253\276\037H\303\276\261\261\320>\363j\272>F\240\001?\035N\017\276\355\347X\276\035\262\273\276\253\360\326\276\253n\016?3v\036\277.H\322\276F\203\264>l>Z\276\366\372\323=\333\035C\276l\237\355=a\265\014=\027\271\245=\361\275\307\276\213\026<>2\033C>Z\255\375\276\356s\354=\257k_\276%\230_\275M\323c\276\226=t\276\315\253\271=\025(\244\276\005\376\330\276\231!\256>bjA\275\256\224[\275\372d\253>x\"\014\276<\370\346<\313\2047\277\277\245g\276]\2009<\327\270\301=\250v\024\275i\301\230\275<\352\307\276R?\032=\220,\251>\231\346\275\276N\341s\275\341\005\r>V?f\276}\002\221>\266\321C\275j7\213=\345\252\303\276\037\267u>\353cj\275\213\323\332>Z\033\202=\217U\372>\241\260S=\361\0227\275\226|\304\276\366\207\330\276\222\272L>L\252)\276\327j\371>9\213\207\2759o2\276\314\002\234=\255\240&=G\006\005>\031\312\331\275\326\312\201=/P\305>{F\210>\")\347\276k&g\275\350\036\346>\365\304\222\275\317\302\221>\365\353\027\275\347\214y\276\241\216%\277a\312\205\275\n\014\253>Wz\221\276\267,\232\2765\201\233\275wG\002>F\220\236>\0246p>\177\331R>\013\321\276>tQ\247>DA\\>\273}\203<5\3507\276\316\023 \277\376\'\032\276\365{\014?\024\000\324>m]\327\275\026\376\376>\013M\214>0\3279<\277,\266\272\"\241n\2763-\365\275c/\246>\370\227b\276\231r(?ZT\022\2777>0\275\256\202\362>zW\033>[\022r=R;\330\274N/\236>_g\342\276\344\373\212\276s\255\254\2236\333=5y\317=si\002=\242\205U> \033Q\276^7]>&7\233>C[\372=\224\006\223>/\320\030<\334\331\205=\334\210w\276%\t\307\275&\363\227=\300\035S>\212^\246\276\025\300\206>\327\260\201>\352,\241>n\261\267>h\312G\276\246\253\230=\007\300\241\276\2569\002>\240\342\023\276\351\367\213\276\204\253 \274\234\233\256\276\216k\264\275\237G\232=C#$?\\y\366\276\"\372\010\275 Xp\276+\220\222\276\037Q\235\273\207\033\274%\000\033\277Y\204\225\276\357\3774\276l\030]\276\240\264\005\275k\252\377\275\037c\200\276\306\332\203>\217i\010>\200\374\210=s\\\214\276v\003\342>\236h\206\275\376\037\301>:\303~>6W\213\276\262\031\273\276\230U\006=Z\376c>\'2\207\276R\241\302\276\010cS\276\223\314\203\275g\0270\275\224S\341\275;\027/\276\261A\014<\346 
K>J\325\007\275\313\r\320\276-\322\005\276\006\336\263\27564|\276]uK\275\275\'O>V}\032\273Cje\276\030\006\316\273\315\337v>?J\200=\342\331Y>\240\276\236>\200T\265;\000\027\270>\364\337\010;-e\232\275l\240(\276I\004Y\275\371\250\203\276\031\0263\275S\200\312\275\247\205\220\276T\276\217\276VS\\\275\3134\031\275\207{\266\275~Sv\275\006\337\316\275\302\336\210\273\344%\026\277v\323\306\275\277Mo>\351Q\3473\276DX\223\275hJ\005\277p\t\330>\003z\022\276\331@\221\275\247N\032\276\302\037\362\276|G\350=\027h}=\327\014\010=\3222Y\275\233\275\274>\'\263\021>\207oQ\276\343\260~>\367\341\360\276-\220\311>\232q\265>\326k\363\276\316\002\270\275\265w\030>\247\262\232>\236s\333=5G\363\274\362\257\274\276\2601\250>\312%Z>2\t\211\276\026%\355\275=\245\206=\013\206\334>\343\201A\276M\330\204>^\004D\276\363z\311\275\355\366\212\275n\n1\275\251\267I\277\251\253P>\360\244\360\274?\333O>R\004e>\354\346\331=\277\217\333=\214\227i=sr\036\275\373\256\205\276\234Hx\275\265*\267\276T\307r>\311\226\324\275\233\002\212>\374w\326>~\241\251\275;\354\232\276\240\365\324\205\032I>\242&N>\347\251\265\276\\\262\227\276O\245\224>\000\035F>\252\215\260\275u\003`>\231\301\016\275:\270$\277R\353\340>\"\203\261>S*\341\275\270%\225>\032\037\272=zu\320\274`qC\276i\252\211\274\242*\346\274\032li\276\327\301\035\275\n\300)=DU\310>\2733\326>\300\3647\274\022n\363\2754\273\277=\315\252B>\306.B>Z\022\203>\006&a=\223\307\006<\312b\317\275\030V\241\275\322\305\224>S\227\211>\335\031\022\277$0\205=\204\377\274>\205EF\273\305\\\312\275JLp\277\344\201\002?\370\215\215\275\372\356\311\276\242*\355<\270\307\302\276\363\314\213\275\242\223\330\276\273:\250<\364\002*\277J\2243>(p~>\020\247b\276\315\201\375>\246V+\276Y\316\324\275X\271\210\276~\335w= \341=\277q\261\364\276\260\030k>H\025]?X\260\307\275WC\212>\037\235\246>r\367\266\276\337\261\376=z\266\363\276r\314\320\276\331\253\323\275\223\306\316>\336\207\316<\'\037\244<\324J\r\276\267\351\n\2747\314\230\276\275\201\320=p\271\275=\373X\316=xG\224\275\201\257\\\275\001\201O>\312\301c=\357\317-=\255\204\000\275\317\245v<\'A\337;\303\n\216=\200v\302\276\341m\311=\221\244\212>\272oA\276\256\252\247;\374\355\036\276\373x\330\273laG>\265\352Q\276\323 S\275K\\A\276@F\002<\224\014\332>A@\212\276\274\241i>ZL\233>\237\312\035>\376\017\025>C\\h>FL\347\276\036\365%>\300l\251\274\270j\221>\357\313\246\276O`\222>\005\367\200=0\210\223=\316\340\240=\224\261r>j.\204\275\363ZW>p\035\313>T?L\276\324\2321>\327\206&>{\302\263\276RM\014\005k\036\276A\216\304;\234\322~\276\276GV\276A\035\"\276\307\020\'\276\325\216\002>\235\221\201\276\236n\212>\2375\275=\177M\240\276M\354e\274c\370n=\327\227r\275\277\243\212\275H\3112\274}\222\327=\253St\2761\023\226\276%\342\302\274\025\251T==i,\274m\312\222\275\234\314\332\276\332#\277\2751\330\021\276\357\247\203=\232\n\224>\313\366\010\276X8\252=J\204\026\2763\274\377\276\264W\363>\"\006\350=\315\027\272;\332k\275>\310i2>%N|\276o\307\223\276\255\240w=\212\033=>i\360\316>\270R\010\276\364r%\277\031\255n\276\030\024\356=|\302\202\274\344p\'\277\004\033\200\275\357\377\005\276\r\221 
\275\"k\253=/\356\020=\363\232\037>\300\273\301\276\273\366\032>GV\025?\210\203\216\276\300\267\331\275\313\370\2079R\003\241\2757\0339\276\276\021\235\275D\3014\276\365\355\276\276$\3051\276,\023\213>\223\3538\275\213C/>/\264\256>\370\260\322>\305\275\200>\000t\035\276\001xC\276\035*\236>\021\030J\275\361\264\034\276A\026\361>\271e\010\274\302\361\270\275#G\000>\267\263\253\276\331T\217\275\002\344\006\275\337\233\213\276W\240\247\276\273dP>\374\272\020\276\313\003a>N\035\014\276\330\331\226\275\352S6?,|\227>kV\233>H\216(=^\2118\275\271\352D?\370%\205\276\367J\377=\020&\305=\375\342&>Z\360\376>G\243!?v\372@>\221?\024\276\017\312\231\276\033\254b>p\324\036?\275\257h>X85=+F\231\276\361\227\365>a\201\340=\nY\302>={w\276\\\276\014>\233\254\025?\350\213|\273\250\331(?\256\201\033\277\333\203k>\210\250\016>\255\257o<\366I_\371\r\240\275k@\n>\300\215z\2766\311\003\276\371O\260\276\257\240\\>\253\n\252\276X_y=\324\341\257>0P\252=MS\312\275\215R\034\277\243\010\034\275\260\r\013\276\234\247\016\273d\224H\275\247*\255>\267~\300\377\021\266\276&K\010\276:8\233>G\306v<\313\273\200\274\231\357\312\276x\276\312>Y\255\230\273t\237\346\275 \371\234=AI\253>K q\275\010\034\003?_\331\253\275\214\361\361\276\346<\220\276\230\366j\276Gw\251=UG\264>r\255H;\020\020B\276B%\341\275]R\005>S`|\276\300\353\346=\367\260\215>\016\356s>\244\332\212\272\001b\236>\006/,?\342zg\276\322\033\275\275\347Y\213>\265\216-?\312(\305>\331\264\351=\372\377\372>\302~\365<\027\312\307>\240\211S>\277\344&\276Q\210\033\277{\307\214=\320\252&>cR\206=\017BX\276h%\204\276\272YJ\2757\264\016?\263\224\361\275\352\340.=\226\365\203=\263\244\262>g/\305=Ue`\276\364$\221\276lq\215>\305r\353\275\266\010\327\276{\243\032\276\372\365\312=\260\211j>\3658\035;\207\206\276\276r\205\002>\335\340l>\314\234\010\277\311]\005\276\021\317\024?\340\374\201\275\210\372$>\211\303\r\275/\222\231\276+\323\306=W\356P;\376\375\266\2757\215\221>\006\212/>\007\352\200>$\236c\276kv\035=A\024\365=\nC?>Z\004\236=\263:{\276\033\350b=[\215S>Y\247-=\360.F\276\273T\021=\035\313(\277\267\200\246>\006h\222\276o15\277Y3{\276\362\215\230\276\321\342\323=n\035\255>Ds\272\276X#\006\276\314Y\220=\312L\225\274\004\251\334\275F\364\200>N\244}\275{Pa=i\363\304\276\251\031K\275B\244\034\276\205\217\244\275\324a\267>\275\204\202\276F\2269>\312G\335\275\346@\213>a\357\n\277{q\260\274\274\323m=\235[\246=\326\236\326\275\236$\'=\335\005\023\277\225b\317\276\013\256\357\273}\220\005\276\177\231\252>/}\264\276\224\006z=\206X\251\276\270\005\234>\036\276\212>3\n\006?\177:\214><9\230>\333G\347=\033]\356\276}\211\351==?\333=XL\206>F\005\300;u}\316>/\303\315>\233\000Y:\301\006r\276z\3635\276\r\345B?\\1\026\276\326\306\242=o\360\240\275\203\301\307=\220\362\244\274@\315`\277\245\371\353=T\245\n>\244\027\215\276\202\230}\276\372Io>)\214J\276\276\336\215\276\225\262\261=\325b\310\276\356w\354;{+\261\274\301\000\310>\322\206\350=hy\213\275J\346\267=#dK\276\303\331\035\277P\273\224\275\356\354\313\274\032m\212>\365t\201\275x\303\267\275\243\316O\276K\2271\276\200\212\'>\242\203\332\276\350<\031\27685g\275\320C\004>O\r2\277\"\037\013>\220K\232=\345\360\217\276\035\314%\277yf\006>\r\240\207>\370\207\225\276`\347J\276\026L\321>+\031\036\276\275\365\305\275x\237\336\275^\262\\\276\317\241\004\277nI\031\273\260\245\014\277\035y)\276\266\014\025>2\235R\276\351\356\006\275\025\310\024>\245bL\2767#\201\276r\200\277\276\244\017\203>\214\353\212\275\235|\037>\332\340\355\275\261Dy\276\322z\330>\234\321\250>\316\202\234=\374.S\276m6\216=\300l\330=8\277\366\275\3042b\276\014\342h\276\314\320\302=I\237\274=\24
5!\021\276J\t\271=\340\2773\274\200,\243\257\331\221=\244\004\254>\0034\252>2\323\010\277!\207\364>\276\343\325\275\233\031\002>ZJ\025\274d\300\316<\022\254\032\274\357\261j=7\213\250\275&S\214\273\336#\252\276\277<\211=!\323I>\352\334\005\276\366\202,\276]\336A=oO\330>\275h\312\275\271o6>\277\240\003\276\312Y.\276\207\274\361\275X\211\356\276.\231\014\2772P\025\276\304\014\206\276\036\355\350\274\354Ak>Z\001T\276\203m\232>\371\341S<\021\312P>CK\342\274ZK\010\277\002V\244\276\252d\220\275\317\027\261\276%7\331\275\225\350>>1<\327=:\365;\274*\213,\275c\237\226\275P\034\024\277\274\362\221>X\353@>^\023\035>\227N\232=t\315\031\277\"\205[\277\223`(\277\373`B\276_\246\234=b\r\251\276Ra\255\275+\252\272>\310\277\016?9\000\325\276\021,\003\277+]\335\275\301\027M?\341\357\276=;f\200\276\202@\202>\364\274\002\275\210;\272=T\370\332=\317\030\241>\344#\031\277\312\262\233>9\260\013?.fC\275\220\302\006\277j\027I>\335\366\251>_\177\351\276\275$u\276\307\271R\276\271\317\300\275\252\033\205\276\201\364\036?]\034W\275\200g\207\275\000\362\347\276\035\303\224>\264\347\006\276i\243\222>\307\241\001\277\347\275\264>\367m\257\276\'W\363>\202\260\220<\033\366F>XGO=\216\343O=4j\230>X\313M\275\221\364\202\276\013#\205>3l\306<\371;\021>\315B\354\275\240\'`=Q\226\033\276\007\206~>\321\265\260>\004\342_>\323\367<>\363 \007?\343cM\276nY)\277\216\315x\275\\\225\214\276\230\317|>\247\370\327>8B\365\273\013\220.?\255%\201\276\036\3712\276uHs>+^\027\277#+\224<\366\317\262=\252\r\247>\323C\200>\316I <\301t\202\276A\352\207>\341\357\220=\231a\334=\235\272\357\275\032\'\252\275m\332l\276\002^\277<\310/_\275>N\254>\356\276\020>\273\236\306={\222\n>\264\007\275:\205\352\304\276\351\344\026\275\ts!\276K\020\300=y\000n\275\251\310t>4\375\363\275\265%\223=\00082\276 \313\002\277\337\320\217>\355\375\177>\253V\t?\026\201J\276\317\336r\275\223?\222\276\340\234~>O#\342=\331/\026\275\200 \222>\372\001\206\275\231u\213\275T%\301\273\337\033== \017\016\277\241\034\212=\215c\214\276P\304D\276C@\324<\254\020o\276k\331\343>r\371\346\276]\2278\277\244t\271\276\020Z\242\276\2345\362<\334=\300;a\305\230>GEJ\275\035\242\033>S\347\312\276\240\330\252\276\234\202\252>\373E\363\275\350\227H>\3137\347\275\211\245X=\2247L=\246\254a\2763@!\277f[\016\276\335ej=a\330\303>\224\035\365\275,{4=ib\332\2749`\006\276\025\355\231\274N\024\226=\326\002\344\275V3!>\n^+\277\204({>[?\212o4\351\276\262\272\211>8\246\225>)\216\332\276\260\002\263=\344=7={\tn>\007\022\014?\340n0?\335U\001>\332\216\027?\206\217\265\276s\353\t=NO\265\274\324\315\257>=QL>\357\367A>\217\372\242=I\004\n>\014\034\002?\365\241\364\276|\rk\275\365\206\026>\205\020\r\276\'t\344\276\262R\271\276\370\260\n\277\227h\241\275\224\371\260>\'&\235\276:z\235>\203\271<\275J\022\200=\210\246\034\276\326\013\003?\364\327\312\275\024\331-\276$\374\371<\037a\270>\007\270Y>\247\303\030=\201{\230\276b\216\314>K\005\261>\343\313\004\2769\026 
\275\377\215\201>k9\275:J\331\001\275\024\033\232>\272\342P\276x<\254\276\365\256\031\276\300\027\001>7\277\206=\234\270^>\277\024\003\277\245\300\030>`\267B=\272zM\276\330\006\207=\376\317\304\275\205\273$>S\213\n>\217\307\232\276\235\024i\275\326]\013?\233i\005=\023D\223<\254\320T>\310\276\335>\003\352\225=\210\364Y>\207\204\223>\322\226\357=\234\r\346\272\225\223\240\276\345\305V=%\253)\276\032S\201>h\220\327>w\276f\276\242\013\334=RR\251>\327\022\317H{9>\310~\035\277\234=\352\276\366ry#\347\216>\000\345)\276\017j~\276\250h+>=\022\000\277\017\321d>\\\017\020\276Z\276\364=N\355\232=\204\331A\276\'H\301=\231\005\023\276\372\346\204\277\207\224\200>\202\213\306\276\226s\211\253\017\014>T\350[\275\331\263\205=\014\310\210\275 }\201\2760m\242\2754\231\240=\360GX>\266\336\257=\266B\004?U\3508=U\302z=\315H\035\275\325\275&?H@\215>j\236\252>5\327\037\277+_\372=\257g\372\275\3237\340>\241\244B>\004\347*>\034\342\311>\340\343\255\275D\306\016\277\212q\"\276\236V\264>\202\225*\276\217.\005>\314\205)?\232\025\273\276\250L\002?\310h~?=t\273>0\267I\276\364\337\261\276\300\025@>\310\250M\276\022\341\254>e.\331\276-\241\350>\2207\360>\241\252\213\276\375\260c>\203\311\306>]A\263>\232\266\215>\023g\026\277M\332\257>\243\372x\276?\270\362\275\337\032*>\332\342\211\276\365\373\241\276O&\366\276o\214\033<\222\240\005\276\340@\203=Iwt\2745o\203\274ky\241\276\271Pb\277\303<\n?o\345\252>\227s\304=\267<\315>\033\377\243>\361\020\225>\341!\333>\204}\206\276\224N\261>\366\347\343\275\024\315\230=\377\373\206>xrO\276\021\002\311\276x\251\331\276\023\237\314>|\242\033?\221\252\020\275\204\325\035>\316a\254>\307j2\276\321\007+\276\205\313o\276\245\317\376\276\246\360/\275\021\317F>A\201o>= \345=\243\367~\276\272\033\007\276{\261\342\276\260\233\274\276\t1\206=\334\323\200>4\177\224\275`^\224\274.\347\036\2779\037\273=m\314\202>.\361\200>\261S\225\276q\300\231\273\206\225\034\277\310\204K>6\005\217\276\353\276g>\346r\275\275?\035R>y\375\276\275\301]\264\274Yd\266>\266r\000\277\027)m\275\255\252:=m\007m>\200\343\002>\251\306\014?u\320\014>e;\255\276\337(k\276\023\310\321=,\317/\276\245\364\010?\213L\310>\304\035\003??\341\017>\031<\206\276\332\262\364\276\211/\022?[\212->I\242G\276\217\222\004>\024\264\307=\241q\353=\320\234\317<\341\206l\276\326\271q\277\241R\005\276\321\3541>\346\326\024>\241\024\034\277d!\004?\231\325\332\276\230b\257>\327.,\276!y\227>&\356\237=[ \217>}T\204\275\036\224\210>\220\303\347\274\264\236*>c\211\004>p\356\204>\217 \235=,k\254>\r2\203\276\017\371,\276\320c\036\276\245\347\317\276\322\264\316\275\253\216\031>~\332\r?\233_z>T,\235>o\265\270\275\232\212\346>\215\001\330\275&D\231\275\244\252^\275\002Q\023>2\264\030>\000+\351=\177\242\214\276?V\213=\251\223q=\355\253\342>\301\030\036\276QW\270\2766\013z\276\030\200\202=\315\376\355\276\201\006\227\275~\361\001\276\357=p\276P\202t>\270\206B\276\317Y >\3515?=\304\267@>\373\306\005?\002V\n\276V\366V\276@\270\251\276\026V{>\202`W>\200\365\350\261A:?\220\256\016:\334\232\221\276\245\020\212>\240Pu\276\007gm\276s^q>-\002\336=\370w\347\275\256H\203>4\266m>\203\207\236=\205\352\020\276w~ \276,-i>y-\364\276F\205\314\276\371^\275=(\270A\277\023g\312>J\001\307\253\000\005\277\211\t\245>\026T\352>\360}+=\3603O>\265\207\275>4N\t\276H\271\025\276\254\004\205>\177\324\324\275\270\355\024?\322\206\006\277\367C\306>\216BW>\252;u\2746\340b\276.\314!\274*f\"?\027V\373\276\367\300\220=\326\006\363=\200\335\023=\376\0029\276V\001\320>\371;-\276\306\352=>J\355!>t\376H>\303x\020\276m\377\273=\234\n\300>\227\324b\274F\242s\276q@b\275L\nl>\313} 
<\205\247\017\277\307R9\276\017\306)\276!\214\313>\211PC>\031GK>\327?\005> \302 >\221\257\014=)2\302\275\255\262\317\276y\227\034>>\310\032?\013\210\323=\213p\240>\373zs\276]W\271\276\350I@\275^\330\265\275\324\354\214\276\316\301A\275q\332\265\276E\263,\276#\276}\276\222@\241\276&(\330=&\240\316\275W>\315\275]M\216=t\214\022\277\004a\022\276\301\221\020\277lYk>d\020\221\276\321\274\242\275\341(\t\276\263\310\031\276\260\256\322>U\352\210\275\374\013^\276\322\246->\007\256$\276\243\375W>C-z>o\316\302=o\310J\276\263W,>p+\010?\023\016\306\276\030\257\366\273\215\330\271\276\257\n\231\276\354\216\023\275\2652\315>\223\274\n=\2073\027>R\356\273\276&\022\302>\264b\262\274\327:\347\275\303c\026>\2322\233>~QD>\007\277\206>|9\203>\356\2415>\334P\347\275\212\036\213>\314\014\265>\275 \357<\0011\330<\3759\327>\r\365\002>\262\306\243\276\341#\313<\021\177\267>\215\240D>L\266\312<\330C\t>\260\333i\275qd0\275\317$\270=\236\207\271>1\205\237\337\014\023?\030\362\360\276U\212\031\276|X\027\2774\361\013\277:c\361\275\360\254_>\214e\335=\374G\221>\261\030\260>\211\202\316\276\247\224\233\276O\272U\276D\311\245\276g\237\227>N\022\271=Dtr?\251&\022>\362\244<\274\205\022g\275x\375\324>\376\313\317\276\363\205B?}\310\264\276\272\367\371>^\020\005>NP\024>\224)\342>\001\271\255\276\204\312\361>*/\201>\335\005r\276K\024\205\276\312f\014?\275\373F>F\270?>\255-y\275\216\324{\276\320\rt\276\326m\010\277~\321P\277W\\\224\276\377\215&>r\001\n?n\030\201\276\322\007Z=\016\267\355\276\370\204n\276TeQ<\315\010S\276{\350|>\277\016\021?\347A\330>\024\347\257\276\257\2364\276\350\276\266\276\005\200\223>%\377\350>m\325\352>\021u\364\276\221sY\276d\'\014>\356\260\263>uk\016\277\235\367\250=\023\313\377n\r\r?\214\356\\>m\013\214=\365#l>\375\032G\274\312;\013=\030x\216>Q2\207\275\263\0232\275\340O>>\027\374h=eH\025>\261\340\273>\\\022\t=\017\344\222\276\212\366\327\276D\263w>\251 
8\2777l\310\276\232\205\353>\000\3111?r\315\307\276O#\302\276x\226r>\330H\350\2761[3=.\306\243\276L\277\215>\324\374>=\244l\344<\030\251\334;_\273\201\2743\023\020\275\270\311\364\275\324\305R=\310\276\354\274\247D\271\274-W~\276\222\310\355\275X\340\234\276\312\213\367=\364\016=\276\355{\312>\306\244Y>8cZ\276\256\256\243>\251\027>\276\251U\005>I\273\345\276\332b\247>\275\206\030\275\211\203\231\275x\260\232=Z\224\237<\240\3055?#\363\263>C\321\213\276\347\342\276\275\023\333;>\327\351\306=Qg\023\276\310K\236\2754\236)?_gg\276\302\250\211\275\332\215\215\276\257\037\t\276\\d\214\276\312z\211\276Kx\223>\367\365\271>\362!\271\274\327\276\222>\307\246!>\250Mi>\373\312\252\275\335\362\352>\271\022\252\276\367\010\001?\241-\021\276\243.\240\274}\264\226\276\334\254\253<~\364\306>\255\370e=6\303T?]\246\224>\330\324\326\276\351fa>EU]\276\311\252\271\275\205\263\367\275\355\304\367<\2728\332\275/-\204=\225\253\234\276\277e\265>\224\002R>\343\370\330\275A\300\177\276ej&?\334\307\212\274c\342S\2763\261\033\277\235\317\274\2761l!>\r\250\000\277m\2713>S\357+>\277K\337=\021\235\320=\265ft\276T\347\342>\261d\264=\362\306V>\306\356\000\276\202&\252=\031\210\030?AV\227\276\nv^>6~\037>\310\377\204>\201\345n>U\360h>\236]4>\315L\314\276\332\345V>\354\353,\276\000\363\201\275\277\254\235>\224\351B\276\356,\037>0\244\357\275\276\222T=\3031P\276\367\314\221>0\331\265\276FH\022>=\235\226\276\356\010\332>\370p}>\376\323\257>\331O\217>\\{2\275wf\355\275\347aI\277@\360\333\276\354a\231\274y\242l\275=\337\226\276+\357%\276\3646!\274\022J\324\276\203Y#\277\327\'\031\275\361\330N\276L\010\215>E=K?ezF\275^i\024>\022\372\373=*l\227>\320w\323=\255\345\253\275F\030/?\337\234\234>\266*\347\276\036Ek\276\365\323\240\276\004\365\013?\372\013j\275\'\331^\275fy\273\276\222\031}>\362dQ\276\026.F>\350i >R\331\222\273\377\244y>BhA\276|\230\246\2759XA>\306\214j>\024\330/\275K\2772\244o\251\276\003\021\371\275\230u\315>\277\006O\276>\372\301>\217\361\303\276HR\220\276\333\260\025\276\364=d\276\345 \031\277G\210\321>\326\257\017>\004C\254=]\314\304>\010n\323\276&\223\n?\202\304\354=\342\352\213\276\004\027\300\274\251\304\375=~P\216\276\337e\262>\223\310o\276\335?G\276\024\271m\276\274z\300\276\375\210\017\276\266\036\224\276C\353\245=\317VO=\035\202\350\276V\036\207=p\264\373\276B|E\277\264d\326>\034\347\301\275\247\317\021?V\'\320\276hS6\277\243\010>>6M\313>\340\020-\275\345\255\373=4z\220\275\241\337\210\276\322\356\371\275\257h\305\276 \375k\275\377\026\303>\321\214\204\276\013\210\277\2762\027H\2766\250\316>l\'\210\276\325\346\263\276?Z\021>m\255\261\207/y\276!\007\362\275I\020\232\276\255d\213>\030\275?=\336\031c>\312R\335\276\251\317\332>f\265\244>\227N\221\276\350I\265\276\221Z->dY\020\277\227\223n>v\031\006\276\006\320\321>\212\rv\275\220sN\276\264^]>\314\230X>\330\207\252\276E\255\324\276b\322\212>`\304\006?f,\234>]\253\203\276\357\303~\274\305\223\252>N\312\026\276\210|Z=\361\373\272\275\374\242\304\276\313\302<\275\324\320\235=+\252\201\273=\374\203\276=\255\006=\364=$\275O\021#>:q\227=9b#=]9\335>T;+\275B0m\275x\235\030?\216\321\255=\206\312\304>\261\216\324\275w\225j>\207\262\214\276\2227)>\263+\215>\032\250\002??G#>-a\n\277M<\014\276\342\003\273\276\236q\300\275P\210C>:\232`\276\216/\224>\241\343\301>^&\"\275\352\373\'\276\354\004/\276\311Np\275\331\344\236\276\363N\246=O\316\242\2746t\214>\246S\301\275%\037}>Z?6\277\314\361\347>\364\256.=\314:\373\275\351U\340=p\214*=\005\354\265=\243\037z>o\203\277\276a\325\244:\203Th>nx 
\275\307\n\265=P#\306<4X\331>m[\016>\365q\375\276\221\024\327\276%\262@=\211\306\260\276\325\314T>\265\354\224=\2447D\274\256\031\t?\347K\224\276L\266\316<\333\260\244>\317\261\223=iZa=\225Mp=B\r\261>F\201\200\275H\316\030?\305&\022\276b 4\276\214\317\340>o\024\002\275u\241\315\276\002\245\321\275\027\007\355=FK\256=\241\372\035>\003?^>\325\350w\276\236|-\276\335\326X\276\220\010y?)\026\316\275Uz\327>*\212\272\274\tSW\275\365\205\372\275\352J\205>2\036\233>\351\323\316\276\222\217^\275\307\'\224\276\311\3243\275%\375\002\277\313X\211\275\020\251\230\275\361\375\377=\376\340\306>\032\372\201\276\025\355 ?\270\264\252\276\020\037\263\276\354\221R\275&\312\224\274\327\n\237>(\311\254>4\014\261=\247\241\201<\210\217\177>t\200\346\275\032\232\331\276\271\231w\2764Y\030>\372b\004?\0205\\>\027\217\345\275%5\224=4\253\374>\220;\312\275\271\020\234\275\244L\315=\224\310\200\276R\324\312\276\335\344\260\276\255\355\367\275N5\245=\"\342\360\276\330\036\021\276\332\013\316=\335\026\037\276\222\354J\274\307<\214>\003\262\222\276P\314\325=a\030,=\232\034\234>\030}\236\275W\036\353\274\t\315\214\275.\"\260\276\320\030\270\275\0369\220=\374\247\343>\305-\330>\177v\217=Y\342\000\275\007\261\t\276\3410\343\276\275\266\310=ek\276>\010\274\367\276\325>\211\275\014\225[\276 \003\336\274\367\355\261=\377\236\330\274\220u\"\274\002\341C?\221\342\235\276\247\370`>\206\236O\276\243JI=\205(\034\277\352\002\037\276K>\236=\223\273\"\277p\244\n>\304\345\n>3\233\270\276.\306\243\275j\261\256>\242p\373\276jn\367=\215\271\036;\035 \232\275a\330\325=]\260\350\275\203k\330>\256\272\262>\330*\236=\355\323\031\277\353\325\242=\373\306\t>\346\363\250>&z\261>\224F\"\276\220`\321\276\220O\363\276\203(\004\276\314\336t\276\367\004\026\277\332,\303>[\346n\276D\315\215>,\271\217\276\302\224\335;\235\'\201>O\236\271\276\312\035E\276\265\033\'\276R\217m\276\037q\016\277K\230\024>\305\243\001\276\243\037\313\276^\373V=o\236\215=*\362\346\276Ob\027=^Z\336\275\024\365\'\276\324\263\037>\013\346\335\276\234\357\210\275V\30119\3438\313\276\267\001F?\265i\260>\263\277\311>\214QB\276\333\365I>\002N\335=g*\314>\245\215/\276\321\254\034>S\366x=I\'\010?\204\267\242>R&\267=\234I\240>\363\325\247\276\236\276\244>h^\343\276\027\027\326=\237\237\">\223\252B\276S\032\364=\234\024\013\276\272\265\212\276V!E\2767\267\271>96\037?.\227t\276\232\016\242>\007\016\272>\317X\342\276J\001\r?I\370\361>\010\364\036?,U\n>\254\365\315>\243\"\316\275\276\217w\274\240\\\233\277\253&\003?k\204\314=\226$\316=?\337\264\276\355y\252;\341^\'\276\021\320\t\276d\233\311>\014\022 =\320\344\234\276!Z\206\276\310\321\350=Y\336\237>\340\001F\276,\2267>+?\240>\375Q.\273\363\302\361=\025\327\344>Vp\214\276\315\324\024\277`\206\317\276\007\215\021?}\214\n\275\356\030=\277\204\362H=\\\241/>\224\372(\277\314\205\323>\275\233\362\276=D\347\275\375\211\350\276A5\232>\325\202g\277\264\037\355>M\037\010\277$b\007?x\310\365\276\370\225\025>}c)\276\005\205\311\275\315\373\331\276~^\010\276\013\353D?\315%\301>\306r\227<&G\304\276Y\217\255>\365\027\273>N\356j\277\336\014\277=\3507\212\275\366\203\250\274\037\023&\276_j\213>\354\264\031\276\334\241\036\276\020\013\221=\032X\001?C\002:\276;y!\276\321\026\274>\202y)>\255;\240<\251\037\245\275\212@&>\367\275\316>\343W\242>u\321\201=2\320?\277\001\351\236\276\367\305\007\277\241\242\031\276\202\362\370\276\351H\242\001}5>(x>>\306F\237=oV\231\276\353\230\202\276\332\266\374\274r\226\t>\022bk\276\004?\206>\200.\n\276VX1\276C\216\323\276\034\264\245>N)\273\276\257\305\227=\004\373\364=\275\350\002>\274\207 
\274R10\276\342\020g\276\325\004\375\276\265\2462\276Y0B\276Q\000\034\275\307\234\260>=\314i\276\206@\204>q\023f>)\302\353>\244\262[\276\357\220c>i\204\033=\010\036\020\276{\333\213\273\336,\323\276\227p\277>\027\354\245>\241\356\\\2763+\036\273p\233\000\276\314[\203=\321+*>\034\235\255>O\375\223>W\271w>\273\037\"\276\223\363\036>1\331\216>\311\035\026\276\240a\215\276\035.\335\275Y\356\341\276\321\273$\276\272\242\241\276\024\305!\276\002\320\006?\367\200\214>\217\0234\276H\344,\276\250M\350=\0305\243\275\342w\244\276\336\036\035\276A\265\215>\03506\277\027\247\270>$\3743>\037Q\241\276\342+m\276\027\031\313\276\333n*\276\357\036\350=M>\005>\354\203\026>\263\016\204\275\370\252\336\274~\352\270\276\2666\222>S@\256=\275\360,\276\002p:<\353;\336=Fu\233>@\024\326\276\233\343\177\274\035\256\031\276k\271c?\276\247\016=J\205%\277t\327\n?\322\363H?\331\307^\276\005gu=\017+\335>\320\207r=,?d>|-^=\232\303\356\275ZU\242\275\212n\300=\037\265\300\276\257j7?\002:$\277\250\315\317\276w:5\276?\345\256>m\2109\276Ib\360=o\002M\273\370-2\276z\223\375\276\306\t\355\275\375\006\314=\254\003\217>\226\343\262\276>E\227=A)\272\276\000\366\267=\236\363\250=\241\300\325=Jr\363=\364g\006>l:\227\276\242\357}>L\216\222\276\323\362\354\276\207\242\365\275\"\254\200\276\203\034\376\275k\222_\276\233q\316>Z\177\202\276f-uh\372\207\275k\030V\274\310\374V\275\300*\201\276 \244|\276k\226\200=\322]\232\276\022O\265=*\252\205>(\304\302=\240\225q\275AW\013\274\272L\241\276\260si;{J\035=*\216\230\276\365=\r\276!X\233>7\004\200>\246\222\025>\204\032\367\276\'\211\217>\0201i\276#\337A\276D\236\217=\370\210q\275\024%\277\276\005\300p\275\023w\224=[L\372>\260\375C=\354r\266>\213\0029\276\003\016\261<\325\236\356\275\220\026\310\275\023\305\243\276\301e7\277\'\377W>\303\013\334\276\206\347\212>\321zO>WV\266\276rP\003\277-\354\365\275\026\215\210\276\"\213\314\275pb;\276\032\316\346=\035\242\211<>kf\276&\342Y>\243\r\">\'-\177\277\250\365/\277\r\263\334\275s\356\221=\211T\001?Y}\003\277\263;]\276\212\276\224\275\271q\264\275\266\364\013?\240\004\373=\204Pw>\275A\276\275::\r=\320\307\256>.\367`\276\273Zf\276#\260N\276\334\323\366\275\222\207\026?\263\223\350>\274.\201=W\226\221=\203C\333>>Y\036?S\270G\276\230\357\022?\255\246\233=\005\367\356=Z[M>\002\265\375>\2610\256\276\t\025|<\246\013 \276?9\374=3\031\215\276\177\356\245>\373P4>\220\210\221\276l5\r\276\\\215\'>H\213!\276\316\233X\276T\240@\276n\246\205\276\362m\233\275\025\326\237=\265\370[\276\377,\201\277RC2\2764\354\234=\367\003v>\205B\030>\233\r\007\276]&\346=\'\345!?\265mh>xz\026\277\346\002\245\275w\027\201>\025\022b\276\227A\230>\244%9>-Ti=\215\370\244>\312\030\313\273K\315\276\276_\300\326\276\2029R\276\3221\206\276\203\353\005=q\0270\276\025P\034\276\332@\r\276\036\307\022?\327\313\306;cdz\276\217\320\201\276\277\370\342=\031\361\021=D\202\241\273S\347\227<(L\034\276\252\016\252=>f\207\275\202\360\212>H\261%\276\030\324\324\276\330\235\362>y\335\024\275\242\030\300\276e\020\267\275\267\307\357\273\212\000\221\2762\232.\276\r\366\242\276x\220\346\275\356y\310\276\276\264%\276\246\346\314=\035e7?\204t 
\276\325z\313=\223\263(\276~\036\0244\224\306\276\201&\360\276q\223E\276K\036\243>=O;\275\262M\213\274\0268$?\266\003\355\276\300@\016;\277\347\335\275\032\312\217=\323\225\017?0L\244>]\235\264=R\200.\276\345\251\374>\252\027!>\300^\231\275\224\312\313\2766\326\242\276\214*}\276\276\342\000<\330\005\323>\351\246\027\276oC\004?\023\335|\275\034\376\335=\231{\335>\'\346B=C-Q\277I\3254>\032{R\273M\323V\275w\250g>\240\365\320\275Vp\247\276\335\355\321\276nh\353>\313\215\371\276\221\304\261\275w)\257=\207o\346>\202\346\203>+G\014>\271\376\353>4\211b?k\366\003\2769\352\261\276\014o\311>\025Y\201\275\350@\177\2769\304\203>\025me?\260\321V>\370N\274=\355H\005\277\2379$?\252`S\276\017\006\325\276C\272\213>\024\301\233\275\305J,\275i\370\254>\247\021@>\377ER\276\267\3134\275\r0\024\277\rY\222\275\230\257\223\276\005\030\251\276\270\277\323=\241jP>\274\320\003>\232\2658\276o3\017?\\TD\276\370\003D\276\377\243\363\276\217\222\216=\020\227\372=\20604>\266\305\203\277\301\225\232=fW\273\275}\236\202\276}C\203=b\320\335=\206\027>\277\240M\035\275\307\r\017>\374\002\021\275\262\2071>\316\303{=\202\213\010?@\2245\275\001\262\023?\305l\352\276\227\302\251>\311e\247>\225\240\373f\202\314>\276;\025>zKg\276\253\'8\276\204$\037>\243XC\2762\202?>[\332a\275\016y\310>\003\016\034>\266\231,\277\212\317\217\2760\326\335\276bT\240\272kN!>\"\014\355\276\271\343\330\276\204>+?>\267\360\275hl\205\274\241==>\223s\350=D\302\254\275\220d;\276\374\203\006>G\261\232\275?uS=D\310\001\277R\002\204\276O\220]>\034h\234=\2464s>b\357\245>\237n\325=\332\367\301=PH\207>\211\333\217>\244\305\217>%\035+>\010\355\260\274\312\215\237>P\255\316\275}\361\264\274!\342\203\276\262\373\025>.\rp\276\212\313L\276\205U\333>\030\177z=\355(\272<\344`\n?\241\332S>\024\274\263\t\312\241>\267\234\210>\231\024\002>\203\220\224\276:\274->u\351O\275\346\367\357=&\"m\2769\022\357\275JF\007?\232\267\331=CuL\276\017e\035\276\245A\'>|\001\230\276c\'\377=\307\347\031>X\257\214\276\322k\215\276=\340\242\276\203\277\300\276\253v\n\274,I?>G\323J>$\\I;N\314\017\276\026:M>\313$\301>\274|H>\300\324\340\276\303|\232>\004\374\242\276\337\013\232\276\177\'7>\020\226^\275g`\010\276 \213\206\274\275\010\243=l1\205\276\230\253(\276\375\340y\276\214\205\207\275\205C\304\2751\213\260\276%\346\006?\226`a\276\257q$>\010\000H\275\360\035\336\276\005?q>\333\320\227\276\214\370\316\275\260 `>l\027l>\376\r\223<<\223\220\275\257o\235>\244\352\210=\232\235I\276G\026/\277\003\216;=\213\014\333>\2470\267\275\264;\034\277r\323A=\032\337/>K\330\376=\303\374*>\345\321w=tVX>\016\374A=m\246\021>W\354k\276+(\244\275j\356\223=\340\000\317\276Y\233\342\273\255\362\205\276\304f\223>5\304\304>v=\355\276%>\301\275Z\205\036>\200\025\262\276\231\200\324=\260?i\276\343Fl\276\016\230\240=\3314\307>\350\242\240\270\266\270\217>\007\252\251=k2\273\275\354\377\227\275\225\257\265>(\323\021\277\2266A\276L\037\215\275J\263?>\005\271 ?\373s\342>:(\275>\014\030N\276\200\347w\276\376\361\202>\300\036\300=\223\241\204>\224t8>h+\233\276\235R\333>\000\246(\276\005\"\001>\013]\354\275\0027Q\276\331l0\275\037\235\271>\242\032\r\277\363\361\261>:\'\037>\022\232\353\272\033\341\275SjN\276\026\252^>q\273\257\275V\344\333=\330\374\315=\276\355\242\276Q\336\223\274\351\365\312=\347\252\316>:v-=h\356\237\276\037g\303=\021\304\013\275\242:\016\275J$i\276H\206\325\276\243\\\207>\362\036\211\276\223\356\366\275\342\025\214\275\221NN=\212\321\211Z\221\210\276\251_\224\2760\243e>\3327\325>|\240\212\276\350M\214\276\2645i\275\375V!\276\013\006\346\276\237 
\021\275\365\367\230>\351\037\306\276A\206\007\276\225\225\365\275\265Y\216>\314\244\313\276\373\024w=\314\031A\277\245\036f=9d\305\276\350\220Y>\213\274\370\274\352\271\227\275\317\360\016>\202]\252>F\226t>\214+l>\315\231\234>\244\315\237\276\013\225 ?\322M\024?\323Z\253\275\361\220{>\006\004^>\307\356\277=\203\210A>\365 \304\276I\257\245\275\220{\024\276=T\201>S\277\277\275\355\205\314>(\317+?\373\217\327\276d*\241>\215\217\261\274\325\032\214>=\343!\277\215\226\017\276\250\246\235\276\003J\200\276\235\206d\272\311o\375=K\226j>j\003\243\276\243\327\215>,(;>\221\031O>I\005\252\274\346\323\243\275\316\257\263\274\340t\256>y\212\255=\202|\274=\337\031\373>\'f\306=aq\327\274s\005\303=_\202=\276\033\016\037?\306\\\367\275~P\004\277@\226\324\276\366\017\r>\032\330h\276\n_[>qc\231\274\345Q\342b\271\243=\3373\305\276\005z$?\225\235\333=A\373\355=\355\205\363\276\237\016\203>/\325\030?\004\023\312\275\357r\'\2765\201\330=.\276L>\266\2762<\037\021H\276\217~\025\276\275w\r\276O\215\0358\257\033>nAg=\033\336\215\277\274\311\240>iRK<-\343\030\277DAZ>\3228\253\275\207Y\367>iLv>XN\376=\224\002\341=\361\367j\276\371%\303>\276\221\335=\312\027F\276\027\026\245\276dl\340>\306\204\025>\347\037\031=B)6>\235\245T=<\204\216\273\362a\217\276\331\337S\275\275g\231>\177U\234\276\224V\255>]\013\277>\363\224\365=\325\005\017\277\210\324#>A`\245\276\277=\344\275\036\341\022\265D4>\372:\241\275I\202\260=\263 \360\272\301Z\310\276kl\"=\250\317\371\275e\0319\276X=\000>\311\251u\275\325\220\027\276\3200\240=5\210\010\276\354#\005>\333C\253\2763\237\\>\270\037\261\275\217\022o\276(3\014\2764\027\032?8\005$? \332\226>\327\311\027>KgX\2756w\002\277\240\3668\276\026\376\004\273G\264>>l\356\323\276\301\331\274\275Vr\204\276\204t\267\276\264)\272>\245]\024> \224\311(\032\201>3i\220\276\3044\327;\224|\034>>\203\343>\036\\\014\276ns\304=\232\026\226\276\\\332~\276*\264\200\276\276\240\224>\35478>S4\204\276\317 \373\275\327\310J\276h\030\n\274K\002\002?\210SN?A\215\363\276\344*\365\276\207\311\022=\016\236\326\276\240\322\263>\343\031!?bc\200\276:@\007?\315#\217\276\\\263b\276\240\370\020\276(AC?\304f[\276x\335~>=\260\037>o\344\204>\256\036C>\031\326Y\276\177!\217\276\255\336\304<$p\034\276\264%\201\276\315\206u=\352\360\225\276\262\3029\276\r\037\317\276\306TG\276\010\302\231<\376\217\360\276\'\375\250>^\376\231>n$T?\2458\211=\257\266`>\216j\332>\n`3?\274\3709>\016U\356\272cZv\276{o\321>l\353?>\307\234\210\275\357\245\202>\026+Q\277\3171\013\276\374\332\032\276\247=I\2768\0062=\336\030h=\205\232\342=\311\341\252>sf\030\276\"\237o>\32487\276L\326K\275\210\376\036=\317T\007\276>\n\375\274\250\207x>\355[\214>\333a\325>pM\361\275o3=>*\'\220\275\372\321z>\006=~\275\264\nG>\232o]\276\362\366\240=\200e\241>c*\342=\007\212O>\302\230\022?y\227)?\234\253\215=E\362\031\276R&`>\331\302m\275^l7\276\r\351\341\275z<\005>\251\346F>\350&\276>\014\263\360<\355?$\276\245\227Y>\t\326\027\277\254WA\276Do\027\276\376g\350>\272\264 =\032\311\252\275\034\202\004\275\363e\276\275\021\274l\273\265>6>\'k\243\276\035T]\311\360\035\277im\203>\2617Y\276J\266\244\276w\210\257>mC%=(\031{>\376\265?>\220\215 
\276\304?\004\277\240\332c>IR\353=~\257j\274U\245\223=\017\006u<\036,$>\0215\237>iH\353<\333\263\357\276HW\350\271\025\3030=<\323\343;\233\200p>g0\363\273\227)\331=\304\002#\275r\017\261<\r\370\000\276.\030\016\276\370,\227\276wo\023>\255\260:<\004K\205\275\222\006\363>\'\345,>\201\336\344<\t\263\340\276P\355\006\275g<5?K\327\225=\350+\247=J\366\206>\010\203\377\275\311\217\214=\264\260\250>8~\254\276Y\307\026\276\232c\032=\331e/=\0369\336\276XN\266\275i\032B>\356P\210\275R\264\\>5\331\342\275\267\237B\276\256\342g\276\257\276\006?j3%>\257Xb\276\301\357\272>La\000\275\350\371}>\326\367\370\275\3202\005\2765\2507>b\312\261\275\261 T\276x\275\236\276qPv\276\363\362\343>\363mW\276\253A\221\275\211\251\022>0\321R=\033\310\214\275y\177\036\276\232m\315=F_\212\275\241e\037>C\375\030\276?zr=J\320\021?CS\250\276,\2658\275y\214\230\276\260\0352\275 \200$>\005\265e\276\361\223\277>\347\247\240>\001\202\377=V\016.>\322\220@\276\262\242\237=\235\240D>u\206\271>\340\243\350\275\020\"\207=\357\260\201\276]\231\243>\206\024\332\275\027\337\241>\315)q\276d>\330=\344R5;\214G\337\276\213\256\264>\347V\303\275\256?Z\276\263/\230\275\336\347\270=\244\256n\275\363\242\274>\240E\350=\304\'\347\276\002`\260\275\217\226\335=$m\275=\020\265\271\2767\217Z>\213`\262>\212%\254\276\033\013\315>\232u\222=\346*\025>\003\342\322\274\233\373\005=ba\373\274\r\007\313\275r\312#?\347\017\233\276\2451\211\276\271Oy\275\234aJ\275\250!T>\030\350\336\275\300s\231\276\262y\007?!l\033>\2633*?\226:\222\276\322\320\\>\306/\214>\356\275\366>\371\347\033>\202\204\334\276F\306\211>H+&>\322\004~\275\247e\214\274/3\207\275\276\223\206>R\253H>R\224C>\023\210\001\275r\n\317\276\210\371\003\276\340\357!?\224\005\001>\351\031\323=e\371\005?\303\355\324=\030{\352\276\316\374\256>+s\226>zZ\036\275\355\034\357<\016yR>\2779_\304>f\300>\275sO\002>\314\r\001\276\353\241\r\277$EB>\271\217A?\211\020\324>=zy\275\364\"4=G\007\337=\271\202\'=\221\\;?]\337#>ov\307\275nU\026>\300\001W=9Y\345\276\300\330\310=\336_\005\275\371\214\202>\245\337l>\360\037\376\275\331@\002?\222\372C\276%E\362=\214N\010<\366C\360>\300\031\213>\222\306%\275k\336g>[\"\n\277\3261r\276\207\363\n\277E\332\303\275\315\225\261>\024?\244\276\361i\026\275\346@o>\336\005\007?T\342\233 Lo>\273\220\026?\007\210\324>/\336\314>IS\037\276\223!\336=\024\226\320\275\302\300\316>\006\321\313\274Z\373.>Dv\311>9=\357\275 \002\265\276\370\005\364>4\2363\276\247\203\264=)\325L=\n\275\240>\325v\337\275q\274\033\274\307\r\032?6\215D\276\313\336c\276\373\202i>|>\210=\273\255\207>\344\014Y=;;\262\275&6\023:\244\266$\275\20492>A`\014\277\275\375\245>\264\236\235\2759d\267\2762K\221\276\313R\371>\322\224\267\276k\234\256\276\317v\205\275\212\323\016\276K\253\312\275V\036\010?\022\363`=p\307?\276\231\307\016>\300\255E<&`t\276\224e\334\276\327\343\006\276\031\223\317=j\232N\276\205@S>\355,\016=\210\250\353=\241\253\036\276\331\374\275>\246\010`>A\324\225<\203\301\037\274C|8\276\006\367x>\267\261~\275\252\337w\276\227\313\013\276\026\247U\276}H\211\275\323\001\237\276!\014:\275b\307\330\275N\026\356\273\3566\374\2756\304Y>\365\271\242>\2311\005\274:\254\347\274\346\347 
?\350\222\235\2768or\274%(\223>\315>\363\275K\263\007\276\236\200W>\246Y)\277$\215N\276\314\312\310>6\227\020\276\313.\026;\030\372\213\276g\322T\277\201\275\362\2764y\240<;\n\366=\201,\327>U1y\276T\253\223>\212\014\352<,%\217>0\211\322\276\341\ta\276D7\231=nX\355\275H\n\202\275\376w\241=C\033B\276\276\"}=\017\367\330\275\3465\214=\325\321\340>,\240\207\275\305\303L=h\227\212\275\003\214\275>tC\362\275\031\362\226\275\301\362z>$sb=\223\\\211=\212\025\361<\333h\017\277\261t\241\276\242\340\031?\377n\373>\276A\224>!O$\277\357~\206\275\000Q\367\276\r\027x\276-\353\321\275I\033\035\276\275\024\337\275l\035m>\306\214\205\2760\326\212=\244\026\t>\254c\260\274|\351G>\340\317\261=\032\250\034\276\"\235\016\276\317\367.>\016\010\342\276\273(\325>\311\331\226\275\027\226\342\275\204\017\325\275\325/.\277*\215\226\276\366\357\273<*\016\267\275\034\343;=\275\r\233>\364:O<\236\374\351;\274\212\254=\017\336\322<;\245\334>5\205\347=s\202T\276\233\033!\277.H\232>\321\314N\276\t8\033>\326\220M\276O\023+=4\270*\276\234\325p=\227\377?\275]\230\004\276\277S\317\2768\001\016\274\312|2\276\373i\225=\226\362m=\316>\260<\000\354\321\275\370Zc>\351\266\217>\304\312\036>\266\341\361\276L \035\276\347\020\372\275\002\252\007\276VT\315>\363#W=dx\344>M.\314>\344\342\244\276\247\332\347\274\302\273\271\276\022\310\"\276`\"\020>@\220\225\276\037\324}\276\014\033i\276\221\013G;\376\272\340\275@\222\022\276\315\247\003\276\340\327=\277b\013\251\276\354^\030\275\331\2156\276\366\351\230>5c_>D\014}=\261\362\314\276\214-\341\275d\311\004\277\004\235$\276 \240\304>\260\234\240\2761\222\347=-\226\242>\r\031\017?\205C\202\276\273M\332\275s\'G\276\376\210\355>\300\016\253>U^\001?,\016\200\275\236l>\276\262\006\277\275\003\374o=\0046\340\275!88\276\240y\372=\235p\333=\3238\210>\364\277\206\273\362\232,<\311\302\345\274A{\275\276\010\'S=\256\264\370>g\245\241\275R\223\r>\005yw>\002K\030?l\341^\276\265k\t>\260\177\250\275\353R]\2763\301?>B\326\001>\337\231\200\276\230\007\226=\267\355\314\276\273\007\224>;\024F\2765\372\033\277\371@\277>\353B\213\2740JN\276@\272\007\277Gb\320>\300\017\355=\010\310\026\276\234\010\377>$\275\n>\364x\014;z\252\207=|+\346>\206\361-\277=0\033=\223g\366\276$L\232\276\331\021\235\276pJE>\'\270:\277^\241X=\230\000\251\272\341_X\276\310W\344\276\016m\376\276 \363*\2761<\342\275\330\223\376<\240\332\'\276l!\300>\316\257\232\276\362\021M\276\372\275\210\276\026^\323\2769E\001\277Lu\n>\034z \277V\205 \275\251l\026?\276Ky\276\240\260\037\276\3360\262>c\032g>\376\276\205\274\360\256\375=\002r\320\274\303\'h=\236j\254\275\250\021\215\275\331#\221=\026\355\335>)\341\037>\022\315\245>\3041\021\277n\335\026\277g\362R\276\366[\307\276\363$\345\276\354\014\356=\202\371\020\276\203\253\217\275i\374s=\276\0032\276\300\244\t>\325\3341\277\315\312\312\276\026\227\243\276\2046\002>\364\201p>9\362T=q8`\276\236\037\031>*\025\267=\2563\223>ft >e\243\231\276\203<,\276V 
\206>s\3028=\026\330\222>\241{\303\276[U\000?m\205\315\276%\216\272\276\027\255*=\340\326\006?\307,\214><\224\234\276zD\017>V6z\2764?\250>@5\036>\376\020\262>\273G\312\275\353\365A\276Bl&=\006w\337>?\227\204\275\215!\320>\351\330\010>\'\207\033=h8\304<\274n\342>\207\215\326\275~k!\275\003\250h=]\257,\276\270B\177\276]\277\270=\033\344\357=}\245\250\275\274\271\277>\t\352\236>\211\246\001\276x71=!\203\000\277\207\301\007\277\010&\277\276f\206\020\2760\n\210=\235@\223\276\221\231i\276\274\357\235\276\016\234q?k\013<\276VL\326>\025q\266\274\223x\266\276mC\345}7L\277\255\036\014?\254\014d\2766\306a?1\342\257>\325V\361>\236,\254\276\022\220N\275U\233\254=qs??\244rW>\225A\264\274\264b7\276\337\374\013>_W!=9\251\t?\334\223H\277\245\224R\275\376yW>\3008\320\275We0<\362\363&=\177\t]>\177S\312\276e\021^?\272p\020\276\021\257R\275\036d\217\275f\021\343\276;\031\221>\301v\034>\330\367\036\277\371T\361=\336\r<\276[\032\330\276\313\026\021\276\202N\217>u\010\267=\017\336\215\276yP\370\274\243\376e\275F\001\300=\363 [=\224\333\353=\263\366\004?\246\373\023?\313\222(>!\220\r=[-\210=\334X\n>\244\361|>\252\265v\274P\364/\276\314\n\230\275\237\227*?2\224\213\276\233E\030=\274d\367\276\212\2224\276r\366\031\276\312%\331>^j\230\275l \355\275\033\214\">\230|\004?\231I\017?l\337??O\352\363<\237\377)\276\340\223\215?0\346\243\276\000\247\251L\t\202>\r\217I\276\335l\'\275,\371\020\276A\257\372>X&E>\350\252\311\276\317.\304>v\3743\277\242[)=d\270\225>\372\202\276\275M\303\243>\354&`\276\352\332\232>f\360Z?\232\004g\276ob\210\276\242\217>\276\243I\242\276Y\\\246\276\014\355\313<\220\023\\\275bw\r\276\246\230S= \025\352\276\316F\253>\256\227!=U\344o\276V|\016\277\345\005\350<\371\253\007\276\t\272\251>{\2618\275\206\257\317\275\030\366\231>\t\225\345\274\241\247\216\275-\322\313\276\200\307\036\276\017\000\355\273\247we\275}\366\206\276pE\350\275\347\276\361>N\374s>{\003\307\276>\340\300\276\3058\336\276\025C\325>\025\333\335\274\214\325c>z\267\013\276\233\272\207>\003\255\276>/\274^=M\263\306\275\236g\002>x\226\337\275\352\234\204=j\324\346=\032\nf\276\024;c\276\217\325[\276Z\222\030<\215\0310=e\026=?\036\267\363\276\275/\024\2777S\265\276\341\373\300\276eN\002\276\266\372B<\024\266\370\274\'X\023>Y\240\364=\361\211\004\276\325V\004\276qb\021\277\275|@\276\013\031w\276\357r\013\275\013-\265>\034\342T\276\027\262\032\276\np>\277Yy\311=7\000;>\250>\030?\2337\007\276\230`\031=\367\277\277=\\o\n\275\322\357\200\276-l\\=\232\264\027\337\324\214\275\240\335\221<\000z\213\276\246\033\355\276V\232\272\276\"kd= 
\220i>\272\247\326;\306\262\003\276p)\3177Bh\356\274uT\277=\374\262\307\276B\226\301\275\\\034\226\276+\361\030=\230\365v=\254\245F>G\313Q=\237\343%\277\'\262\205\275p9?><\274\267\275\r>\365<\017\345F\276\377\343\342=FV:>qV\256;\250\306\030>\341\023\331=\200\257\232\276G\007\033=\345\206\327\276c\0362=\021\255\200\276i\2062\276}{L\276\333\225!>\332\231\211\275C\014\231>\377k\316\275<\265\333\273\227\205y\274\016\316\260\275\\\224-=\354\332\264=q\265+>\036\355\213>\3249b\275\030\302\305\276\250\203\306\276e\375\016\275\2239\202\275\000C\006\276\354/~>\234\333\355=L91>X\221M\276\231\r\242\276\355\361\262>\233O\r>\020\030O\277K\264`\276\306S0\277n\201h>\003b)>\357\223\263\275MsI\276\303]Q>A\271?\276\267\323\023\275-\371\373\275\334\333^>a\031\256=\356q->\3205U>\247\\\307>:\254\341\276\263\370F>\tD\270=\307\027\007\276p<*>\2249\216>\\H\205>\230\222\">\265.9?\341\031\032?\361(\307\275\r\271\370\276_*\321\276\202\341t\276\250\006\234\276\260I\330\274u\257\370<(q]>\005\000u\275Su\032>O\276$>\000<\244>i\244\027>,\272k>\013)\207\276\206}\274\276f\216\234>s\273\332>\266o\200>\326]n\275\300\000Y\275\n\215\267>\0017\337\276\373\022\034>\365x\301\276\334Q\343\276\025+1>u\234\212=[\317C\276\246x\004=#\177\336\275\231\310\032>G5:\276\234\027\301\276\362\260\036>\361\356\t\276\316^[>\370T\216\276\326\326\361=\240\247r\276\336\362\315\274\270\n.>\243Fy\2768\225?\375\2318>\224\322\237>\001g\262\276\262=\332=\344O\264>\r\320!?>>\252\2767\240O\2776\370>=()\001\276\004\247\345>\232\007\210\275\234\312\330=\360~\016\275>\323\315\276\213\"\002>\2060\336\275o0\032\275\305V5>\211X\347>^\003\001\277]\221\227\276J\316l=\202X\006>\013\355\322=[Q\274\276+\374U=\326\332i=\364\232\206>\225\t\342\275&\215P\276\207C\263>\020T\333=z\347.\276(\361\213\2768\260\273>J\201\241>\216Z\260>)\211\246<\243v\004\2765\362u\274\346\024\244>1\203\256\276Mj\275\276\357\016\211\275\274\265,>\314\016*>\266j\263=\350\323\300\275a\210~\275\3066\365\275/h\275\275\3532\304\276y`i>S\014l>\325\215\005\276P\226\022\276\234\334\302>\352m\001\276\262\232=>\206\266\014\277\302\216\321>g\2427\276\304!;>\261v\243\275\033\242r=V\200\257> \322\225\276w\334\242\2756\2660>l\350\207=\300R\253\276\360\317q>(\212U\276\003\\c=\034\024\204\276+\351\377\276\324\325\251>\345\177\350<\0355\312>9I\006\276\335;\\\277X#=\276\323O\312\274c\375\037\2776\r8>%^{=l\250\375\275\0317\245\2762r\241=p\211\036\276\034v\035\276\005\353 
>\251M\001\277m\220\320>lt\307\276>d\336==}\356\276\3670\257\275\231=\272>\337_W\276x\246f\276\213?`\276m\211\242\276\352\325\236>3\032n\2768\267\245<\357\262\372\276\031\361!>\202\237\335<\372\313Z\276\207\3603\276\221\204b\235\240\243\276\031\'\t\276\204\2424;\300\210\037\276\330\2773<\204\226\367\275Bw\255>\276\324\262=\353\226+\276\2256\026\277\244\242\016>\305U\252=S\316<>|O\030\277\037\357b\276c\374@\277\273w3\276\0365A=\362\357H\276Ite?d\361k>\203\303\217\276\357\376O>\326\2651\277\001\256K\2752I>=j5u=2\t\221>\356J\352\275\354\277\216>L\341\363\276Q\321\376\275d\302\023>\334\256\007>\316kh=\345\017\355>\317&\261\276\302\2437>\221\265\344\276\276\214\255=\343k\216?\t{\310\276\267\221\034\274f\033\326=\361\206\026\274\354d\374>\232\305_=\221/!?\241@k\276\333\365\333\276\014\307\330\275@\253\277\276\335\204\346\275\360\212<\2763\231\020\277\237\247\351>@\025\202\276\003\303B>\235sZ>g\206\342>\231f\204>\344\003Z\276#&\310>MCF\276U\257\374>\273L\276>\313\205\300>\277\235c\275q\021^\275\000\201\305=\367\301\351>\0363\336\275\205\214\235\274s%A\276\271\304I\275G\212%>\202aC\276_ND=\2544\264<\313;\314>\326\030\r?\232\362\315;\205e\177=\036SL>\225mg=Z)\024\277\355Y\350\276Z \032\276!_\277\276I-\002>Wc\340\275a\022#\276\034\001\200\276C\301\340\276\264\200\323\274\346\330\212=\355~\325\274K\226\006\276R)\314\275\372`\323\276Z1\354>\357\367\265\276\357\276U\276/\365\210>\030\251$\276\324\311\206=\025_\200>\256\352\004\276\351+4\275_t\026=\344\242F\275\036A\327=\t\313\277=\337\371\314\274r\274\232>\232E[<\316\276\3309\220\241!>hm\213\275Z\240\035>\303\210\013>D\026\223>\235\021\n\276\254D\202\276i\272\220\276\313\377\004\277\332\273M\276\370\013P\276|I\232\275\001\277g>\002)\253<>Ck\276\327\202\243=B\232,>\373q\002?\007P@>\201z\260\276\203\230\316>\224\\\325=/\340c\276\305K\007?\276\'8\276\241\277\303\276\021VK\276p\233O?\341\271\001>V+\260>\217\200\350\274\252K3\276\277\333\237>$L\330\275\371\350\321=p\300\343\274\266\r\237>\237\256A\275\342\010\222=\026\334N\273\213!;>s\317\271;\261\311i\276\337YY\275o4\267\2734\036\226>\320\365\000>M\3075\276]V\020>H&/>\375\354\017>\203H\264\275\340\300\314>t\312j\276\264\211\237\276[P\036\276K,[>\300}{\275\210Yc>i\301\331\275B\036[<\257\356\236<$\035\275\276/:;>\301\277\220>^\032\301\275g\016\014\276\343\224\357\275)>\277\275\361\024[>\006q\322=\344/6\276\005\301\301>\\D\223=x\256\240>2\312\213\276B\324A\276b_M\276x\004\227>n\254\303>\006\3054=/\334\002=\027U\024\275q\265[\276\301\265\237\276y\213\220=\360Y\221>}|c\276\347\246\215\276\265s\006\276\200\203\322\275n\023\355\275\243q:\276\333.}\275M\367\211>\2465\220\276\203\n\261\276\217\312\310\276\206lM\274\302\375w>#\305H>\000\232\031?\336?\214=1u;=d\321_>\371z\273\276\202\223\242\276]\304\272\276\213\222\005\276\324\025\331\275\324\tN=\021\017\255\276\312s\2249.\327\205\276(F\203>g\200\252=>N\024=\347\215\007\276\226\\\222\276%\227\307\276\206\221\242=\232v{=[\023C\276\366\203{\276=\233\006?\317\364\205\275#\226v=\013\304\237\276\337i\001>\200\234\226=\214\006,>\301\331fv\277\t?\272@2\276\332\373o\276\035\257\351\276\252\006\001\2752W.\276\201\204\363>\234\351\336\276\020\036\177>\275\310w\275\024t\t=\336\325\207>,r\212>\032\037\031\277\312\004\267>\362\2747\275\'t\'\276N\203\367>\037\327\231\276o\003f\276\361\375\272=\366\216\375\2762\2726=\327\317\016\276mm\021\275\203%\272>\260y\301>\206\021\222=\376\034\245\276!\307]>w\3629\274\301\255\023\275\220\325\337=\3466z>\016m\t\277\277L\304=Q\340[>hY\211>N\214\314;\333E=\276@\002\353<\364<\201>\353\254\244>\r\330\302>J\005\235=\314L{>yM\240>\201\256\225=\266\3
770\276\2139\310\276\373\265\215\276-G7=gZ\303>\250S\'>,\"\352>\375\337\330\2755\214\252\275\002\335\241\276iW)>\214\032\307>\265\315\207\276e+S>)6N=\r\360W\276\027\314{\276\231\336\031\276S\267\032=\347\033\315\275H\366\240\275\327\2551>KE\232<=\376\330\276\t\307\026>a\177+\275\r\242\007=n\235+>M\265~\276\344\357\226>2h,=b6\276>\037Q\"\276\343nE>9;>>\273\3427>9\0017>\242y5\276\326\207\213>\256\215M>-J7=\253UW\276}\223\232\275r;\264=|K9\276\n\037\313>\235\363\224>\2336\206=\"\321\346=\315]\314\276*\211\345>]\325\201\276)8W=D\235e\276\211&\351>^Q,\276\313\336\310>\026\224\350=5\264\373\275\303\017\240=\247\312\240\276p\356|\2768\016\267>(\312;\276\022\353\t\277u\230\203>d\036\252>\307\277\273>\027s$>\346\'\033\275L\222Q=\3618\236>\273\363}\275h\202\272\275a\367\364=\335\244:<\3721\200>\216Z\204=\211E\021\277\373\376X>\315\231X\276L\212\234>a\n\207>\\:\030\276:X\262>\367\222#>\376RL\276\006+\247<\003\212\325>J\313{>\256@\255=\372\017\'>\371\023\264\276\2425\276\276&;\327\276\013c\t\276\245\350\201\276*\220\346\274\336n\271\275\n\261/\276\032\311\226\275\373\307b>\2459\272=\000\217\303>);G>\2735U\276U8$=F\211\024\275\002_d\275\351\272\313=Q\302\370=.\324S\276\031vO\365\273\236g\277\276^\r\260=`\024q\275\337\270\323\274\376\227E>\205\301\263\276\274T\007>#\260\325=\037\225K\276l\035\312\275?\232\214<\210uh>\240T\200\276\200(\230\276\362\020\003\277\036?\030\276\201%\213\276\014uv\275\225]\010?\361\236Z<\234\031W\27630\014\276\312\363\262\274\330b\361\275\277\201\313\275W)\036\276\242\227\213\274r\226<>\214=\324\2757\037\255\2757\301\276\276\355/<>\301\271\271\2767\025\300\275\206FC>\340\277\262;\206g\336\276\247j\371\276\257#\270>\370\206%\276\275e\202\275}\306\235\274@\004\262\276\215F\315\276\027\272\275>\344\354\n?\261#\002\277\000z\240\276\257\323\254\276BL\356>\315\324\006?\267\255\340>9\303\343=1\243\274\274<\373\254\276vw\014\276\321\305\244>G\010\203=\255\217+\275\374\355\370\276\035\330\303\276\202\374\025\277,\261\372\275\345\344\016\276I\217B\276Vf\337=\333\334\200=\210Qf>\2778\002>\211W\"\276%\327\306\275\033=\001\275!M*>8M\325\275\330;\273>\002\311\315\276lD*\276%\036\002\277\203\363\221=\342q\263\276\002\374\262\276{t\"\276~\376\003\335\2763\205\305=3O}\275\374\"Y?w\276\017\276\030H\200>\026\206P>\237W\027>^~\002>\317\366\016?\304\260\315=\230&\263\276\244C\254>\205;\007\275\010\203X\276\027\r\n\276P\261\002\276\377\362\313;\267\257P\276\317\267\353>Q\004\336\275D\2008>\276\2113>i\223\024?\357i\177>\022\024]\276\0241\006>\331\343\224\275\356\005\321\276U\210\036>\305\233\356\275M\006\232<}\214h\276m\016\334\276\'9\305>\266D\255\273x\231\260<\"w\251\275Z\020\210\276\014\246\330\2757k%>K\245\230\276\207\277\247\276\217v+>\274 \215>\001\367\360\276$\272\210\275Bm\325\2759\3722\275P^U\274c\002\360>\336\272\374>\035\332\351\276\003!\201>\253\300a>{\235o\276\354#\252>\272\032*\276p\3535>\022\363\340\274\024Gc\276R\371+>\230\"\253\275\342\240\366\276\027\373V\276\362\241\307\276}<\035\276a\227\266=\244\330\003=\227\303\351>\"\361h\276\215\366\346\275O\300+>i\240i>O\342|\277\\6V\2749\237\333\276y\'6?\240\226\'>\255J\r\276&\313\223>\267\201%\276f/\307>&\301\242=\373J\\>\310;\204>u\275\022\2753L,=\362d\017>rS\372\275\260u\217\276\031\036\335>\373\256\312\275\021y<>%\364\031\275\236\317$\276/\023\303>\211\006%\275qu\237>U\230\026?\2564<\276r@,=Sl\261=>\001\232=\261\370\303\276S\006\211>\227Z \276\347\221\204\276d\372\370>ce\220=6,\220\276\3131m>\230w\341=\027\371\275=\312%\265\276c\327\017\277\036h>>\303\013\201>\301M\322=\257pJ\276\323\373\r> 
\3639>j\022\025\275dT\301\275\202ld>e\364\000<\335l\021\275\326\247\371=9\205\003\277tr\236\276\255\3301\276\305\n1>\230\345V>\225\347F\276\237J\317=,\022\202\275l\315>\276\nl\206=\312\355\364=[\205\234\276\316\273\234>\306i\210\276\363\315\305>K,\003\276Gx\376\276GH\216\276\\\236+\276\2415\226=\217\342p>>\027\211=\0350\221=\315\013\004\276\200\315\247>u\r/\275a\310\365\274\302]f\276R\310s\276\317\n9\276\035\323\226>;\321\334\275*\372\232=\255\265\r\276Z\201\236>\3671.5\236\201\276g2\206=\213\264\357=\037\302}>\200\021:\276\226\226\234\276\245-\245>\010\036\353>T\312\361>k\254\360\274\034\263\034=\264\254\362\275\312k\023\275A8\337=?zY\276\n\235\204>\006h\340=Sl/>\363\266\\\273\207q\247\275\276\363\321\276\221\222\022>i\300\310\276\367]H\275%(_\275\276\327\352>\003\210~>\006\303f\276j]\347>\236G\006>%\317\267\275\333K\215\276\034\245\356>\212a\214>\244j?\276O\221\200\273~\246\006\277\223sS=\322\0212>\313\\\227>[k\202\276\251J\217>\032\366_>,\361\361\275\000\353\010>\355=j\275\033\347\327>B5\221=\006\247\304\275\270d\010?\004h\224\276d\372\202>\377\004\255=\353=1\274\007\217\210\276\317=\023\276\364\207K\276\020b\255\275\266d\021?y\301\334\275A\026\025\276\315h4\276\222\304\374\275Sha>\203\241\267=\250\022\205=\243&\244\276@\343\250\276\251\360N= \255\271\275<\023\225\275\3325\203\275\006\r4\276.\222n>\331Df\274\260\022*=\372\267)\276#\352\241=\205w\374=C\226\222>jD\303>\256\\h>\335\212\330\273\353\301\350>\2221\363\275\343\366q\276-\374\000\277$\013D>\253\336\034\276-\240\227>\256\234\255:\307%\223<\333y\365=^u\022\275\3211m\276j;\030\277\371\010!>\265X\007\276;\t\014=\na=\276\344-%?\311\354\004?\364M\301\276 M\362\276\252\373\016\277\207\000b\275\364\276\372>\035\004N>\234\203d\275\006\341\'>\037:u>\007\031\241?\347j\276\314\033\006>%\333#?b|N=\273\252c>\276\005\021\276\240\354\022\276\375\035 \276\214X\005?\251\'\235=\352\001e\276\253\256\177\276\342\215\366>\205\227\221\276\020\226G=lu\315<\315D\272\2751}\304\275%\014\357>/\373}=\326\260\267\275kL\307\276\312y~\276\206\023\220\276}.\262\276\\\327\251=\373\371\253>\377p\"=\354pG\273\322yp\276\372\001\234\276\021\335\331>\357*U>0~g>~4\310>\235\376\r?\243\021\353\275\205=H\276ZPX\276F\2753?\331\271P\276\212\017I\276s\262\n\275\\\224!\276\241D\342\275\274\017\203\276\222f\177\276\363<\336\276\355\230\227=\032\276J\276\347\177\034>\322\363j=xT3>\261\220\274\276\371\025\365\2758\252\305\275u\345\257>b3\227>r\225\247\276\304\245C==\201s\275\r\t\201>\374[\372\275\034\212<>\250G\317\276\352\213\020\276\266\202\262\274m\256\244\275H\241\337\275\364M\214\276\362n\227\276\354!\201>\365j|>\356(\370>M8\212\276\334s\177>\364h\215\274bb\364>wK\311\275\210p\372>\257\367\262\276\303\207#;\322i\032>O\0313\275)\004\361<\316\351\005<\326\311\316>\255&\007>\001\3649\276\310\222g\276.V\217\275\367\267*>2\226:>\205\276\013\276j\247\220=\356\330\000>\231*\';~]\237>r\371.>\026v\261>\252\376K\276\210W\035\276e8J>\2671\030?k\'u>\204\307]\276\272\rB=\004i\r\276\327\324k>K\365\257\275\360Q\223\276\001z`>7r\324<\253\004\303\276\030\375\307\275\261\022)>\231\326\300\276\263OU>Yly\276U\224\037\276>\370\302\276\005\376L>B\257\200\276\230\210\275:}\313\243=6\213l\276\242\345\003\277\250\342\036=S\226M=&i\275\276<\3112\277\323\223\341 =\014\277\255\211\343\276?\360\244\276\352\375\034\276\0133\213\275\255m\236>\254wV>8\357\306>\n.\342\276\220^\277\276\227)\207\275F\2262\274\344\372g\276\245\367\335\276*\'\226\275\214\266\204>\266 
\306;\3166\006\277\006{\006=kl\355\276~\331\335\274\340\305{\276\207`\260\275\020\371\020=[\263\t?\"\026\366>\243\256\366\275\035\214\177\275\226@v>!\353U?\324Y\207\276\004M\022\276\327\333\365\275o})\276\nKA\276)\213p\274?\305\016\275\235\354\336>7\333\331=\236\002\274\276\261\367O\276\346B\240>\273\220\023?\177\\ =\200\233 \275\233K\216\276\330x\013=\346\274\032>qG\305\275/\222\220>g0\315=\221E\353\276G\206G>\357\254\223\275\003\004\032?\026\327D\276=\262\237>V\352\247\276\t\230$\276\363P5>_\275\242\276\020\2621\277\225\335\262\274\250\027v>\373@@\276\356\240\220\276T\3241>\2266\330\262\351\364\275\026\302A>\034+\205\276\\\022\342=\346\303\343\276\360\375\375\274 \302\010\277N\300\025\276_\376d>\373\t~\2769\203\253>g}\023\277\306\273\213>\255\231v>\356\316r\275\260W\027>\242%\270\276\010V\310\275#\020\221>$\360\277>\220`\r\275\254\022\300\275\010\261\027?\355\227/\276d\212u\275\262K\304\275\235/\210\275\337\341\272\275\311\305\026\276\306\207\204\275\346\376\236\275\270|\372\275q{\321=\0138D\276g\310\235>w\311\371\276\347Z\014?RNu>\016$w>c\255\031\276\356@:=\230\224->1\014_>\302[3\276W\013\343\276\346\016\363\275\325N\\\276w\\\266>\371\373P>\310\251\210=c\306&\276j%M=\017\322\036>\237\320\260\276\252\301\003>\230\026\254\276*.\360\273\200I^>h\314\352\276\312\323\214\276b1A>I-2\276l7\376\274\315\271@\275+W\247\275/3t>\331\241\007\276V\210\002?\330\002\037>\017\032\322=13w=\022\024\304\275\243:\376<\355\205<>\006\362\230>z\363\332\275B\217\n\276\027\3464>\027K=>\315\005\005?\313l\027>\332w\234\276\014\223\350>\320k\221\276\024\307\207\276\3721\241\276H\266%=\212K\226\276\270\343$=\032P\201\276,\345\215\276\221\216\273>\364V\034>\347\3417=7Bg\276\346\220\314>\351-Z<@F\255=\373\224\377>\275<-\275\264\017:=\273\200\273\276\362B\001>9\357\226>\254s\367\276\316\326c\276\374\333X\274>2\225>\315\333G>X\364/=)\341\212=\352\354\035\276\370=\264<\241\263X\275\372Cj>}e}>g}\325\275\014\254\315\274k\262x>\223\210\217=\342\362\272\275\330>[>\024\346\302>\022\322\355\276\236\203\200>,\"\372>}\310\377\275\217\340q\276\211-\207>\201\206\307\276h\220=\276\321$Z>\266Z3\273\005\225\023>\330%\024\277\306\252\026>O_\301>\324\304\243>\206i\036\276\313\223\361\275\202\356}\276\364\354\003\277\325\010\300>/\246\272<\0141\t\277\260\356\341\276\303\212\271>\344\3667:bN\311;N\233\255\2758j.\276%\275X\276\246\205\r>\206\272\306>\300\303\346\275\333\375?=\352\032y=\013\004\304\274\200F&\276\3669\361\276\241\212\322\274\027\222\320\276a\272\343\275\024M\002\276p\215\212\276>\3044\277\\\313$\275\337\242q>I\277\221<\365\215\r?\355\367y\275<\227\224>\2133\211\276}w\257\276$h\252\276\'\245~>\276\n\317=t\374(=!\037\025?\203\225b\276\341\257U\276\362J\307=\241\241\025\277\010y\246\275\212\326@=\317\333\206\276\\\200~>\343\366B\276z&\010\274\227\326\027>\323\007q>\270\345\272>\223\305N>\rE\312=s\320\003>\024\022\r>\177\003\031\276\240\377}=u\263]\276W\270:>c\354;>\035\233\211>\231\002\007\276m\270\020??R\224>l1\013=\002\317\202>\256\303Z=AB\212=^6\211>\017\343\001>S\311\223>\313\273\226\2765(x>\340\321\230>\005\304\202>\206mI\274\244&\356=\024\320\262\276@\206i>\375\323\254\276Xf\353\276|\207\264=p\360T<0\233\354\276\347\212\246>=\210\021\276*G\362>\311\374\322\276\240\tZ>\226^\342>}(\363\273\323Ti\276\364\356\237\276\235\227X\276\273a2>\337\364\035>}\311\252\275\226\\\373=\375|:?\301\340\260\274\n\257\013?S\234\353\275\006\322\036<\222\302\014=t\226^\276\270dG>*\272\">\332\317\266>7\335\367\276\235G\017\277 
\273Y\2768\371\244>/{\036\275\356\2207>\225]\036\277\311\327\261>o\210\307=\347\200\'\275.M\033\277\327q\030=\244\323\204=r\203\232\276\336\364\260>b\206\242\276\265\222\350\275\320\277\244<\325h\230\275=\222p\275\277S\204\276`kT>\325\215\313\276\305\221\205=`\010\212\275\323e\205>V\356K>\226L\362;\237\372\361>\246\304G?c\006d\274!{\246\276\367\243\204=\205\267\332\274B)9\274\020\201\325\276\355\333\307\276]\264\237<\217\334\006>\2156\257>Z&]\276\242\343\356\276N\375\221\276\014\027\304\275Ar{>\023\261@\276\323\277->\321\200\257\275$>-\277,+\t>\017\243/?n\371\302>R\303\221\275\234zf\276\037\027j\276\345\326\217=\213\016W\276x3\204\274\261\242\035\277 \272\250\275 \217\326\276\235\232`?5\252\030\274\2647\010>1\325\247>\013WA=\3177\002\277\277h\350>\310-!\275daN>\312\364g\275\362M_\276\314b\302;\311\334\357\275\377\241\037>i\033!=%\214\026=s\030\261\274v\343\001\276\211\3056\276\265\315S\276\240\242\027\275\3773-\276<\340\355>y\362\356\274\275\250\007?\010\337\367>\375^%\276\353+I\272H\224\311\276\340f2\274^R\337\274\211s\251\276\024v~=\223\367b>\'\334\262>\254\004P\276\022\023\002\276\t\356\300\275\375P%\277\275z\334\275\217m_\276\302d\233>\361o\300\275UF!\275\"\242\366\274\371\2152>\202\244\263>\030\341+\276\356\202\346\275h\364\227>n\253`>\227\222(\276\001\232\031\277`\2471>]\225\361=n\362i=8h\245>k\206\300>\007NP\274r\376\024?\262\353\016\277\200mm\275U%\217>\331\216J?o\370\203\275\202\246I=x\370\262\275&u>>\330\007\200>\247\350\030\277+y \276G6l\276\322\031\251=\352\323a>Pl\261\275\234r\331>_\255\332\275\217\360\007\276S\261\375\275\001\272\014?b0\016\275e\331\005=\252\336\243\276\242[\322\276\213J\220>\035\305\332\275\364\265\244\275\270\365)\276\025\251\310\276T\305V\274\257\243\276\276=o\005?y\324N\276K\177\212\276HR\037\2761\251\262>g{F=\310k\t\277\016m\257>\277\0136\276n\375\203\275b\366V>\225\352\232>\232P\023\276D\227.>\264\251\357\275\177\033\224\275\263\246\271\276\277\035\177\276\377\306d>l7\007>\276z;\276\260\272V\276}RL>\001\034\255\276r\235X>\373\265\221>P\225\247\276a\214\233\275\353\030e<\023\032\327>`\220\215\276!\010\315<5U\034\277\223/\327>x\301\342>\332\363\247\275h\000u=i\006\211\276{\002\316=\003\366\034\276\005!\020\277\231:r>\335l\274>\006P\030>\351\300\223\276g\014\232><\230k\276z\252\016\276A*V>s\n\311\275u\321\032\276\312\240\273=g\026\031=\021\351\237>.Y\324\275\206\267\232>\250\204\324<\003W\207>\277\256\236>R,#>\364\033.>\rI9\276R\212\243>\0203\362>\335\245\260\275\367\330\016>\273\273\031\276\"\277\324\274\352JR?\024\211\215>\261\252g>\226\270\221\276\311\256\271>M\347\367\275\034\362)>\357-\211\274\240\371\326\275N=\031\276\177\357\274=3\347\245\275q\354h\275Z\023\305\276B\226\r>\037+\001\276\300\3230>\273\343U>e\353\353\276\200\323\270=\365\177\352\276\300v\332<\021\212\346\276\224g9\276\t\016\020\275P\031\231>\030I\301\276$\335\025\276\256-\234\275H\220\004\276I\370\031?\356\276K\276\3559\007>\223Q\037\277\326J\t?\324:\331\274O\016/\277\313\370\022?\226K\006\277\204\232_\277\'LT\276\311\233\275<\2577\264>%\254\210>F\275h=\251\345\243>\361j\367\273\301\327\251\275\r\275U\275\004R\003\276\242&\346\274\355\313\262=\000T\300\276\016F\004>\321\214F\275\224\246\342\276\257\317q\276X\323V\276\327\006 
\276\307\002\260=\201\'\026\277\271\3707\276\224f\271>Q}\244\276@\035\215\275\"\025\307=\014G\361\276\341\366\006\276\364\026:?\013e&>\225F6>\304\335\343\276\263\240H>\204\213\352=\273\345\326\2764\306\340\275d7\220\277\342\275\233\276\255\000\246>\242:\005\277M\212,\276\030\312\340>\3251\375>\262\002\227\277\355\214m\276\001\342\234=\317\n\307>\217\177f\275z~\033\277\026E\207>\201T\207\276\023e\322\276I]\322\276\364s2>VJ\004\275\307@X\2767 \252>\004\014\021\274\'V>\277@b\n>\271\326-\277\201\267\302\276\277\026\261<\206\372\215=K\262\261\276\236\272-\276\3754\n\277T.\254>\276D@=\nl\206>\205\236\266>\217\2463>\343\225\345;\276\231A\275)\031%?n9\014>n\377\323\2754\306\223\276\032\007\364=\024\013\311>\357/\354>`\224\373=W\322\n?\013\347\233\276\377\n\226\276\315\200\312<\224L/>\215\241\034=r\204\017\275\312\030\343>\372\205\257=Y&2\275M\312\311=\361q\251\275z\310\313\276\376\237\001?\233h\026>\214\211\021\276gA\236\275\362\223\376\275k\317\366=/2\207=p\274\256>\r\027\270\274\360\033>>nP(\277&\200\360\275S,\312<\tg\363\276\3171\307\275\266\325 >\203l\222>]0S>\251\217\272=\005\300\030\2773\177\\\275H\317\354=4j\007?\325\022F>\2077\307=\004\035\266>\312\000\247=\370\245\014>\357\220\224>g\340\331=\331\362M>\345|U\2769\212\340>\017I0>\257\000\000\277\346\026\242=\274*\332>](\231>\312\305&?\220\330\232=\277\205\200\276\\$\345=\n\013\005>\003\240\341>Z\323\234\366V\270\276\247\336;>C\240\021\277\257\274\021=&\334\201\275\316\'\324\276\013u\260\275\363K@>\'\031\031\276\366\263h\276\207=*\276\025\212\215\276\010\330\007?Z\222\375>J>\331=V\211\235\276\332>r>F\354\331=\342\002\230\2760$\016>\364\320\255>\267\240\321=k\312\353=%G\204\276l\256\346\275\236\364\306>\206\232d\276\274\375\201\276\ry\322=\346N8>9\252;\276X>D>J\376\017\277\'*\030\277\344\302\210>\277\234\010\276\266\343$\276\345\326\215\276\220G1\276\357O)\276\200aH\276Ov\345\275E]\210\276\\\025\213\275Y8?>\264\217\232>]wP>\242.\263>\210\271\215\274\211*w>\262\003\004\276\336\262\260\276\001\300\321\2751\037\357=\026\265\223\276\222\254c>\3143\306>m\350\r>\r\210\231>\254\027\373>\366\344\272\275Y\210\323\276\370\026\273>5\263\300>\215\264\'>\251&\321\275\376x#;T\363\356\2765\027@>k\027\227> p\261=\211]\213\274\025\016\004\274\215&\020>u\026\366\276\021\336\273=\006\272q>C\206\">\221\262\246\276\277\202\275\274\004_\232=\233 
f>R&\212>\373]!>\006\022J>\366\005\013>\316\326B\276z\030+>\"\371\347>W\243\263\276\014\270\\\276k{3?\201\226\032?\023\370\324>\330\007\375>\236\216M=\307\034\201\275\247\027\245\275\342g>>j\021\236>\333\tT\274\314R<>\227\370\251>\230\361\251\276bY\341>\335\375\307>\331\326\'\277\203f>=\310\322\271\276\001\204\252\275\324\257\230\276Z[\274>~\205\220\276\234\023\006?K\224\300>\\\275\207=\367\2521\276\243\237\001>\257}\\\276s\224a\276\025\314\023>\020\360\033\277\025\254\266>\t\200>\277\327\2453\276\230h\275\276\274\0239=RR}\276\301\337Q=\004~\247=\276\211\033\276\215\366\371>\006\356\314=-\313\364\274\231\tJ?\344\034\246\275h\276S>\354\r\214\276\362\221\233\276\017^8>\3224\247\276LA\240\274\310\017.\2760q\017\277\220\333\245\2767\244\304\276\274\211#\276fo\303>9\314\330\275\330\311\251>7=\253:e\331w\275\360\005\'\276\323\250\272\276V\"v\276\032\211O>\375O\361\276\255\270\367=\306b\036>\240N\331>\220\307\225\276O\346y>\311\207\032\276\363\361T\2753e\304\276\021\354\207\274\315\000\345\276\374z\206\276i\250f\275P\274\022\276\260]E>w\177\013>\030\224\316\275\274x\226><\024Y\277\317\351\344=\257\221}\276\270\2007l\257\276Q\354\n\277\242\312\235>U\024G\276\213|\034=l\331\253>\345Xg\276;\336\310\276\017\374\357=\0071\360\276\235\346\320\275\210bD>\303\001\025?\321\206\210<\346\t\003?1\270\241=/\262*\276\216\032\223=\220h\'>\336\302\227\276\366\2428?\351\207\271=\026\257\213>\252\365-\2770\211G?\212&\373\275\276Y\030?jf \276\273s\031\276\244\037\020\276\275\267\031?\365\214w\276K\312\311=\007]\336\276\264\346\004\277i\307\374\275\302 <\276u\r\305\276H_\245\276\226\344 >\371\244\024\277\340\331{\276W4\331>o\017x?\316\351H?\376\024\321>\215 G=\320\315\330>]\305\361>\307sZ\276\\@8>8\031M\276\336\335\017?\025\t\224\276#I+;\325\224w=\016\355\341\274\307\177O>\005I \276o\001\272>?\365\213>\224\327\336\276\nw\376\275\214\334u>3\350\003\276\350\230\264\275:\216\">\351\333\250>z\022\237>\205\207i>\246Z\007\276\346\203v=g\314G\276\372\352\251>&\003\234=c\0136\276?i\367\276A9\215\276\346\r\263=\312\006+\275GC8\276\344A\267\275G\245\220\276\275\200,\277!\177_>{\364:?\213S\276\276\347\t\350\275\303\315X\276u\025\306>\266)7\276[\007s>\240{z\276\001\300\207>\360%z>u}\330\276On@>\220\357\324\276\367\271\216\276,\344\220>\356j$=\004\022w>s\236\004\276 
\033\263\275QK\244:N\n\001\276\226=\340=\303M\r>z\272\207>\364E\355\275cg\216\276\361\2319>Tj\264\276\023\324Q<\354#\250>\210\026\014\2777l\315\275\363$\265\275\006\226\022>\273\266\205\275\274c\217\276hN\034>\251!\013>\265\325@>\003%\260>\003&7>\010E\366\275\327\305\214\275B\271\341\276\023\325\201\276\337\230\216\276XKr>42\021>\300)\007=\t?\207\276;\372\303\276TW\024\276L\217\225\276\320\007\237\276-\300\331\275%\301\024\276\032\241\213>\242j\275=vG4?\315\004\344>\276\017\342\276\224\027)\276E\242m\276.\362\311\273[z\357\2760T\233\274\177\246L>2p\362\275{\230H\275\335bR=\212\373j=F\n\013\276\364\026\222>\377\375~\276\271r\022\276i\214\242=q\214\366>\331\221\">\254*\270=8\177X>\236\221\204\276n\300\266=\301)!>\\O\021>\346\240\256\276\\\221\222\276\203\201\017\276\365\267\353\275\030\272\311>\276cM=K}\220>\325\366\317=\3228\317\276\\@j\275\211\006\262\276\241\"\326\276\354\357\276\275l\n\337=GG\227>\200\033\000=a\344\207>\250\246\257>s\273\026\275\030K\232>\277\314\345=\207\274\201>\260X\037\275\334\007\032>\364\347\317\275\231\316\277\274[\307\017?\225\370\246=\221\313\234>\004n3\276C\2640=)\302\376=o{z\276\257\025-=\177\203F\275\222\314\005>|\335\217\275\264|\271=V#\336=<\271\"\275\374N\331\276\321\261\213>g\2740>\232\206%>\357\350\020\277^\322\027?\262(-\276n\'\r>\032f\213\275\224\023\032?\023\212\370\275\323t:\276[^\t\277\241\005V>\245\366\"\275*\327\337<\367\357\212\275\265\314\370\275\355\354A\276\267\275\226=\270\212A\276\\qA=&\221\023\277\317\316v\276\237\327R\274\340hk>\\e\304=3\351\265<+\376V\276\361\363\005Yg.\275]\227\274=\374$\000?\342\237\251\275U\005e\276\350f\231\276\001Q\223\276\'-\322>s$\">\226C\010>\315\323\210\276\333\032M>\312\021\t\276vb\254\276\032R\202\274\341]\332=!\237\234=iQ4\275)\n\221\275\335\271X\276\270\316Z>wU\347>\367m)\277\260\000\267\276\340\022!\276J\202!?\276\254\375<<\245T>\366\216\223>\305\247\347\271PV5<\274\327N\277\224D<>fU\223\274\316n\213\276\253\222\003\277i\334\007\277\327p]\276\215V\305>\354x\236\276$\213\016>\023\\\016\276\314\3260>;z\225\276\033\330\363>f\257\n\275\301\016\234\2762 
I=\215\211Q\275\333\310\260\276\013\223p\275jk\265\276\323\022\221=\230\334=\276\336$j\276\n\326g\275V\373\230>\214\377$\277\025\236\023>\177\t\251\276\232\303\'?\023\037\240\276a\226E>\373\225\306=\014F\000>A\304\332=\223\345)>4H\335=v\315\330=\022\017\276\276\260T\220>Z\311\252\2763(n\276\213,\226>\367\323\365\275\342\003\220>\323\177\247\276}\344L\276\023\220\005<\005`T>B\273[\276\3677\026\2764\010=>\034K\302\276\"\030\363>\262-.>\262\221,\277\343\367Y\275\234u\232\275\222^\022=7\360R>i\365v>\322\345L\276=\244\013\277Ve\340=\245\010c\2767\257\237\276\231\210\260>E\305\334<\242\272\221\275\016ZX>\341x\303\2746u\261>Kt\250\276\260>\313\276\20580\276\346\346\003>\306H\256><\324\347\275]n\022\276\245\340\303\274\016\246M\276]\344[\275\270R.>\254*\311\276U\300\310\276\361y\262\276\271ZI>f\000\240\276R\324A=V\325\227>\265H\243>\005\377\006\277\024m/>\307#\270>\217o\317\275\n\237\333=\247\\\016\276\236\370I\276\325\013\214\276\303\327/>.\351`>_Z\234>\332<#>fV\206=4\321\216\275\206\224\255<\021`\306\276\210@\004>cg\276>\222!Z\274\3329\361=\364+\211>@PO\275\250\220\005=\263\010b>\362i>=\302\025\365\276\312\030\366\276\270n$\276\260\205\300\275PF\215\276\307VF\275\344\201\356>\\\241\202>8\341\227\276\307>\353\275e\230\223\276\004\022\216\276hx\206<\177\345[=o\256\005>N\273\240\275\375$\014?6!\252=P\2129\276\364:Z>*\n\303>)\304a:\271O\220\274\177\273\r\276\212\222\231>\005`9>8\347\024\275P\352\211=`\'\303>6\324\343=\365)\200=\025\003\337=\371\247\032?%\275\254\276O\0057>\202cy\276\273\237\037=z\335]\2762Pn>eO\237>UE^=" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/candidate/kernel/read" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/candidate/kernel" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/candidate/bias" - op: "Const" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - dim { - size: 96 - } - } - tensor_content: "?\357i>}\316\364\274]^c\275#\345\264>\322\357\356\275\022\242\266<\010\034O>\035\031\242=\177Xb\276\007\207\214>\206\035\240>\347j<>p\237\271\276\335\022\242\275\337e\227\276-\202\272=\325\303\017=\301\324\336=\346VO>C\335\242=uU\024\276s\370\357=[{F>\266s\245\275K[#\276\227\370C>\255\324J\276 i\270\274\312\221!\275?\231&>\036i\254>\224D\261\275Q[{>t\202i>C\374\236=\262\203G\276\004\220.\276\032\237\006=+\373\035\2765\005;>\235\375\'\276\212>\277\274\337\260\303\276\034\300/\275\320N\026\276\250\253\000?\326\271`\275\272X\312\275B\306*=\225\327\036>@\227d\276\335O\260>\230\317F>vE\243\276\206j\024>\000\003\274>\225\026P*\276JR\025\276#2F\276\372`\335\276\374\256\244=\177P!>\362Ne=\263\003\303\274\340O\264>~\036\243\276\200\337\t\276d\370\310=\220\375x=\026\311\300\275\347\000\271\275u}\355\276\370\236\250=\010\033\346<\332].\275]\330\205\276\350\254\271>k\327-\276\245r\302=t\253\021\276\210\341\313<\031N\215=W\277\277=]\373\030><\341\200<{]t>\345\256\336=\002e\363\275\256s\273\277\361\310\234\275(E|\276Z\022E<\375\366[\276" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/candidate/bias/read" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/candidate/bias" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/concat/axis" - 
op: "Const"
-  input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity"
-  attr {
-    key: "dtype"
-    value {
-      type: DT_INT32
-    }
-  }
-  attr {
-    key: "value"
-    value {
-      tensor {
-        dtype: DT_INT32
-        tensor_shape {
-        }
-        int_val: 1
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/concat"
-  op: "ConcatV2"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayReadV3"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity_3"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/concat/axis"
-  attr {
-    key: "N"
-    value {
-      i: 2
-    }
-  }
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "Tidx"
-    value {
-      type: DT_INT32
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/MatMul/Enter"
-  op: "Enter"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/gates/kernel/read"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "frame_name"
-    value {
-      s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context"
-    }
-  }
-  attr {
-    key: "is_constant"
-    value {
-      b: true
-    }
-  }
-  attr {
-    key: "parallel_iterations"
-    value {
-      i: 32
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/MatMul"
-  op: "MatMul"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/concat"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/MatMul/Enter"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "transpose_a"
-    value {
-      b: false
-    }
-  }
-  attr {
-    key: "transpose_b"
-    value {
-      b: false
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/BiasAdd/Enter"
-  op: "Enter"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/gates/bias/read"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "frame_name"
-    value {
-      s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context"
-    }
-  }
-  attr {
-    key: "is_constant"
-    value {
-      b: true
-    }
-  }
-  attr {
-    key: "parallel_iterations"
-    value {
-      i: 32
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/BiasAdd"
-  op: "BiasAdd"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/MatMul"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/BiasAdd/Enter"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "data_format"
-    value {
-      s: "NHWC"
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/Sigmoid"
-  op: "Sigmoid"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/BiasAdd"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/split/split_dim"
-  op: "Const"
-  input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity"
-  attr {
-    key: "dtype"
-    value {
-      type: DT_INT32
-    }
-  }
-  attr {
-    key: "value"
-    value {
-      tensor {
-        dtype: DT_INT32
-        tensor_shape {
-        }
-        int_val: 1
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/split"
-  op: "Split"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/split/split_dim"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/Sigmoid"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "num_split"
-    value {
-      i: 2
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/mul"
-  op: "Mul"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/split"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity_3"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/concat_1/axis"
-  op: "Const"
-  input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity"
-  attr {
-    key: "dtype"
-    value {
-      type: DT_INT32
-    }
-  }
-  attr {
-    key: "value"
-    value {
-      tensor {
-        dtype: DT_INT32
-        tensor_shape {
-        }
-        int_val: 1
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/concat_1"
-  op: "ConcatV2"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayReadV3"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/mul"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/concat_1/axis"
-  attr {
-    key: "N"
-    value {
-      i: 2
-    }
-  }
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "Tidx"
-    value {
-      type: DT_INT32
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/MatMul_1/Enter"
-  op: "Enter"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/candidate/kernel/read"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "frame_name"
-    value {
-      s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context"
-    }
-  }
-  attr {
-    key: "is_constant"
-    value {
-      b: true
-    }
-  }
-  attr {
-    key: "parallel_iterations"
-    value {
-      i: 32
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/MatMul_1"
-  op: "MatMul"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/concat_1"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/MatMul_1/Enter"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "transpose_a"
-    value {
-      b: false
-    }
-  }
-  attr {
-    key: "transpose_b"
-    value {
-      b: false
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/BiasAdd_1/Enter"
-  op: "Enter"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/gru_cell/candidate/bias/read"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "frame_name"
-    value {
-      s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context"
-    }
-  }
-  attr {
-    key: "is_constant"
-    value {
-      b: true
-    }
-  }
-  attr {
-    key: "parallel_iterations"
-    value {
-      i: 32
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/BiasAdd_1"
-  op: "BiasAdd"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/MatMul_1"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/BiasAdd_1/Enter"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "data_format"
-    value {
-      s: "NHWC"
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/Tanh"
-  op: "Tanh"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/BiasAdd_1"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/mul_1"
-  op: "Mul"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/split:1"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity_3"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/sub/x"
-  op: "Const"
-  input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity"
-  attr {
-    key: "dtype"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "value"
-    value {
-      tensor {
-        dtype: DT_FLOAT
-        tensor_shape {
-        }
-        float_val: 1.0
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/sub"
-  op: "Sub"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/sub/x"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/split:1"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/mul_2"
-  op: "Mul"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/sub"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/Tanh"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/add"
-  op: "Add"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/mul_1"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/mul_2"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayWrite/TensorArrayWriteV3/Enter"
-  op: "Enter"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray"
-  attr {
-    key: "T"
-    value {
-      type: DT_RESOURCE
-    }
-  }
-  attr {
-    key: "_class"
-    value {
-      list {
-        s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/add"
-      }
-    }
-  }
-  attr {
-    key: "frame_name"
-    value {
-      s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/while_context"
-    }
-  }
-  attr {
-    key: "is_constant"
-    value {
-      b: true
-    }
-  }
-  attr {
-    key: "parallel_iterations"
-    value {
-      i: 32
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayWrite/TensorArrayWriteV3"
-  op: "TensorArrayWriteV3"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayWrite/TensorArrayWriteV3/Enter"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity_1"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/add"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity_2"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "_class"
-    value {
-      list {
-        s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/add"
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/add_1/y"
-  op: "Const"
-  input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity"
-  attr {
-    key: "dtype"
-    value {
-      type: DT_INT32
-    }
-  }
-  attr {
-    key: "value"
-    value {
-      tensor {
-        dtype: DT_INT32
-        tensor_shape {
-        }
-        int_val: 1
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/add_1"
-  op: "Add"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Identity_1"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/add_1/y"
-  attr {
-    key: "T"
-    value {
-      type: DT_INT32
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/NextIteration"
-  op: "NextIteration"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/add"
-  attr {
-    key: "T"
-    value {
-      type: DT_INT32
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/NextIteration_1"
-  op: "NextIteration"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/add_1"
-  attr {
-    key: "T"
-    value {
-      type: DT_INT32
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/NextIteration_2"
-  op: "NextIteration"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/TensorArrayWrite/TensorArrayWriteV3"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/NextIteration_3"
-  op: "NextIteration"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/gru_cell/add"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Exit_2"
-  op: "Exit"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Switch_2"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayStack/TensorArraySizeV3"
-  op: "TensorArraySizeV3"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Exit_2"
-  attr {
-    key: "_class"
-    value {
-      list {
-        s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray"
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayStack/range/start"
-  op: "Const"
-  attr {
-    key: "_class"
-    value {
-      list {
-        s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray"
-      }
-    }
-  }
-  attr {
-    key: "dtype"
-    value {
-      type: DT_INT32
-    }
-  }
-  attr {
-    key: "value"
-    value {
-      tensor {
-        dtype: DT_INT32
-        tensor_shape {
-        }
-        int_val: 0
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayStack/range/delta"
-  op: "Const"
-  attr {
-    key: "_class"
-    value {
-      list {
-        s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray"
-      }
-    }
-  }
-  attr {
-    key: "dtype"
-    value {
-      type: DT_INT32
-    }
-  }
-  attr {
-    key: "value"
-    value {
-      tensor {
-        dtype: DT_INT32
-        tensor_shape {
-        }
-        int_val: 1
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayStack/range"
-  op: "Range"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayStack/range/start"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayStack/TensorArraySizeV3"
-  input: 
"rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayStack/range/delta" - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayStack/TensorArrayGatherV3" - op: "TensorArrayGatherV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayStack/range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/while/Exit_2" - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArray" - } - } - } - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "element_shape" - value { - shape { - dim { - size: -1 - } - dim { - size: 96 - } - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Rank_1" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 3 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range_1/start" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 2 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range_1/delta" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range_1" - op: "Range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range_1/start" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/Rank_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range_1/delta" - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat_2/values_0" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 2 - } - } - tensor_content: "\001\000\000\000\000\000\000\000" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat_2/axis" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat_2" - op: "ConcatV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat_2/values_0" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/range_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat_2/axis" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/transpose_1" - op: "Transpose" - input: 
"rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/TensorArrayStack/TensorArrayGatherV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/concat_2" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "Tperm" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/ReverseV2/axis" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/ReverseV2" - op: "ReverseV2" - input: "reshape_to_rnn" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/ReverseV2/axis" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Rank" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 3 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range/start" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 2 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range/delta" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range" - op: "Range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range/start" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Rank" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range/delta" - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat/values_0" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 2 - } - } - tensor_content: "\001\000\000\000\000\000\000\000" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat/axis" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat" - op: "ConcatV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat/values_0" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat/axis" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/transpose" - op: "Transpose" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/ReverseV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat" - attr { - key: "T" - value { - type: DT_FLOAT 
- } - } - attr { - key: "Tperm" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Shape" - op: "Shape" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/transpose" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "out_type" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice/stack" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice/stack_1" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 2 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice/stack_2" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice" - op: "StridedSlice" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Shape" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice/stack" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice/stack_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice/stack_2" - attr { - key: "Index" - value { - type: DT_INT32 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "begin_mask" - value { - i: 0 - } - } - attr { - key: "ellipsis_mask" - value { - i: 0 - } - } - attr { - key: "end_mask" - value { - i: 0 - } - } - attr { - key: "new_axis_mask" - value { - i: 0 - } - } - attr { - key: "shrink_axis_mask" - value { - i: 1 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/ExpandDims/dim" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/ExpandDims" - op: "ExpandDims" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/ExpandDims/dim" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "Tdim" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/Const" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 96 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/concat/axis" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: 
"rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/concat" - op: "ConcatV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/ExpandDims" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/Const" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/concat/axis" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/zeros/Const" - op: "Const" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - } - float_val: 0.0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/zeros" - op: "Fill" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/concat" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/zeros/Const" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "index_type" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Shape_1" - op: "Shape" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/transpose" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "out_type" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1/stack" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1/stack_1" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1/stack_2" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1" - op: "StridedSlice" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Shape_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1/stack" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1/stack_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1/stack_2" - attr { - key: "Index" - value { - type: DT_INT32 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "begin_mask" - value { - i: 0 - } - } - attr { - key: "ellipsis_mask" - value { - i: 0 - } - } - attr { - key: "end_mask" - value { - i: 0 - } - } - attr { - key: "new_axis_mask" - value { - i: 0 - } - } - attr { - key: "shrink_axis_mask" - value { - i: 1 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/time" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { 
- tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray" - op: "TensorArrayV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1" - attr { - key: "clear_after_read" - value { - b: true - } - } - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "dynamic_size" - value { - b: false - } - } - attr { - key: "element_shape" - value { - shape { - dim { - size: -1 - } - dim { - size: 96 - } - } - } - } - attr { - key: "identical_element_shapes" - value { - b: true - } - } - attr { - key: "tensor_array_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/dynamic_rnn/output_0" - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray_1" - op: "TensorArrayV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1" - attr { - key: "clear_after_read" - value { - b: true - } - } - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "dynamic_size" - value { - b: false - } - } - attr { - key: "element_shape" - value { - shape { - dim { - size: -1 - } - dim { - size: 96 - } - } - } - } - attr { - key: "identical_element_shapes" - value { - b: true - } - } - attr { - key: "tensor_array_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/dynamic_rnn/input_0" - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/Shape" - op: "Shape" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/transpose" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "out_type" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/strided_slice/stack" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/strided_slice/stack_1" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/strided_slice/stack_2" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/strided_slice" - op: "StridedSlice" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/Shape" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/strided_slice/stack" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/strided_slice/stack_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/strided_slice/stack_2" - attr { - key: "Index" - value { - type: DT_INT32 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "begin_mask" - value { - i: 0 - } - } - attr { - key: "ellipsis_mask" - value { - i: 0 - } 
- } - attr { - key: "end_mask" - value { - i: 0 - } - } - attr { - key: "new_axis_mask" - value { - i: 0 - } - } - attr { - key: "shrink_axis_mask" - value { - i: 1 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/range/start" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/range/delta" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/range" - op: "Range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/range/start" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/strided_slice" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/range/delta" - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3" - op: "TensorArrayScatterV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/transpose" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray_1:1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/transpose" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Maximum/x" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Maximum" - op: "Maximum" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Maximum/x" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Minimum" - op: "Minimum" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Maximum" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/iteration_counter" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/iteration_counter" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - 
key: "is_constant" - value { - b: false - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Enter_1" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/time" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: false - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Enter_2" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray:1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: false - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Enter_3" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/GRUCellZeroState/zeros" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: false - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge" - op: "Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/NextIteration" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge_1" - op: "Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Enter_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/NextIteration_1" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge_2" - op: "Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Enter_2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/NextIteration_2" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge_3" - op: "Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Enter_3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/NextIteration_3" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Less/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/strided_slice_1" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: 
"is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Less" - op: "Less" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Less/Enter" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Less_1/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Minimum" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Less_1" - op: "Less" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Less_1/Enter" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/LogicalAnd" - op: "LogicalAnd" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Less" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Less_1" -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/LoopCond" - op: "LoopCond" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/LogicalAnd" -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Switch" - op: "Switch" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/LoopCond" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Switch_1" - op: "Switch" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/LoopCond" - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge_1" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Switch_2" - op: "Switch" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge_2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/LoopCond" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge_2" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Switch_3" - op: "Switch" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge_3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/LoopCond" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "_class" - value { - list { - s: 
"loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Merge_3" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Switch:1" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity_1" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Switch_1:1" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity_2" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Switch_2:1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity_3" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Switch_3:1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/add/y" - op: "Const" - input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/add" - op: "Add" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/add/y" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayReadV3/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray_1" - attr { - key: "T" - value { - type: DT_RESOURCE - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayReadV3/Enter_1" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayUnstack/TensorArrayScatter/TensorArrayScatterV3" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayReadV3" - op: "TensorArrayReadV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayReadV3/Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayReadV3/Enter_1" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/gates/kernel" - op: "Const" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: 
"value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - dim { - size: 192 - } - dim { - size: 192 - } - } - tensor_content: "\207\004;\276\355\360\241>\306\352\231\343\335=\260\355I=\262q\345>\244\023\252>Y/\366\276H\353\335=R\346\003\275i\305\313>\0302\256>\256_\314\275\275;C?\375\251\007?~j\243>\016\352U\276\334\013o>\267Y\315>D=\216\274\303\006\253>v\033\206>Q\256\004<\275I\263>G;\301=\321=\023?y\362\321>\214\0021>#\315\230\276\030l\210\275\264\350\247?v\323\031?\245_ \277x\004,?\262\036\247?}0X\275\206&\350=U\275\320=\016\n\335=9j\023>\300cX\275)\313)\276a\305\340\276\315g\036?\325~,\276\307\035\022=U\256\220>\210\367\025>jH_=++\252>\205\270N>\361\254\264>=\222|>\003\002\031?X\332Y>\177\3359?\302\355w\276\350@\037=\022\265\031?\367`\206\276\3244\234\275@\364\352>\263#\227>\007\016\323\274N\255\373>e}y<7/0>\337\371\316>i3\317\276\201\314F?b\005G\277\301\232\303\2769\202\n?Q\252\334>\032\247\213>]\330\003>\034\017\341\275-\022\304>B\253\227<:\006:?\241\264J\276\216b=>9\'\276:\336$\271>]\352\224>\363\372{<\345\007\212\276\336\"\334\276\215\262\366>Xu\024\276|\212\017?\216\r\024?X\276\316>1\"\021\276\203Gk>\3762\200<\352\236\273=\3151\347\275\307\021\027\276$KG>O\227\214\275d\241q>\342[\005\277\264o\317\275Y\246\352>\375k\016>!5\233\276;\336O=\271}\332=\343t\035\276&`\336>\222G\302>\2001\362=\2143\035>z\3729>\251\233\024?4\377\350>d|\n\277\253\004G>\245\314(?j\\\037>\207\361\244>F\3079?\317\363`>9\357\013?1\010C\276\213\315\316\275\004\343>\276\323,\360=\021\201\003>\302F\262\276N^\032>T\237Y=\003\311#>\324\003V>\204T\235\276\347\311\027<\n\266h\276P\225\361=\274{\t>\177\014\264\274}<\220\275-:1?u\2511\277n\314\r>A\214\267\275\316\203\326=\204\264o>\251\215I>\227\\\321;\246\343]\275:\213\311>+\274\232>\304s\002?\033,N\275\032\250\"?\025\360\223\276\340p\201\276\301\353\215\275`\305\212>-\320\241\276\251\303^>~=a>\227\336\r?\026\223\245>\230\"\344>\341\n!\277\252\373l>\257,\017\277\327\277W\276&\027\334=\273\001-\276\025Uc\276(C\234=\275\243\037\277d\343\034=\347\260:=\021\\\313>\007\026j=`O;;\357\305\274\274}\234\260=Dk\037>\307\201\222\275\273\n\n?\207\231\245>Q\277\234\274\320\346:\276:\371\241\276\373\350n\276\016o\013\277Y\245\277=H\345\265\275\r%d?\033\324E?\030\204j>\321\313&\276\277I\341<\234%u\276K\234G>\017\233\207>\336\024\020\277\232\314\217\276\232_\200>%\340q>~\334\302\276+\303s\276\226\036\007>\013X\'?\322ac>Ons\277}\340\272\276Y\306\002\277\273\365\\\277\202\327\037\276\234A;>N\'\266\276zh\247\275C\266\\=\314\0058\277 w\301\276|\246\334>b\250r>\226\016\332>O\225=\275 \000\311\275,\357\364g\377\360=\263\347\231;\375\226\332=_m\266>\335\262\t\276\354\346\220\276WG\243\276!n\'?\300\2352>\372\344\365>O\332E\274\262\213\276= 
\022\020\277\213\356\354\275\326\t$?NT\244\276\004\244\337\276\250\320\206<\010^\216<\031\355R>ETP>+\362\345=\276u\024>mr.\276v\224\225\276V\262B\276\'\333W\276\233\007\223\277_LW\277\250\201\340\276\331\202B\2751t\n\277,6\247>\325\352\277\2767\2771\275\213\252\343>u\216\373\276\266\324\035?\263\002\016>Uv\234\275\311d\010\276\303|\037\277O\320\252=\3623p\276\343\252\237\275\245\200B>\262\313\001?\021\372&=\356\203\333>\227Y\253\273\365X\203>7tK\276)\270E?\230\231\350\276\026<\005\277\217\320\004?~\025\231\274\344\361\316>\243%Y\276]\362k?\025r\023>\266\234\207>\221-z>!\303\255\276o\020\306\274\255\321\022\275\277\031\211>\372\270\317\274v\257O\276\257\306!?\250\271\361\276)t&?\235\rq\276Gv\021>D\0316\276R4:\274\225\3726\277\277}\245>\201\375\305>\242\313\351=\263\216o\276\221\310\267>\014P\242\275U\034%?\365\003J>f\374\001<\271\227\316>\327|\036>\341\313{\276\341\252\030\276\317\372\317\275\304\264\305\274*\260\255>\334\344\022?q\177\212\276c\272~\277~\270\010\276\001\022w>\344\372M?\363\326\315\276\207\036\036>\300~_\276\277\333\220>\374\362i\276}\000\270>~!\242=\343\275%>\016\244\326=\202\352\370\276R\327\236>\250\253\355\276\"M\306\275\034+H<\352\374F>\323\177\243>\266\213\345=\207\260f\276\361\275n>\374\324\314=\317\265\231\276\n\025\302>_;\202\276\276M\260\275\263\227/\277\213\025\357\276\r\017%\277\232\253\234\276k\001\222\276\316\270G\277Q\267\272\276\257\311[\276\354\263\207>\342\024F\2762A$>\230\2141>\375\364\361=$\033\014?s\362\236>\223\364z>]\374\210>Ic\335=\035\277\373>\264\231\211<\037\026\266=\2279\234\275\315\327\014\277^\321\245\276_0\270>\3435\213>\217\331N\276\224\300^>\275\323\303\275\267|\332>={7=\027\215\237\276,\323\004\276\367\372\245>\310\243\021={\321\326\276\334\r\034>\371-\222\276\242\001\357>I/\336>\263\241\006?\323>\246>?\317\243\276\177\230\t\275-0<=\\c\205\275\330`\354\276\367\013P\274%\351\242=`\245\251>\255\253\303\276\332h\000?\316\310P\274\273\251\226\275\3138\246>z\321\370=\'\272\241\276\214\025\236>gF\217\2767\350\217\276\365\005\256>\257\224z\2767(\266\276\265\035\234\276[\320\300\2763\\\n?J:\203\277)\001\004?t\253\334=\034\323\001?\000\252\203dTh\276\2250\212\2767\213\377>\027\037\001?o\321I?\tf\324\002]\002\276\306\304\334>@kc>\346\211\216=\007\272\036\277\205\261\000\277\370\234:>\330K>=\334/;\276\323.@>\r\223\201\274\025\253\242\276\370o\300\266\276\256#\022?\266Z\032>\263\363,>\037}\r?\017\253L\276\025,_\276$\230T\275\230;7>\312M#?\264\020\372=\030\300\220\276q1\303>\302\266\364>rF\007\277|\235t>!\022\005>\230i\351>\335\241\202\274\311]\025\277\3015\251\274eQ\027\277\234\000\257\276\323x\221>\306Q\237\276\204\334\265<70\023>\366\301\222>vl\336\276U>N>(o\376>6\202\330>8.\335>wvU>\340\263!\276\212\377^\276\'\256Q\275l\213H>\243\315\r>C\'\372=\212\270\274\276aX\203>\264\301\342\276\004\302\230\276\271&\013\277\315\233\\\277\263\355\221>K\207g>N\273\315\276\326\300\317\274\206V\200\276\255n\031\276\310\032\353>tY\237>\334\211\235\276L9\334=B\263B>\301Z&>\376\320y;N\323\006\277\0231\205<\373s\376\275\006\020\027?\264\010->\027o\364<\032\264\231\275\267\361\213>\263\004\017>\376\360\215>\217{\256\276\223\211\355>\372\347\325=\323\206\213=\017\307\277=\016\202\025\277Y\344\336\275@\304)\276\262\004S\277\205\010\223>\0348\030\276O\334\314>\306n\213\275!lm?#\3615\276\300\331+\276\211\251\325>\025\257\022\276\001Q\220\2762,\213\275FQ\t>-\304\031=s\027j>\221\206\353\275\230\246\274;Ow>>\245\342\222?m\210\254\275\221\340\271>u\340\245\276:$\213\275/(\007?j\215;>\377\2003>\332\233\022\275\207#u=\246\344\271\276~\250\246\276E%?>\262\2678\276\372\311\347=\261\21
1h\275\265\245A\276\274V)>\031`\236<\002\243(>M3\332\276\221\233X>\000\013\340=\270\324\307=*#\331>\254\253\355\275\301\220\014?4\213\334>\336\327j>?;\210=\n\023\220\275\345\213W\276\210Xx=m\036\263>\233\027\250\275\306\220\351\274\2302\261\275\345\221\177=\350\031\032=\313\244V\276\250\277\"\277\330\232n>\245\t\234\276Hs\262>\240\202\234\275\314f\261>\355/\343>\256\007\276\276\305\316\036>6oB=\331\327\r>\327\223\243\276\027\003\272\276\030\251\020\277H\227\342>\000\206)?\213\0225\276t\347\303>!\227\214>1e\351=\252\367Y\276\270\365\227\276e\343\347=\253\224,\277y\207$?\0011\330>\374\031\337<\361\3120\276e\274;?\255\265\300\276Jn\317<@_\247\275\026T\244>\374\364\307>:\212\021=:8\260>|q\334=\177H\207>:\010\356\276\013\251\005>^\353W>\202\302\003\276\300\013\371>=?\263\276\271N\362>\307N$\277F\352\027\276\000\237%\277\371\251\211>\251IC\275\022j\254>\313\223)\276[\313\266=gd\223\275\320M\263>(=\035\277)\004%=\216\370\275>4\032|>\333\275\307\275\263\035\t?\254\033\237>PQx;&\205\231\275n\370\275>\335\232\303>#\321\232=\t\317\214?\312m\035\277\311\361\212\276:+\266\276\375\217\030?dk\345>[\330 \276E\334\277\276\273\222\024\276U\356\245>\224\333\231\275\342Q\262>\347\255\215>N\3141=y#s\276\021\236\335\2761C\274>\352o&>O?\375=\256\243\346\275\2777\330\276\261\237??\\\344\020?\021\350R>w\357\r\276\241%\214\276\361\315\313\275\273\030@\276\027\004\013?\311\353\251\276N}3\276^6\005\276\226\032\331>S=\007\277f\260\001<\254\327\013\276\365p\363\276\007\213[\277\352\352Y\276\344,\307=\303\3324\276(|\225\276(p\301>Zu,>\336\251v?\001(0>f\333l\276$\241]\276K\322\221\272\265Re\276\367*\016?\007)\361\274\333\362\233=\333D\r?\2629\005?W*#\276\242B\014>\025I\017=w\202\020\277\301A8>v[\212\276k\333x>\277\327\235\000\307\260\275{\312\036\276\034\357\037>\002S\267G\270$\276/\320\264\276cv\303>\016\377H>\024\211$\276\2426+\276\223\371v\275\024a\317>\224\201\315\276u\332(\277\311\243Y>\346\331\376\020\276\231\275\030\002\">\240\030\254\276S\223\266>3W\211=\350O\205\276\344\002\005\276o:\366\271]Y\244\276\312r1?X\240{>\376X\213=\001\035A\274V<\006\277\214N?\276\252\305\n\277\363F\346\274\341M\256=\037\243\331\274p\n\233=\201r\240=\265\227\315\276\334\254\212=\264r\223\275\360\250\000\274PT#>\320h\252\276G9s\277QQ\035\276\233`\311>\374\333`\276\022y,\274\377\342\302>\216\252\002=<\225\240=\271\373\006?\321\300\037\277}\266\367>_\275\255=\2124\363\274i\275\330>\366\346\004?\357\225\016?\263J\206>\312\010n=\3602\334>\326z\211>\227\252F?\224\356\252\276\361j\312>n(&\276\214\203\224>\256\207\206\276#\200\275>\232\264N>\267X\033\276\326\233\205>\207t\177\275\203\274\021?\037\305\"\276@\2134\276\\\302\003>\207V\036\272cV\017\277ShR=\223\253\354\275\317\222\036>\357Z\255>\200\212\263\276\211v\242\274\277\245\306\276\330\307D\276\007{\212>\267\306\323\253g/>$\336\264\276W\246P>w\2330\277\343\213Z\276\310\236\315\274u\321\312>\033\013n>[lf=\357B%>;/\365\276\374\373S\275vFP\276\265\352\224<<\320\317=\331\001\206>\301\202\274>\331\276!\277i]L\276\251F\357\2760++>\363+\230=aO\'?\247L[>\244Q\223>\014* 
>7_&=\327L\261=\326\352\005\277\221\304\255\276m\217\354>ZE\205\276))X>\034\241D\276r&\207?\201\027\345>\027\001\374\276+\376\223>\274\245R\2753D\270=\267\251$?\023\235\256\276g\220\212\276s\351\271>JZ\271<}\370V\232\207\'\276\\\230==\240l\327\2750\367z>\212\304\024?\325+\010\277\320/I\275\275\371\360\276\003Z<=\207\2449\2779\366\033\277\221z7\276\216\303\203\276E\010\016<\362,6\276T~\236=\247\316\322\276\223\371\031?\317\331\357=\303\311\201>u\tR\275\367\014\205=\235X\273\274Nm\037=7\350\007?\252=\002?\266\245N>m2w\276\376\236\310\275\025\233\302\276\025\305\256<\353\037v=2\027`\275\235^b>\246\365K\277\372\230\340\276B?`>\301cC?\243\341\244>\263\320R=\247\225\225\275\341\320\377E_[>\350\016\330>\346\377\323\276\203\243\232\275\234\257\302\276\310\007\234<\201\366\376\276\275\027k\276Sk\270\276D\253*\277\002\200\305<\243rk>F+p\276t\026\225>C4[>)w$>\316vD>L\366\342\276@W]>\211\237\357\276\377\023N\275\344\267\276\275\212N\013\276l3\203>\264i\035>\336\340\024\277\374\360\006?MFw\276\204\366\r\277\356\3356>\341[N\276\277\345\263=M\010\232>u\223\226>\326<\226\275\222\247\332>@\261\232>\3305=\276\224M\366=\212\334\244>\257\206\263\275\336\353i\276n\343\353\276MT\271\276!\351\272>\016\257\026\276\221\333\241\276\003\027\277\275\371\333X>f&p?\265\234\005\276\227\312\304\276\331\275\014?\363\225e?\3077\222\276S\366\254>p!Y\275\223=\335=/\r\000\276E\242U?\211\007K\275\206\335\231=_kF\276s\321\027\275\273\026\221=\252\r\273\276\021\241\320=\347\020\300\276\211[g=\200R\217\276\204\214\361\276\273rD\276\000\363\023\276\016\202\320>WB\017?\372\236X\276\272\207Y??\213\255\276\005c\262>SD\200={R\240\2751\004\022\277\232\312\263\276h\027\225\276\232\233\014\276\210S\261\2767G\272\276S\323\022>CA\013?9\036\322\276xNs\276V\267h\276U\034;\274\273\243p=\370i\221\276M\322\000?\3509\324>\226\366s;\216\204\251\276&b6=1\226\253=\363\266\270>\242\340\276=\240\320\326\276RH\243>\223s\310=*;\240\276i\254\324\275d\177\375=\030\3441<\263\371\315>\014}\252=S\375\277\276\006\001\356\275Yv\242>\242P\022\276\372\\7\276\231\017\255\211\203\230\275\\\021\005\275\304\272\254=\240\365\023\277.\254\235\276fZ0>\360=E>7l\272\276\032`y\275\253\367/\276\345\013\341\276v\230\232>:\377\325\276r\362\037\276{\363\252>e\243\337=\370\374\\\276(\242\236\276\372n\020\276\002\262\031<\273c\177\276\307U\212\275\342\260\265\275\331\"\002>\234\207\374<\301j\267=\217\255\256>\373;\264\275\257\223\r\276\3472\351\276\374\255\375\2761h\206\276a\206k\276/Z\312\2763\314`=\0360\365\020\242o\275\275%\353\276F\001A\275\034k\226\274\206C\026\277\344\361\207=\t\341\274>\237\200\n\276\235\305\344\2768\250\307=E\254|>\254\333\014>H\027\362\276\212\3753={\0102\276\020/\272>9\216%\275\276\013\036=\0141)\276\241\236\241\276\225\237\032>\212c\261<\014w\215>\022\347+>mf\203\276\004\006>>\035\315\350>-q\215\2763\004\004\277r`\345=\332\003\337\276x\241h\275\327i\242\276\247\230 
=i{*\275\004E/>\355\262\007\276o\274\344\276\335\316\320\276\033\220\324\276\267\311\237\276\025\313\221\276C\006\016\277-\246\253\276m\306\230\273\200=C\275+\0216>j\360#\275\211\255\366=\321\'\245\277=\333\207>`\234\235\276\207\311w\276\360f\340=\221?S>\216M\030\277_\240\201\276\016c\334=\0075\244\276\036\310\210><\336\343=\255\203\016\277\033\367\\\276\007\2509?\035.\027\273\3771>=\265\301\215\274\213\000\332=\351%)\275\244\267;\276^\360\013\277-\360h>\324\014\006>NW\212>-e\365>Y\010\232<\325\202/\276.\036\002\276=%!?\262\206\234=\001\020q<\032B&\276\215\022\r\276v,:\276gL*\277\336-g\276\\\000\000\275\024r2?S1$\276\224\334\371\274\206\312\310\275\021\241\265\276\236\253\246\276\"\374(\276\307gX=\322\001\030\277ED\205\276ux\177>V\360\242\276\242y^=\362\273\325=$\212\037>\251\242y<%\030\\\276\374Vd\277\035\256\\>\003\203\310>\025\372\r>)x\360>\375}\311<-\226\366>_ \'\277\005T{=W|\016?\3246\327\276\001c\257>\225\245\\?\3058\335=\375\315\031\276\325\304\003\276\005\303V\276D\231+>\034\2322?\324\310n=\030\n>\275f\223\\\276\272G\'\277\010\313\265>H\317 ?\336c\230\276iO\"\277\345\363\313\276W\242\236> \277x\275\177\276\277;\026\1774\276]R\211>\004\302-<%\211,>\301\360\212>\226\254\250\273^\377\323>g\230\271>\232\264\022\277\276G\010\277\2719\344\275\276\013\010\276t\000R\276g6 =,:\024\2763yB\277\235Xm\276\r\036\315\276P\020\035?A,R\275A\357\021?\2131u=\262\226g\276\220\350\027>\270\255\032=\023\'\024\276W\334\234\275z\r\244>\200\217\337<\334\276\315>\257\222S\275\312o:>\233?d>b\336\252>T\234%\277\360r\023\277\373\276\234>\262\007\020\275|\3415\276\2669\231>\2555\250\276|H\003?K\363\334=\274\336a>\035\326!>\022m\262>\320\316\321\275\314\000O<\372\365\335\275\306\301+\276?$\016\275Be\234\275\323\301\271>)}\"\277\363B-\276ij*>^_\006?\205_\003>A\014\260\276\002LK>\3723\245>\r\303\322\276\267\332!>2\263\213>\300K,>\233su\275\350\002\216<\314\016N\276\326(r=\312\233\301\276\366G9>/\364<\277\'\002\322\275$\261\327=\025Oj>\204\221g\276\036\365\225;\322\246_\276\001\333+\277\276\226K?&\355\247>\001\367\305\275 t\247\2765\361;>\3718\360\276C\333\002=\315\ty\276\034\326\231>+\354}\276q\332\377\275\307\327\343\276t\241\374>\017L\206\276\256\220\311>\246_\216\276\262,\006>\377R\247\274\"\377c<\027\020A>L\r\207=\033\327,\277\317\032\332\275IN[>:\233e\276\351\372\311\276L3/\277\300\352\367>Q \273\276\033\2141\276\n-\257\276\306\330\252\276+:\323\276g8\021\277Oa\245>)\325$=9\316\237\276;\313:\274\267\216\316>\212\005\365\275\211\335\363>\355\226I>\301\226\220>\217?\214>w\246\027>\216\1773>\341\315\235\275jJ\210>\264z\364>\245\256\312\276\375\221\313=\366)\266\276\3569i\276oy@\276\0015\266\276\017\223:>\264\316\355=\262\376\237>6;r\276\264\240\265\276\351\374H?KS\037?\255\300\325>J\345\210\276\301\2472\276:\255\353\276l\225\350>\366X\252\276Z\245\035>\224[T?\262\026\300\276O\2441?\350(\231=\220\177(=\253\265\210\275\206f\005=\370\316\022?\235\253H\276Dr\227>\364\2529>\022Da\276\3752 <\370\027\217\276\010\305@>\276\3164\277\365\275\273\276\256\373\245\276\276\033\253\275\242y\342=h\277c=}\204I\276MA%\276\342\237\272>\352r\335\276\267R\\\277\240\255\257\276\266\023\266>li\r>*x\237\276\346\362\277\274L\211\374>\2035R\276\221N\334\276\357bP>U\371\300>j\233\210>\265\337\313\276\201\3510>\236\366<\277\322\254\200\276\270\027\025\276v\314\305\276\256\2170\276:\355\004\276\014\335\020\276\013hW=\007\312\r?\362\232\266\276w\331 
<|\014\214\2763\273&>^O\211>\366B\234\276\327\205{\276EO\005?\3627f\277\370\336\256\275\200\305\262\275\243\010B=\2458\'\276\310\346>\2767/+\277\332\216\033\275\221\016N\274\266]\004\277\272\277S\276\267\324\200\276xi\303=\332\226\251\276\346%\242\276\3060\211=3\002l\276\312\"\024\276,\300\234=J\216\206\276M\237\305\275\030\3303\277\177\\X>oZV>\221\236\333\275\211\364\315:7\212V\276\2678\365=\020\254i>\347\326\035\276\004@\302<\266%\213\274\374\272\212>\261F@>\315\225R>A\3007=\000cf>py\217?\316$h=\313\355\362\276\032\250\266\276\276\342\250=Q\223\274=<\271\236\276(\261\210>\354\224\016\276\020\007`\276\224\260i\276\017\374\376>[\233b\275\214)\351=C\323\302\2753\375\263=j\220\206><\213\367>\3730\275\275\333\313\234=\335qS\275ZDh\276O5\207\2765R\364\276\005\273\240\275T1<\277\376}\375=\001\241\266>\271\357\250\2751\"3>\241e\210>k\224\207>\245^?=\203e\004\277\276\223\234>\322\313x>\334p\314<\231\346\007\275\212\2222\276\373\201p>\317\331\034\277p\364V\275\272\005\212>\270\016\220\2769[f>tY[\276\220Zc?\233\234\037=\021\257\313\276\016\204\305=}\312\\>\227}\213>\303\376\352\275\177\212\207>\014C\367\274<\341a>\006{\037>:w(>\032g\347\276\265vi\274\26168\276\313\361c>0\223\203\276\374\340M\275\372\277\275\276\343\317N=2Y$\277YF\353=]\213\301>\253\010y\276-\341\023\277\246\2075>\227D\333>\241X\244>K\320J>\201\352\272\276\177\232\361\276o7\240> \013\265>L\273\250>\006\276\261\276O\375(\276\202k\343>\233Z\023\277\306\360\206\277\2276\331\276\262^\207\276\305\215\362\275\251\264\024\276L\332\210?\221\0069?N\241\216>4\262f\275\251g<\276\230s\374\275x\006\201\276G\232N\277k\rl\276\250\007\201=\240}\240>J\215\352>[\356n\276=\007\376\275\365\371\301\276\231\031\030>\247\035a?\274\3335\277m\363\313>\006x??\370\177\234\276\347\tL?4\257\272\275\312\367>\276\351\272\375\275/\264\267>\2104\374\275\243\351f?\263f-\277\021^V\2766E\202\277\005\236I\277\035\204\273>\031\360$\275\330\251\211\276l\305\312>\276\251\214>\374\032\343>xL\220\273\347K\302=)\242\007>#\r\217\276\307= ?\232\247\267\276\374$\035\277\373\276\256>\222\n\202\277\310\270\213\277NL\303\276\202I\271\276(\253\017\277\2458\321>>B\020\276r\306\241\2761\257\231\276\215\327O>-h#\275\334!\220>t\254!>\323c\210\277\371\255\307>\231z\231=\277\374Z>\254\362\331<^\213\301\276\243\320\216>\355\264\275<_\025\332>\355\327\327\276\026X\376\276\302\001\364=H\017\005\277*\216t\276?\235\t\277!\375\320\274\000-%\276\300T&\277\261(\267\276t\204\276=\230*)?8\227\251\276%O~\276\374/\204>\310c=\275\264\353\275=\315\360D\277B\353\004?\276V\235\275\022\240\227\275\251n\000\275N\334\001?F\265Y\276\213\302\265>\325\023\230\276\224\252\222=\205\034\213\275P\307\201>\363\316\274\275\326\203i\277\240\253\324\276P;/=f&\242Od\272\275ae\352\276\247\004\231\276\213\362\264=\337\000\364\2763!\346>\254\004\223= 
Y\006\276\000\247\200>\020\256\343\2754\210n\276\226\\O\276\243\232~\277\\\302\303>d\212\274\275H3\346\273,\251\267\276\200\244\247>\240h\312\2765\t\331\275\331E\031?\317w\235\276AP\370\276K\016\375>L\302\300=\0022\025?\200Z\244>E\277\014?x\300\024\276\'I\307>\0051\215\276\303\314\374\275\350\364\256\276\244\325J\277\006\267)>\0034\234\275\364c\206\274\323I4>Y\272\005\276\0018\350\275!\266\017>\357\030\205\276S\010\t>;\275\260\276\354\322G\276\267\006\216\276+\252]\276S\177w\276\214\230\210\276`\334\031?\347\312(\276\310}\223\275?\221\242\276O*(\276\031H\250\273&\2074\276\304\263N>r\265\234>GbT>\311k\036\276=/\266>\374\027\250=f\370\036:g\214\236>\264\374\262>}\226\215=\267\027\205>\312\032\027>NGj>z\033\205>\366b\212>w\233\033>I\256\224\276U\212\232\276h\231\356\275V+$>\231\013\024>\314\2564\276\020m\322\276i]B>\241\014#\276\227\t\037?\203\017B\275\300\271\030\277(\000\204\276R\355\335>\020b\241\276\353\345z>\274\034%\276\273\370\316>Y\014\021\277No\361\r\"\272<\203\346\006?\'Q\211\276\346&\014\276a\347}>\344-\002>\364_Z\276\315\324\251>\216X\277>\271h\365=\262y\341\276;\001\213\276TQ\312\275\226\r\007=\277\242\226>\333\017/?\277\r\252>\214\373\217=a\252;?\246\025\306\276\005c)?\257n\256>gr!?\017\320\214\276^\347@\273\326\252\274>\0144\241\276\002\257\"\277\267Z\255>\357y\003=\216\036V>\266\233\276>\021\025\235>\315\367\037>:\r\010>}_\374=>(\013?+\255\254=G\225\315> [^\276\233\370\214\275\222\022\023\277>\222\211\276t\265\032>\317v\231>\266\030i>\307\374b=\004\211\031\275\301j3\276h\036\004\277\323q\247=\337\205K\277\215\265\273\276\255\310\036?c\352D\276;\224q=\021|d\316\213\246>\026<\"?\212^\262\276\210\312a>\214\021\030\276\313i\032\277$\222\017>}C\320>\3209\020\276\330@F\276*W\000?\032y\261\275:\366\256\275\204\212\233\275\221\320\206\276\234\231\230\276Q\217\205\276t=w\276\224\236\217>\315\202\230\276\301E\203>\367\177j\273\345\217G\276\371\343:<[\345\302;\023d\227>\3746$>;\213\367>l\271\302>+\201\265\276@\346\271\275F\360U\277\325\3305\277[\025\023\276\246_\221>\032\3651>\315\202\006\276\255\340\373\275fQ\006\276\370\371\274\276\345{T\276?\002\306\276>NJ\276Y\256\001?(r\263<\"\001\216\2763\024\020\277\211\257\367\276V?\017>4\352\333\276\262\002\236\276\255Y\303\275Y\243G>\035\026\304\276\3539l= \206\274\275\001H\332\275&\273\200?\007\310J\276\367\252\177\276\2368J=f\0333\276\370\226I\277\242\362\324>H\201\232>\032\2507>\020J\222\276\'\343>>\376-$\277d\327\250\276\270\332\332\276\366\\\210\276\357\237K\274\020\037\304\276m\010\024?\324\336\036\276\001\351\021\277\006\367~\277%\025+\274\027\331\277>]R\217>&\346\250\305\"\036\275\341,}\276\256X\234\276B\331\213>$\252U>\363\354\237<\311\351\036>\021\021\000>\362\255\360=\356\301\001:G\353M\275\010\316\213\276+Z7\277W\006\253=\027\000\232\276\332g\232>o{\304\276|wS\275\010U\273\276\333u\275\275B\316\027\276@\003\264\276A\024\346>kK<\2761{]>\337\014\215\276`\023\256\276;\004\310\276\374\350\202>\351\315\021>\250\022Y>\234V\360\274!\037\220\276\010\274\227=\030\2458\275\347\273\344\276~\330\340=\270\374\250=\274A\"\277]\202\r\277\305O\335\276\373\203\021?\250\2439?\2337;\276v\243\035\276[\335\243\276\326\251\304>T\341\241\276\271A\013\277\025\235\357\275\201A0\277\324\031\275\2762\024y>\000\212\025?=\021\310=\237\205k\276\267Gq\277\335\360^?]\261h\277n\032M?Y\340\321>\342\260\333\276tf\315<\271\035\340=\363\357g>\314V\275>\234\310\337\276\241\026H\276d\270\263\276\371 
\207=zF\341>#f\307\276\243c\245\215\231\212>\336\323c>w\213\306\276\022\020\321>2x\036>d\024\230\275\345\003\277\276\311S<\276\374K\247\276\215\3737?\271\001\251\276\311/~\277\267\306@=\r]A\277\244\014\r\277\010\203\332\276\330\301\302=\351U\332\275\255]\203>\255Q\323=\302\214l>\366\376i>\374\001k\276\340\262\013>\367\021\023?\341J\317>[\222\272\275[\256\036?F\035(=\007\342N\275fb\307>;\355p\276\340+\035?EqP?\227\230)>\231\265\212\276\023\322\310>\224\025<=\020\222 =^\213]\2760zb\277\233i\244=TtK\276\230\315\201\276=?\177\275a>\010\277;\272x>X*\352>\303\006\264\276\336_\376\275\3236 \272=\036\245\276*B\203\276&\000O?\257\224\304>=\323\242\276\301\357\204\276\361\006\016>\247\235\220\275\337\373\252>h\020\r\2768^\273>\177-\237>\233$\017\276\234\232?>[\313\217\276\356q\245>\220\246\032>O\361\274\271*\227V>\035\242\314\276a\306=?3f\257\275\r\371\370=H\271\324\275O\346N\275\255}\252\276QC\030\276%\232c=\026[)?\305\301\212\276\356\250\341\276Op\010\276\201v4\276z\247\227>I\314O>\005\002\224\276*B\'=\212\327\030>\301\250\016\276\177\326\367=\262gG>\337\244\235\275QAJ\277\0142\261>\240\203\334>z\356\276>\024\226\260\276\273_\312\276\215\355?\276\275\363\234>\356\220\275\276\206\270\372\275?\027u\276\341%<\275\207\020\276=\263\335\340=_\364\016\275\331\030\330\276{%\333>\2116G>V\237f>\376P\004\2777\340w\274\225\374\260\2755\337M>\t\034\004\277\313\341\372>\010_\206\276\246\345\224\274t;\325>\276\220\033\276\303\320\224=}\212\266>RR\241=^*7>\374\336\022\277x7\215\276\330[/?|\'\005>\217\375\336\276\270\261\013?87\361>\026\2410?.\202\346>\007\207\237>\377\345_>a{@\276\272\270\370\276|\363\035>\210\374\001>\017\236[=\02390>+`\345=\030C\242\276rV\330=\024=\273=y]\374=XT\344\276\'5\344\275?\177\003\276U\030\016\277><\035\277\224\010\257>\233\350\341>\346\006\315>\3065\">\301@\353>\255h\275>A[\235\274\013\002`\276\255\177$?tq4>\333~\202\276\006C}>\260\256\325\276>\202d>(0o>\241\276A?f\214k\276.\213\246\276 e\037?\230!\311\275\025\022j>\241\017\377\275\305u\246\276T\261\214>\300Gb\276-\365e?\306\204,\277\370\020\222>\260\030c=8m\226\276\202\202\002\277`\034b\277\236\010\364>\300\025\265>B\250\236\276\365\003\304\274\261s\275>\237\240\255\276\004\311[?1[m?\213\215\034?\035\227$>7\035\210\276\322pt\276\214p\203\275\275&\010?\243\206\340>\331P\000\277\032;\240\276\346\235\r\277\242\207\367<\3175%>\224\002\004\276U\004W\276\223\257\301=\016\036\206>>h&\277 
u\027?\350\363\240\276\003\216\254>0\332\002?\323\270\216>`\264\345>\3111\264\276\334?\201\276\322\025\344<\363\334]\275K\370\317\275\247\266J\276\320A0=#\206\345=\336\'\275\276R\351s\276\256\036E<\003@\272>\226/Z>\315\201\216>\323\300\264>\272U\262=\3034#?4\262J\276D\304\024?\346\2236\276\241\226\323\275#\225\206=\001#\207\276\277p\035?\3017\311=\305\357\272\2763\024Y\277\'B\275>L\205\226\276\177\377\224>kF>\276\010\230>\276\211L\004\276\337\032\366>?\203\035\276\207\027&\276\010\241\225=_|\363>\302/\"\276\221Z_>J\364\221\276\234\353?\276[\331\337\276\n\367%\275\003\373\312;B\372&\276\220\031c\276\251\317\035\275\032\001\201\277/\021*\274\202\004\375>\t!X\277w\302E\276$\360\005>\350\212\205\277\363\001\355\276\302[T>4\343)\277\326\226\005\276X\241\266\276K\006\307\276\206MB=\232\234\353\27401_>\302`\345>\220.I>\322g&\277n{\023\277\342\240\274>\307s|\276\246\027f>\025\333\245>\234\247\253\276\371`\340\275:\354\031?\225\013\325=\256\251d\275\253}\263>\030\357I\277\017\355\035>B\234G\276\325\372\364\276\207\246+\2776-\273=\236fI\276\216\025\016>\257L\246\274@\237\037>\2040\242>\031\267\352\276V\237f=/:\354>&\336\234>\253J\335=\210\312E>n\210\340\275\205\276\275\276\366Fc\277b\253\025>,!(\276Ls\366=\235}X>@;]?\377\273\274\275^;\315>w\231\254\276`\346\246\276t\246(=\031K>\276\267pB\277H\322\353\276\355\036N>o\231>\274\021A\021?\242)8<\322j\202\276\026X*\277\355\223!> \306Q\275\202)\313\276 \275\004\2772/\254=\216\236\333\274f\356\014?!\326\221\276\n\366\025?\260\371\034\277bI!>I\357#\275\026;\363\276l\310\254=o\373\232\2758\246\352;6:\030\277\303\231\246\276\255\201\341\275\262Z\273>\024\370\374\275\020\000p\276:\317\000<\020\365 ?XbE\275\016\366\007\276vE,>!m\253\275\367B\037\277R.\321==I\257\275\242O\204\275(\210Z?\200\251\277>\310\316S>\017{\021\275\226\2131\277\221=\264>\330YK\277f\310m\276)\3147>kk\330>\3000d\276\217\\\001>\2251\222\276\232\260\013>Q=\265>b+@>\017-\332\276mP0\276\270\035\253\276S:\363\276\316\334\302\276,\366/\277\273\001\247\276J\016i\276\243\212P<\361]\200\276\340o\016\276Ta\255>\337\335\225\274\022\213\321\276\312\251\263>\362\304\025:\326\373K\277\313\217Q=C,\256\276`\000 >\223n\262\276d\224\037=\016\347Q\276\025cY\277\370\233\206=\247\375\014\277\231\211\014\277n\2423\277\"\272\306\276M\250\265=D\206\337=Q\207\331\276Y\030\303>\204$9<\271\222\025\277\357\312\304\276Z\227!=\037\204\346\276\352\030\315\276{H\306=e\350\210\276k\030\217\276\327i\304>\352\276/>8\330\006>T\223%\277\260X\252>\323<,?wL\366\276\367uc\276-\257\366>\310\377\010\276\337\224\354\276\\D\333\275\247\325\340\275\nZ\320\276B\252\304>\227\"\345\2755*\023>\263\257\262\275\260\262\230>D\317o>\234\252\276>\257\247\'?x\271h?\362\374s\276\225s\256=\267b\242\276s\317>\275\274\357\231=\216\323\276\2326\267\276\ti\212>f\234\326\275*G\340\276\'\261\254=\312\216\236\276\2416\275\276g\"O\276\'\224\231\276\210\377\346\276\3132\"\277\253(P\276\276\363\345=]\355b>\303e\263\276\257\212\220\276]\026\204>a\243\231\274\031k\342>\335\277\350<\301\242\'>\333t\001\275\252\377\344=a/+\277?\323\337>a\310\272>\332Z\211>m\243,>\324\375\206\276y\263\273>|2p\276\037T\227=\374i\326\276\365\274}=J3U\275\203\026\027>\020\020q\2763&e\275`\215\335\276\016M\361\276\246~\000>Hc\256>\033H,>`\311\214\276\307\277\201<$\365;>\321R\005=wy\004?\265=\311\276\304\217\217>n 
\177>\275I\017\2779\000\356>O\325\232\2761O\016\274=\226\034>&3a\277\202\034\257\275\ro6\273\251\361\360\276\354^\346\276;\356v>\272\220\000\277d\377c>km;>$\301K>\"\017J\276\010\272H\276p\312\354\276K+\277=\020\366\017\275L\306\312\275\322P\230>P\345\272\276,(\034>C[\266=m\001/\276\013.D\276p\037\353\276\241N)=\237\203\373>\275\323!\276\324\375\r?\3138a>oC\002\277\307\3371?4R]?\033\254\330=\364F\306\276<\231\311\275\204\206\276\275F+\346\276w\357\331:ph0\276p\261\237\276\206\241=?;\221\217>\352\264\235>\343\321\372\274O9[\276h\330\000?\000\201\'\276\246\327,\277\370\222\031?\210P\342=\371\007\201>\236\"A\277dZ;>\357\361\013\276\30663\275?@0\277O\362\222\276\265d\206?\236\241!?dz\021\275z+\023?\264@\243=z\234\001\277\037b\277\276\212\347\234>\014\235\372=\261Cp<@E\036\276\267\262{\277\373t\326=\005\"\243>\355\r\211>\330\204=>D\003p>\343cv\276~>\026?\3052\316>\333\234\375>\245\253\023>{\344\321\276\0339\345=\031\316F>I#q\276\035\240\257\275\353\000\212\276\177b$=\240s\023\277\360\312\241>\371\340\241\274\214\273\232\276\362\213\267\276]\0162>\017\033W\267)\234\273\317\215\231=5\352\263\276\315\203\245\276\013{\263\276\370\331~>\303\252z\276\356^\234;\225\001\362>mt#\276\006{\267\276/\216\242>)\023\354\274r\316@>D.\032\274k\206\024\276\252\335L\276\346>h\276\237\001\336>\201\233W>\202\006r\276\274\010\267\274F\335Z=\377\277\312\276-\000A>\224\202\264\275yc\355\275v\362\034\274\002@\212?\001UK?\0237\250\276\033N\001>\366.\255=\346g\206\275\364\250\216>\2605\303\275\352JR=h\237\207\276\355\305e>\021I\221\276YF\037\276=\335\340\276|\232\321=v\207\026>\037\020)>\003Y\010\277\3263\023?\327&\276=\371f\265=\316D\370\276\236\221]\274\301\202x\275d$9>9\017\227\276/s\177\275C>\004\277\222\030\217=\201#V>}k\210\275\367k\206\276\344?\315=Z\222\255;m\366\257\276e\n\033\275\357\341\326=!\325\220\276\324\261\305>\245\027\223\276*\313\024=\253t\233\276,>\262\274\035\212f>\035Q\014>\236\023\326=*\202C|\304\225\274\320\017->h\214\314>=\357\234\275n[\326\276\362\307\265\276Z\240\234\276\3625P\274q\272\341=\317\371\\\276\340p\350>\033\325\226\274\271u\301\276\210\255[\275>\307\210>X\332\214\274\271\252&?\351\213\347\276\256\001I\276\001\251\223\276\365\352q\276\203\364\307\275\375\333\345=,\032=\276\261\317p>\332%\024>v\220\243>$\226\324>@\035b\276\253\202\212=\340\375\365<\252\231\030?\353\326\322\273;\324%>\313Q?>&\354\271\276\306;$\277\3712C\276\306e\341\276\355Lg?\001\302a>\224f\t\276J9z??\207\207?\017\022\376>Q\341\322\276y\025\026>`\262\236\275\020\025\000>nM&\277\r\247\030\276\342\357k\277\315\356\037\276G\271\245\276\262\260\330<\252\344\255\275G\201\315\276\021\213\234\276\020\3748\277X%\376\276\022\0021>H\253\322\275\211x\014\277\024\214\340\275Pc\271\276\261\245\377\274\211\254\363>\025\330\225\276\362\342\234\276\010\267\317\2763X\257>\014\356 \276\301\305O\277\245`\255\275\215\213\037?c\320\021\277\223\345\211\276A\270G?\343\300\230\276\376)\251\276\347\235`>\334\255\265\276d\237B\2768\317\332\275\336\321\020\277\220\022\003\275\"\277\330\276NK$\276N\003\027\277\255\326\016>\252\3000\276V\226R\276\346\031\310\275\356O\301>\225\216\023\277\24303=:\215\031=\315\227[\276\257\021\255\275\373\223\021\276\264\035\341>p\253\253>\265\301E\275\"\017\374\276\202\316Q\277\303\305F>d\242\014\276\261=2>jUX>\206!\244\276\005{B>K\355\254>\303s\314>\243$R=\267\270\273>\34244\276\275\367\026\277k\235 
?D`\033\277_\247.?@O\332\276\037\211\251=\223y>\277\3129->\253\303C\277\372>M\276f^\226>\310\200\226\276\0168\027\276PZ\274\276\232f\007>\367rx\276\307\340\r>-y\005\277\205\326P=\366\224\004?E\307\250>!\267\316>>\321%?\227&\243\276\221B;\274\220\373K\275\330\204\226\275\0009\345\275:U\363\275%\373\306=\376\003\n\277D;\n>XX\320=\006P\010?D\273C>%\216\225?\347n\366>\361&\321>\010\000\242>\006\372\231>\274\316\217\306i\306>:\026@?\"\030\010\277\222\311\337>.\257\316>:\010\357>\360_\217=vs\013=\205\026\214\276 \311\034\2759-\271\276\203\200\341\275\377\310\276\276v]9<\016Z\276=\374>\326=*1\006>\023\022-\274;\235\026>\336\211\246=p\0265>E0\025\276&G\000>\231\310\302\275y\r\313>\362\324\277\276\232\321\031>Q\255\214>\noY=\306@G>\214\t\002>\317\325\304=\206\317N\276\211t\302>\2718\313\276\300\361\032\276*\333\034\277\333a\002?\364\253\336\275\001\033\206\276\227\236^?y3\324\273\021F\372\275\345\276\037>\303\214\017?\232\263\314\276\271\031\230\276\032\223\220>;\203\357\276\2344\306\275\352\377\302\275&\261\250\275U\240\230\275 e\343\275\344^\211>\203h\324\275\224n4?\271q\242\276ck\305\274\026z\317<\231\005{=s\370\270>\260\233\256<\312\314\216>\311\013\371<\253\252\n\276\303\246\200\274\350\205/>\244\263\353<\342D\033?\007\322W\274iM\374\276\302\026\206\276,29\276;u\311>\340l%\277G\235!\277\364D\212>\301\327\021?\ryH>/\273\247>\370\020\273>\231M\002\276\"\3732\276\272M\326>j\237%=\242\353\032>\341\212\210;4\004\242\276\320\210\n\276\3520w>y\3638?]pu\276\217\206\360=\321r@\277\361\026\253\275g_\230>\203\217\206<.\274~=6\267\265\276\271\200\274>\'zs\276\035j\304>=z\t\276E\236B\276\n\223\030?d\355l>FX\312=\276\025.\277c\261\010>e\213l\275Je\266=\323\310Y\276\264\000\226\276w7\264\276(\261\t\277\020`G\275K\'\370\276\352e\270:x\214\207=\261b\001\276\177=\333\276\310\263\255=\312\257^\276\312\025\374\276\335ol>\002\355\033\277s\031\302=\264\354\023?n\327\'?z\243\221\276\245N\262\276\310\213\000>\236\237\357=\016\301\363\275\273$P?M\255\017?\340\226\350\275\347\213\274\276\273+Y>\352S\320<\276\"\202\276\270\223B\275\032\"J=\231\333\225\275\255\201\270\276\031\\\032=\215/y\276\360f\214\275u)\227\275\007cM\276\251c?>\267\027\035\276q\203\035\276\310\231\344\275\376A\227\276\266\317,>\360\275\246>|/\261>\013\033\024\276\363\211\261\276\022Ec\276\365\275\305\276z\357\023\276\274T\366>X\010\020\277\373\364\325\273\311\246,\275\370\355\000?\366\312O=P\014~\276\216-\257<\377\263\352=\t\323*>cr\016?\372\357\310>\016\277\211>\261:\263\275\t!\007\276\217\262\023\276\370\305t\276{\230\233>\2639\316\276A\362\374\276b\332/\276\225%\370\275\304\257E\276)\030\377>\022\364\312>n\372#\275 
S\017\277\\\222\344\275q\351\220>\330\026\263\275\201\304,\277\035x\036\276\024\207\224>\034?\034>\252\373\350=\0252\360=#\\\273=\257\250\215>\235\350*>3=\003\276X\316W>\333d\232\276a\337\024?\345\231\200=\247\343\250\276\204\231\246\276\273\314\205\276\350\276\376\275z\272\353\276\265\214\263\274\'k9=/>\220>\245\203\307\276\230Y\213=\355g\332\276\347t9\274\230\244j\276\210\260\272>\344\310)?\302\327\366\276\311\243\322>;5\224>\032\326\032\275b3$\277\337\0049>\361\037\200\276\026\330\256<\315\223\n>@0,>\363\356\017\277\010-\267\276\277\022\242>\267d\222\276\237\207\357\276\253\360\347=\302\254\270\2759\t\264\275\232\030(\277\235@\307\276\271\246\362>\222\304\t?3\3655>\3455\325=A*\313=\2549O\276CoD\275\332e\231>@\274@\276\262\027\201>\201FI>zX\210>\3718\313\274&\357\246>\n\221\251>gK\274>@\024\014\276\237\372j\276\207e\365\276\372\202\336>H\350V\274B\317\316\275{\221M?:\345\000?\224\200\240>4UF\277\240\\\274=\'\035\010\277\001%e>\271\271X\275\264\027\017\276\226\311>\277\353Z\347>\344\314?\276\034\357|\277W\344\005\273\270Q\370=\3620>\275\0375\025\277(\213\220\2766\001\316>\003r|=\311qb7\346o>t\267B\277\3255\350\275p\363\023\277\023W\231>|\206\304>3E\256\276:\"C>\201\350\342\276\206\372\207>\312\210q\277\260:\264>tC\312\276\274\357\312>\242Mp\276\351\225i>f\n\231\275\3557\205>\202\003\266\276\341\321k\276\234S\374<\224\203/\277ig\201\275\342\370\206\277\200\313\277\274\332\252\025\277\361A\236\274\370\211\223\276\217}\222>\211\342\343\274\365\254\000>v\014\023\277\250d[\276\276n\213\275Q\230\364=Is\036\277\204U\355=\257\376\277=\320.\345\276\023;\333>\215\377\014>\375=\025\277\272G\212\275\351\022{>po.\277\342\212\004\276\244\027\023?i\241\216\276\177\036\320=\025\376\233\276\267\332f\275\374p*>\242\374n=\207\036|\277\023\355\004\276\350H\003\277\r\030\377=\246cK\276\347\223\271=\357\033\214\276\020 \250>\251\264\013>\312\362\355>\260|\344\275\212nM>WYc?w\307c>\356\272\014?5\t \276E\370 
>\361|S>J\324\312=\213Z\037\277\331\322\320>S\365V>ZE-\277?\314\200=\337\326\214>\314\224J\275Z\262\317\276\330\212\372\275m\021\022\277?\341\301\276h\212\250=\251\327\257=\337&\307\276.\036\263\276\266\210\024>\346\256\233>68\322;\204\214\022=%\321\217\275U\310\027?\322\312\013\277\366l\245\276N\315\355\276\320\225G>*\350\217\275\244\331\325>\ta\010>\244V\272=\216@\270=\277\0106\276\030\037\010?\236\267\264>\207\375\271\276w\323\264\276\242\017\200\275<\010A\276`s\020\276+\316a=\240\235\211=\364\340\251\276\226Q<\277\252\370\210\275\223G\312=K\270\322<}z_\277\034E#>\207\206\031\276Q3A?P+\244\275\247\256\252>4>\214\275j\014\236\276\024V\235=\320xw\275\336\341\r?\310\221\014\276v\240\272\276\253\"\213\276=\312\236<\366\006$>\330=\242>\377y\257\276x\255\200=AF?\276as\366\275V\251x>9\325\220\276\365D\001>Za\014<\254&\252>\364&\016>\324J\257>)\335\371=\276\224\233\276\301tk>0mG=\005\2734\27601\232\276n\3509\275\221k\026>\345K^>x\203\214>\221\365\001>\352Q<\277\377\024u?#H\356=\3258\320=i\nA?\025\317m>L*\230>\023\347\r\277eXW?\035*\207=&\321\215\276$\\\373\274\216V\272>\342my\276\243\235\234>\216\225U?\"\321\371\276\335\231\341\276\346\354\200\276\346%\030>,\302\221>\377\337\205\276\221n\200>\006\244\364>L|?<8\244\311\273\367\002H>\271@\235\276&\243\033\276SK\006\275\257\315\352\276\276\246\335\275\006;\213?,\031\177\275K\0219>\250\363\334=\3418+=\217K\317\276\204\363\337;l\350\326=\0304\217\272T\375\210\276}J\300>\'\033\273>@v\265\275j\200\004?\353\013\303\275\266w\207\276\225\2249>\264\351\307>\312\037\003\27716e\277\260\322\014?\217>1>N\232\224>\241\025\333=\257\276\310\275eo\306>\227\276\353>\257\210g\276\351\257\221\275\235\005\006?Z\212]?\277)\313\276\275T\035>\002\223\021>0\237\023\277.\252\300;\233>\263\276\221\r\323\276\336\210x?\341\212\306\276\271B\177>\034\203\324>\234\t\032?)\226\263>\253\333\351\276&T\004\2756\277\232\275\344)\207>Z\245\346>\250\316n?\334Ck=W\354=\277\250\243\241\276\'\006\203=)$\272>\022\2702?OX\331\276\377\020\320\276\304\007|\276\354\326\242\276\005!\212>\202\n\313=\370.\210=\226\323e\276\356\022\014\276/\207\336\275\275\322\032=\306\225\365\275K_\370\275\207\335\033?DBj>1\274\302>\365\264\220>~\326E\276\370\236\330=\275\267\350.\277\256>\331,\350\276\\\217\313>\343\353\253\2759/\r\277\254Q,\276vOQ>\220Q$\277G\320\230\276N\343<>\222\350\220\276.\340\347=\346G\005\277QK;\273\"\003\244=Q~\270>\374S\374>\377\340\237\276\003\217\007\274\351\034\020>Bo\004?\224s\020>}\007\233\276z\305T>\3435\205\276\025\002A\276{\026z\276\363\366\211\276^\377\377\276\214\211\224\276\345\022\014\277\303j\232\274\231$\017>\265f\351\274\262%\230>\212w\316;-\023\207\276(\377\333=O5\010\276\231n9>6\274\232\276\010\361\247\276\266\340\263>\335(\020\276j\274\316\274\305\266\n\275\225\257\200\276\371hI>\234\034\035>\376 
N\275\"?\r=\000a\252\275\274\211V=\254\233P>\233\327\310=U\225\225\276.\240\347\2759SC\276?\351[\276sx\245>\027\360\016\276\226\t\202>K\307A\276\231\316\\>\225\214>\275\327\222\337\276S1\222>\2565\202>\036{\242>n\353\005\276\357\026N\276\026|\224>:i\245\275\346\367(=\004\333~>\211\365\304>F\371\010\277\2567x\276\337;K\274*5\200\276T\250\307=\267\347=>\007\306\234\276\034\374\270>x\257\035?\213\337\257>\004\376\034>\326\246\025\275\024\'\205\276\"^R>\325\343\257\276\356\357\017?\035\243\213=\272\260I\274\236i\260=D\203\325\275\016Bv>q\3162=o\301\225\276\t\031\\\275\"\305\035\277\254zf\276\034\220$\276\031\305n>\026j\345>mMZ>K\230\266\275\205\377\202>i\017g\276\274e\030?\300s\007=\212\034%\277\330E\273\276-[p=\302\216\336<\210W\215\276\270\360\223=\201\307\342\276s\220\372\276\346\214\340>{\230\243>\234\031\255\276\020\201\321>\360\230\244>4s0?\201H\020?\323\013#>$U5\276\336\261\362\276_Q\351=iv ?3\230\224=\356\014\037>\330\332\364\276:\026h\276\255\342\r\2776 \354>`\264\361\276}\340\341>\221\327\376.\247,?\236l\036\277\337\355\324\275;\262c\276\315p+?\000\342x?\347\037\332>M\007\331\276\244\246\346=*\000\263=\320\017\251\276O\344\276>\353v\312=\256>\325>\215\3523\277#\355\301\275\326+\005\277wo\344>\301r?\276\3654\300>\346\027P>HH\207\275\276\357\342\275`>\010\2766\r!\275?\336\261\276\3161.\275r\216\323\274\314\372\363<\013\301\252\276\202\036]>.]\264>L\322!\276\214\206[\274\010u\323>\225\262j\274\373\323\265\276\010`\225\276\023\035\343\276\206\'\314\276\241l5=P\251\303=GL\320\2751\364a>j#6\276\t\265\314>\255\364\347\275\323\265Z>\212_}\276\276\204\303=\210\233#\277Xa\222\274\2577x\276\224\207\373>\300\206\325\276\361\333A\276\330\333\013\276`6\334>\251q*\276\255\231\021>\247\004\210\2750\354\242>\035i\004\275\207\231\240\276\\\365\221\276\036\366\316>#\326\373;E\335\'=\002\025\312\276V\237c\275\367A\323>h\023\371<\031)\261\275\275\232\246\275\022\255\366\275\314mK\275\250\300\314>\362\354s<\356\356K\276\331\301\231>N\226\210\276\026|]>\034=\300\275/\001e>\003@\000\277~\245\275>LMd\276\261W\024\277\376\223\216>\223\324\004\275@\000\304>*\007+\277\270\265\222\276\256\'\253\275D\331\251>\243\"\005?\302\360\332>q\205\214>\263\276,<\312\245G\2772\006\007?\255\016\263\277\250_\002>\242\334\260>\310h\t>S\223\221\276\214\374e\276\373\335\007\277c)\262>AU\010>\033\221\237\275j,d=\356w\037>\200\364\257\273\323\312:>\222A\232\276\324\326o\275\250\360\325=\014\025\202\275\372\272\376\275W\'\206>N\323\315=\325*\245\275\273\364\273=\332=[>\263\2346\276\037\230\355\276j\270\031\2776\232w\276\n\211\221\276E\003\350\276\313x\251=\251\007\313\276\336#\277\276\366\333\330\276JM\000>\014\235\221>E\362\262\276\243\236\004>7\002\325\276&\r\312\275\213\232=\276\323\313_=\023\210\314=#\212\353=v\240f\276\306\364\214>\373\321\t>\335\351\321\276\033\256\374=\226\271\345\275\336\351\276=\313\367\013\277SA\315\276\206\256\310=\212\263W=\235\241\376>+\266\020>\250\270\032\277\272\006*W(\277\000C\025\276\317\275\207\2769\005\254\276\353g\262\277\341\005\271\276h\253\006\277R\214\201>\241\206\310\276\277I\221=\216/?>\334r\">X\237\315>+o\232\275>\370\221\2760\361\027\2776~\257\276z\202\314=:\344%>\255\215Q\276n\277\316\276$\335\305\276B\337\371\276\226\237\343\276$\031\300>\335\223#=\324WB\276\211u\232\275\370\263\010\277=A\206\275\257\366\210\276\353\263$\277_\331\360\276\222\001\334\275\213\r\000\277U\205\"\277\220\263*\277x\030p>\235I\000\277\317[\034\277\270`\313\275\242\227u<\210\177\002\276\342B\312\276R{\300>>\030\213>\233y\036\277\240\351\372\275\335\336\213\276\2612;\275\357\371\270>=@\301\276-\302f\277\323\377\
033\274\2453\227\276!9\027>\333p\036>\306,\273\276\214\277n=\363\230\340\276\270p\306\276\365\367\262=\235\312\245\276\026\225u=\014\303e=\233\203\016\276B\217\345>E\237\345\276\231\005\201>\314\r\365\275[\221\353\275\314(\n>^J\363\274\177-\246\276a\372L\276P\363\203\276\356\202\271\276B\355\\\276\240\343O\276|\331\332\276u\354\215=N\233!\277\371O\311\276/&\220\276\360\302\272u\371\\>M4i\274\334H)=\320v\240=\213{\004=5\360\363\276\0066\225>\n\203H\276\023\177\177=\252\362\022\277\345=\222\275\031H\361=\332\224\t\276\314.\352=\3064\223\276*\036\264>\217\033$;\247)4?\256\346K\275C\344\241=U\254<\277\244\276\362<\352\036\245\276c\324\002\277\334\231\310\275\365)A=\372\n\245>\r\216\231>\270\365!>\323\344\341=\376\016N\275f\301\230\275\201Ms\276M\360%\274P.\343;\236\341\317\276\210\261%\241\256\254>c\3132=\316\334&?\246\014\013\277\027\022P\276\374?2>\227\346\324\340?\233>hAE\277!%\016\276\275\201\261>\177\027F>z\236s\275\326=y=\374\r\316\275\271\221\235>R\317\302>J\203\n=\243\304\021\277\243S\247\275\251o\360\276[\320\212=L\213\200\275\323|\267\274{\270_?_\034_>\025M\026?u$Z\275\334\303\324>\370g\017\275V3m>?\300w\277PJ[>fv\320>D\367\225\274\3063\017>}v\305=\251\216\240>\336\324\265\275+\202\363\275\326\337\022?-\255\362\276\022Q\226>-\217\010?\262q\200\276\267\304Z\276~##\276\337\016\342\276\320m\376\274\260\'\255\276\363\302\'>xi\375=X\231\337\2765.\224>\255r\032\277\217\356\031\276\375\274\271>\355\227\003\275\222\314\327\275\231\026\273=\336\214\210>t\231\230;\310\217\215>\253\017\311\274\317\323\276\275p\265\036>\2257\305>\0269\203=k\267i\277\035\343J>\010\214\244\276\347\305\361\276g\2769=I\246\033\277\233\021\304\276\035\276\'?\027w\330\276To\266\274\007\031\021>\315W\364\276&5\256\276}*\001?\250\372\265=\206\242\361\276\336g\226\275\350\2176>\313\"\231\274\362\270\217>\336\r\020>x\313Z=Wg)?\034\243\020\276;\2477\276\365#3>\026\377.\276\177\272\001\275PC\035\276*\365Y\277\255\232*\276\371\023\r>St\247\274\337\030#\277\027.\307\275S\365\252\276\3018\203=)#_>>\227\">) 
X\275u\362\364>\240\263\007\277#\005\001?\210^\301>\247}\024=\222\370\005\276\322\013\235>\037\234h\275\271)\352\275_u\260>\357\310\205\276\014|\207>\262j\221>\r\030\023\276\257\013\002\2775\342\242\276\230\3016\254=n\355\376<\315\376e\276e\355R=F\254\315>\033\020\"?\347\031)>]\237\277\276\005~3\276J6\214\276\362/\030?\346{\272>\334~\t\276h\360\227>mv\023\277;\031j\275\220l!\276A\234\273\275[i=>\250\316*?b\2161\276K_\007\277\\\313y\275\363\244\347\276N\231\033\277u\3119?\003n\205>\010\277C>\242@\303>_~b>[e\230=;\373\r\277\273\320u>\014L6\276%\261\262=\'\227\315\276cm}\276\030\324\323\275o\253z>~\257\207\2760Y_\277k$\204\275W\244\026\276\230.\320\276Nk\013>eb\333\276\261ml\275\354\370\344=\307\212\323\275\307\227U\276f\271\324=#P\341>\366\306t\277\345\362\201>\261\362\235>\221(\343\276\321\n2\276\303\2176>\204\323\372=\354\276\263=\312\251\312;\356F\005>\016$\240\276\2115~>US0>?\007\326>-\356x>j?\256>j\353~\274\327d\316\276\311\364_>\2163\254>\313\310\224>\241\016\266=\371C\006\276\314\201\221\2763\352\231=]\\\340\276}\n\'\276P\256h=\375X\213=!\250l\276JV\336\276\271\013\360\276\335\016\207>\232\320,\277P\376\363=\254G\230>\021\336x>\322\023\237\276\214\203\337>\017\336\257>\347r\342>Ay\266\275(\344_&!\004?fHV\275\026\2521\206\227\242>\320V\203\276\356\335\247=\025\0100\276\343\222\306\275\273k\207>\350b\347\275\\q\347\276x\265\233\276\027\2152\274\241\224R\276\242`E;^\252Q>E\261\203\276w\024_*\'\237\275i\227S\276\311\2717\276\215\'\337>\330\033\346\276\204\306\373\276\325\313G\276\031o\331>\345\223\371\275\320~\377>uv3\276[e\000?i\233\354\276_\325\027\276\254\344\003\277p\2052>Y\332\347=h\244\026\276^\207\330>\n\372\227\276\267\316;>P1\305>rM\014\277w\361\003\276\351U\327=\014T\225\275\314\210\311>\t\375\346>\322|\253\271\371H\270=\006\342?=(m\276>\022l\304\276t-==\370\327\311<\025\227\323>7\037o>B\306\037\276%\331 \275{*\247<\016[!>\212\016\201\276f\"\215;n\316\001?\220\241v\273\351\005\250>\222\341\234\276m\371\232>\257\322\031\275\212\341e\276B\240\205\276\267\246R\275<\266\026=?\022I\277`\231\365\275\201T\240\276\320\252 
\277s\237\224=c\rJ>\211\334\033>Y\236n=\250s[\276\230\205\330\275\032\025\202\276\243\345\333\276F\\^=\237\267\233\276R\245\313\276\rHR>1\212E>\227\025\023>\264\3322=\016Q\331\275\223C\223\276o\241\r\277\326a\007\275n\002\265>4\242X\276\325?\370\275^~\321\276Htq>\'gC>{.\037\277=X\343=\210\357\337\276\007\306r\276*\023\330\275\261]v>\3314\304=\365\034[>\"\n\244\276\217;Y>\t\033\r\276\204\023\033\277x\336\217>\306\'%>a\030_\276\300\240\235\276\202\347\325>\3039\267\275\265\324\277<\000\273\263>\225\216\\\276%\220\037\276Sj\236>g\243\302\276\014t\227\276\344\323\210=\275\226\215=!_\350\275\2118\236\276\246>N>\210|\037\277\273\357\223>\234F::\360\321\256\276\026#\222\275\276\373!?\\}o>\331Z\305>\217<\204>M\355\003\277\341\324\010?p\211\372\274H\036\3729\2130\221\2776\213;=\333\353r\210\233\304\275T\241\210\276]Q\362\275\312\341\017\276\315I\260\274\0179\344\272\274o\005\276\354\327\346<\276\254\365\276\345\267\350\275V\340\252\276\360\264m\276\363\212\376\275\253\027\344\2763\373\"?5\320\004\276/\204p\273ul\346>\257\004\237=\005\276z<\242\253\325>\322\245w>\037\211\256\276\363_;?\213\034\243>N\334\303\274v[\313>E\222\000=*\244\r>L\032\004\276#hJ?b\201\230\275\232i\240>\001\300\211\276\256\337\024?i;\210=\224\221\345\275r\006+?);z\275x\033\341=<\'b=\274\003X>w\257\352=\363I\213>1\230\355=a\333\372=\021\020\277\276\244t\305:\216\216\257\275e\303\n\277\004\'\212\276h\3464>\267\334a\274o\033\354\276b\177\000\276\377=(?\230\330\323>\336\233\376>\022D\261=\214\035\344\275\253\352t?\001\014\204>\2236\006?\\\340??\376\272\374\276Y\206\023\277\032\n0\276H\003%<\000\206\364>\352\316u?\320\303\201>\267\243i\276-\025Z>\256\372j=\327)\200\2764\376$?\263U\200>\307\220\013\277L\316\342>P\020:?\241\306\242>\321\342\007\276\304ir\275\326!;\275\233\202\324\276\241J\255\276\201\034s\275\271\336\347\276\2756\355>\210&\031?T\254(>M~s\275\302M\364\276\241.\255>qx\343\276\n\330\225=\025\315\031?!\345\002?\266\006\010?q{\360>\257\202\215>2\2273\276\306\220\270>\303\302o>U\\\214=ggq?7s\252\275\014$\236=^\276\363>5\262a\276x\312\005>#\264\264\2762\035\000>\243}\362\275\332\253\215\276\301\334\007\277\360\216)>\\\010\003?\027\224\211\274\010\201]\276R\315\030>*\252\347=\320\237\247>K\361\245\276\345\316\350\274\361\375\336>\226\275\253\276O\031q>\"\225\244=\037./\274\254y\233=\367=\001>\r\217\010\277\337|\277<\223;\256\275\357\\\263=\351\000\010?Q+E\275\035\274\305<\312Z}=~?\034>\035\316\002\2769\0361\277\260$\001\276\033\341\016\277\027\313$<\361c\261>\037\257\220>\354\252\255\2753\321\003?J\310w>\202\354\301=|\005Y=\nK\020\277\t\026\255\275\205f\307>\365\342\n\277\006\344\363\276;e\324>\224G\027>\007\027\306==E\260;\013\243.=\351\273\374\2755z\212>\013\256\325=\276\267/>\263\232\363\273\252 \204\276\311\335\240\275\367f\354\276\261B\253\275\336\324\315\274\206(\204\274\215\r\020\277\252\373\306\276w\275\217=\214<\002\277~\212y>\204\311\234>gB\211\276\335\356\317=q\313\330w\2239=Dzb\276\002@1=\247\306\251=dAz>\227\264Q\2758\220\214>\343\226\327\276\236I\261>\024h8\276f\361K= 
b\324<\3105\206>\327rs?\310\222\310=}\374M=\343X\301\275\021\336\344\276F,\260\276Q\257|\277\334\033\017;\2636\022>\372\245R>\223\312\234=5\n\203>\322B\\>\241w\212\276\234`2\277j9\377\275:\231\251\276Y\247C\274g\213%\277\347\257\307>\275\206\220=\216\363\324\276\311\216\247\275\370\366\020=\263/\025>5~\266\276*rC\276\301\025\'\277\321\365\030\277\251\224\325\276dM\003\277O\014\201\276u4Y\277\306\260\321=,v\007\275\214f\\\2775\211C\277\301=\007\277\266\316\021\274\373+{\276d&#>W\223\264\275{(\211\277\201ma\2767\243\345\276\351\000\245\276~?u>\034\202\363<\r\273\211>, \366\276_o^\277\013;\274\275\021He\277\005V\337\275\035\266\344\275\'\252\004\277D\241\005\276N\373,?\030\000\240\275\036\340\006>GC\233\276\344&\204=\335i\010>\010\276\344\275\225[\325\276\200@\020\277\271\177\311\276\306\272Y\274E\266\326\276\310M\245>\357gg\275\241\241\234=\304\215\340\275\235f7\2773}\030\277\242\200\226\276\244\335\344\276\257\200\275\276ao+\277\242>\003\276\320\355\265\276\221\241\035\277\327\002<\276\351\325\023\277\2470?\276\020\215Z\2760$\026\275\366\372\210>\030\005D\276\353\0369\277\315\301)>\022_\257=\032\306\022\277\243\210\244=\3050\377=Y\324\324>\263 \007\276\260\257\226\276l$k=\251\2534\277\2529\251\276\023\231g\271:\014\027\276\000 \336\276\212\217\323\274\276S\342\276\340\024\220>\021z\025\026\016>\244\004\020>?5\231=\2331\251\276\315\t^=\231\017\262\275\022\230\326\276\3618\355\275i\311\311>\253\357\202\277\322\200\353\276\316\2550?\340\221i\275\033\344\370\275O\232\236\276\343\2119\277\026.\035\277\333\n\243\276\203\031\311\276\003\301->\275\r\373\275\365\036\314<\272XT=\214\302\231\276S\203\t\277{\233F>\tL\257>\217\227\354\276\r\352\360\2769\243c\276\345u\201>e]\254\275\353\312C>\373]2\276k\'\325\276\307\257\233\275)\177\273>_8[\276\376\\\220=\277F?\276\367\013\235>\033bG>\331s\372>\377\343\2719)%\247=\013\365\t\277p\274\010\275\002\303\006\277\334s\004?\010Y\000\276\347\2467\276p\207\222=\243\030\230=@\314\030? 
\267$\276\266\320\272\275\311\323\231>\323<\204\276\202s\325\276X\027$\275\241H\027\277\372G\346=z\305\356\276~%,=A\247\313\276=\236\007\277\326\261\263=\006V\001\277\373(\355\276\"NL>*R\034>\325\213\037\276\211r\226>\346:\000\277\305\263\276>G\023\243\273T|\036\276\341\225>\275W\377\213\276\3625m>yH\245>\201\255&=\264>\004=\367\225D\276\273\274/\276\276\036\266\275\337*\226\276\374\367\227>U\177\365\275\373\363x\277\323vh\276A]->\277P1\274\376\324\266>v\024\263\276\037z}=\226\305\030\277E\376\204\276c\206\232=\027\023\r?u\213\245>\023*\245\277\330q2\275\034\376\207>\225\263a\277\3572!>\212\233\306\276w\270\322=p\322o<\270\302\037>\254\006\301\275\357\326 \276\023\224\325\276\006\004\016\275\322\000\240>\003^\000\277\237-\302>\002|4\276\333i\347\276\313\352\220\276/5\r\277D$%\276\276\233K>\266\232\367\276\275\366\356\2745\254\r\277\036\227\222\276\017\353*\277\010\211\003\277m(\230\276\212\201#\276r\376\324\276#\377\003\277\375\216\302\276\264\373 \277\220\202[>\334\034\236\276r\2733\277D\310\264>U@6\277\362O\020>\263*\241>?\244\004?.\340\264\276\032\221\334\275\367]\005\277\275i\247:\'\306\241=\001~\225\276\013\034\034\2770T(\275ApZ>N\021\220\275\033\321\275\275M\341Q>\232\222\250\276$\270;\2771\216\335\274\2129\017\277i\026\213\276s\206\265>\220x\024\277\322P\020\277\\\367\346\276\321\325-\275?\305\343\276\324\226\r\276\201\200D\2771\201\032>\264\370\225=V\376\004\276\3067.>v\277\023>\270\264n\276}3\t\277x\322\321\276fm\371\276\345\027\025?q\207\204\275\tw\272\276\\u\374\274f\322B>]\2701\277\375$\352\276\276jk\276{\275\320\276\375\372H\275\264k\272=\375\234\031=\355\002\372>\177?N\276\320\036\004\277d}l>7\373*>\312\231\373=\332\242\024\274\214\343\213=\351<\200>\262\326m\277.\204\235=_\235\367\274\'\001x\275\022YC>l\336\257<\311\314\272\276\355\247\203>p\334\037>\034e\274\276\343` \277\213y\215>\363XG>PIM\275\377\005\233<\207\013\212>;fe\276[\023M\276q\231\010?\025\322\240\276\246v\202\275iu)>~\260\306\275\270\372\003>\210GE>\n\210\241=*\315\222\276HU\305\273\276\370\376\276\215!\360\276}\300t\275\303\354\336=\016\260\013?\340\032\001\276Py\303\274\017\030\376>\211\230\275\275\017\314\334=\341\257\224\276\326Z\005=\326\272\312\276\0267\037?\004\256H9\251\027\224= &!\277T\3606>\275\217\335>\036/ ?\352&\334>\343\302\267\275\315\354R\277\024\364\277\275\303\020\364<^\375\242=\212:\\>\202\016W\276\264{\177\276\267\224\222\276s\312\310>i\340\310\276\312\024D>\357\271\267\274\n 
+\275\033\233\242\276\265\374\035\277i>\217\2766\203,c*>D\300\230\276|\372l>\265\335\225\274\013\243\030>C\3279>\247\3730?\374x\002\276\026+\330\276\330O\315\275\005\376\224\276\265x\274\276j\241\326;\252X\001\277\2567/\276\311\030\246>%4\005?\3012*?OEF?<\356\327\276\177t@\277\202\320/\276x;b>\323\027%=\351\022\346\275\n\337X\275\362\336G\276\226d\223>\374R\034\276=\3460>\350\340\023?\304\355\346\274\036\366\347>R\360%<\021#\014\276\332\324\025\276-\336\367\276\305a\020?\006\331R\276\333\"\215>\227\370<\277\007\302]\277\346\013\017\276\253\001\000\277\354\212\212=J=6\277\317\013\356\276\330\242\206\276\344\324\r>\\\266t\276-\356\255>\205*3\276\203\014\211\2764\346\210>\241\240\032\277\202\237\263\275\351\237\266=\032\327\204\276M\345a>\232b\017\277\016\257\024=\366iT?F\'\036\277\350\306\246\276\344\232%>\327\233\245\276|\373\030?\030\362\215\276\334\372\276>\370\223,?\247\303\235\276\247\3534>d\206\254=\334\377\331\276Z\241`?7\350\256\276\355?\271>!\252\265\276tZJ\276\262|\216\276\232\364\312\276\013\351\023=J\022\300\275\2444\253\276\312\000\206\275\266\034\'\276\030\006\177\276kFX\276\241\035E\277G\303j<\207\332}>\241\"$\275\205\273\034\276G\246x>\203\365A?\244[\002\277\016\260\323=\264\245\221=\\0\350\276\203VY\277\374\320\376>8z\205>\302\244\344\276WT\\>,#h\275\303\036\027>\361\"9\277[H~\275\374\215\362=\314=\216>\346\035\000\276\006\302\032>\261~\326=\335\033@>\261T\026?\013\351\177>\243z\232\274F\325\231\276\215\221\250=\301`\346\276<\222\225\276\355\031\200\276\205\250\347;h\212\266\276\023;{>\177\261\266\276\001!\334\2766\364\031?\322\037\032?=\272\213\276\303F\340\276_c\r\276\303\263\327\276\336\203\220>2\024\257>\246G\026=\017\313\023>\316\311\000\277#\223\324>\n<+>0\251.\277\345\317\033\366\354\331\275\263\332X=M\315\267\275\226\001\032>\277\207\215>7\240\232\275\255\202\261=\256\374\013\277\200\306V\274\245\347\307\276\036\026\300\276;M\316\273k\255\223>\324V\261\276\311\323\\\276\321,\333=\312\273\344<\262\342\224=\2518\242\275%]\010?\276\250\207\276\266\036\260\275\312\352s\276\227\355$>uK\003>\355*\327>A\277`\275\236G=?\376\203\305\275\210\rO>o\346A>F\023]\276\317\014\257=,D\206\276\240A-=j\034\206\276\211a\226\275\026\306\310=PE\t>\r=\302\276r\024)\277j\267\032\277[\026\312>\006\366\200>|R=?\033@\004>y\235\340\275\035\0143\276tn\356=\202\004H\275\326\310\204\275\255\357\211\276UA\321\276\2120K\276+\363\032?\205e\261=,5\221>\246O\"\276c\235\224\275\014(\377>X\215&?}\347\377>\214\377\033?\206\016\341\276\'GR>\005:\177>\010\256\036>\276\300\374=\322\353\365\276\365=\332>~f\363\276N\277X=`\004\001>\302\330??\027M\253\276\273D\256\276=g\372\275\300\030\250=`v\364>\220(\035\277mjs=\0017\244>\373\273\337>\303\274\t\276|tZ\275\270\233\r>\305O\020\277\324\277\326\276Y\341\253\276y\333\261\275\211\375)\277\366B}\276\355\025\355>\025R\260>>\337\253\276\375\352\010\276\332\210\341>\005\256\030=\025\303\346\273\240\003~\275\357\272\036=\007\372\350\275\314\371\002\277\206\345\020\277\022qP\275\205\347\301\276\323\226T\276\253\301\033\276\342\302\374\276\363\300\230\276j\313;\276N\367@?\211f\277\274\264\020\334>m\214};\245t\373\275\317\365\337\274OJ\366=\200\214:=$\360\257=\345!\014?\037\350\006?\220C\200\276S\035\211=(\232\310\273~x(\275\232\013\336\274!7n\276\205U.\276\003\177\200\276\322$\350>\217\326`>f\035e\276\202\005\314>\007\320\217\276C0;?\275\250\201=\255\242\372\276\362\277E?\260\034\004?\220\220\343>\204\036I\277\263c\322>\234.\303>\026\013_\276 
wr\276\021!\315=\004\240\264>lb\200\276\035|\037\276\030\262)<\231\r\006?\021\273H>\023\316\273=\335p\306\276\237*\344\274\023Z\264=\341\275\376=\311\250\225>\320\t\324\276\316\263f=\005\216\227\276w\355\n?\204x\301>\\\314\r=\322w\177\276\006\311X=x\037\307\275\211x\373=\274N\026\276\005\360\234\276\302\275\205=\236\361\351>\337\034[=\330% \277\344\364\216>\343\205Y>\2108;\276\231L\205\275\334=~>Oi`>\370X\355\275:4\271\276\223\031@>\370\2645\276#\212\250\276\013\215\207>\031\346\227<\003\302d<~\014H>\244\207\246\275]9\337>\215\225\311=0\323\244\274i\205\323\276?.\036>e\024\'\275\305wP\276\277\274\277\275>J{\275b}}\276O\242U?\212^\025\277\343\261\242=\005\030\212>J\363\235\276\221\273\r=\226\2109>X\371\233>w\256\325>\313b\212\276\331\016}\276*q\234\273\240W\010>\n\005Q>\025\245\376\276\r\252\223>\370%f\277\352\205E\276\214\305E>\260\372\010\276\3659 <\207I\220\276\244\"%?\020\321\204\276\354\300\265\276o\016;\276\350\031\223>\225L\177\276S=\232<\247\243\"> \315\353\2733\252N\277%\320:>\254\204\">\343Y\031\276\302D\032>\254\2772\277\\\216\275=\322\366\214>\314\233\223\276\2622z=\362a\030\277\255\035\375=\311_X\276\305\367#\276>\224:>0L0\276}#\354\276\234\211\247\276\002\004%\277\257\245\357>\307\234\\>r!\032\276\272d\014>l\345\245>\340w\037\276\304\313\021\277\\f/>\262\366)>frI>\352\371t\276\222\221 >U9\007=\010t\017?VK.>\227\315\374>w+\374\276\221\257)>x)\\\276\334\r\000\2760\235 \276\222\242\365>\276\331\313\276\013\226#\276\272~\271\276B\363L>\364\257\005>\333\244\000\277mh\331>i\334\002\277\346!\263;\364\"Z\276^B\326\276\215h7?\233n\252\276\243\202W?\316@\\\277\275\005,\277\243\273\264\274{o\240>Gk\234\276ZS\216\275\261g\234\276\247\035-g\363\364\233\217>\341\337o\276\2267\311>\211\3165>LE\010\277A\005\204>2\307\272>3\004\222>\307\376\014?\327o/\275\235\207\005\276\370Z\"=@\352\237\276X\341b?\006\217,\276y\335o\275\212qT\275\342&j=R\335\204=\233\317\302\273\333X\346\275\0166\251\276_\203\016?k9\206\276,K\277=b~\'?_ \346>Za\030\276g\273\000>\347\377\252=\363<{;U\264\035\276\033\036\005?\272\306\017\2773\310{\276\220y\203>\031#4\277\\\337\267>\244\336\226\2751D\337>)\022\301;\262YH\276\241y\313\276\347a\036=I\373\001\277\2572\313=\327\370\362=i\210-\277\345\346\303\276\323R\361\276q\274\263=\352\206`=\203\355`=\004\324\241\275kJ~\277\370\303d\275\351I?\2777\302\301\276\026\007\305\276\037\343\326\275\245\201\325>\325>G\275\241\257\307\275P\"\330\275\362\341o>~\320?\276c\251W\274\315\242|\276\344r\253\276\262\321\351\276W\205\343=\206\274\247\276\000\025\022\276aw\036>\327\236\205>\241m\337\275]h\204>\350\206\006<\002\234\210\276\320\241@\275\017\333\005\277\370TL\276\254,]>\374`\032\276\2653\207\275R\271\334\276\024\232\212>jM\212\276Pih\276\355\014\273>\253\277\017?\245\3176=\265\226\325\275\265[F\275\241\"\203\276\306\236\352\276\267\277\335\276.\003e\276\221?\222>o\307\275\276\362|\354=V\322\205=u\002\010\277\242S\007?D\244\225\275\373\033\034>PR\234\276%L+>\020\n7\277\245Q\270>m\265\004\277\334\261R>\245\025\242\276 /\005>\324\216\241>\214\316\346\275\t\010\222\2762$\321=\300\354\203>\037\300\305\275\207\367;\277\307\020\215>\372\303\217\276\021\242+>^\202\377\276\251\203_>\325\344(>\327\253\323\276c\0079?\353\030\000\276Z\254\351\276\343\276\305\276\310\265\362\273r\204\311\276\363\234\366=\375\302\313\275|W\005\277\374\276A\276\004E9>\204y\215>5\216\003?\306cL>\310\217\205=,b\242\277\236z^>\301N\260>\n\333\234\275\277qN\276\343\360\200\273\247i\376\276\363\344\255\274\247\316\030?s\210\004\276\036\021\307>\212\035\307>\351\004\360\276: 
\201\276\273\347\341>o\233\013>T\353\221\276,D\n?u\014r\276\366#K?\207\305\200\276\305N\037\276\n\302\361\276.\010\330>X\220\r>\242y\341=\230i\251\276\003\024_\276i\270.?\354y\005\277\253\204\231>\324>\304\275&\315-\277X\355\316\276\307\224[>x\256!?\207\'\030>\256\035\301=\225\264F?}z\335\276m\223\322=\n)\337=\212\014\326>L\340\036<,\326\265<\321\217\263\275C\244,>\306\316\220>\204\3169>#ii>t&\233>\316\347\t=\242\000\017?\264\234\212>\377\216\340>2\255\000\276|3\016\276E\352\000>\202;\320\276\000\252\233>\373\023K\276\232+_\276\"By>\326D\223>\262+~\275\362\206\001\277<\025\264>\264n\314\274\034x\366=\256q\013\276\r\016\223\276 :\022\275w\375\256\276F\213\354\276M\3402>\355g9\276\005+\356\276\214;x\274\n\207%=\333 \223>\260\204\247>\212:\037>\005p_\276G\027\177>\032\023\334\275\t\024\242<\241\"\212>\231\321\177=44\034=\321\014\r>\211\335\212\276E\336\205>\241Lv>\372*\210=\244\306\022>+\022\247\274\231\312\202\276\203\215a\276.\2224\275\315z\300\276D|\365<\322\345\022\276\020\2158>r\217\200>;`\350>,\232\265=\026\0226>\237\300\257\276\276\372\252=\256\2442?t0N\276\027L\221>\344\327\345>\005\221\242=\377\357\202>\340\213\370\274\210\233\314>\330<\240=\002#\277\273K;\322>Z\354\245\276j\214\271>\177\323&\275X\360$\276\260\024P\276\316\003\010=Kq\370\276\251\222!\275d\025\243\275\376\200:\276\\\230\305\275\300\266\354\275u\007\213<\350\335O\276\207\216\320\276\352?\006>\235\021\030?\337\357R\277\341\276\363\276\373\242}>v\307.>I\005\242\276\t\304\243\275\353\311\321\276\221H\t?\355\344\355<\304O\205\276\236\341\345=jq\004\276m+\n\277\006\025\317\274&0\272\276\001J\004\276>\323r\276\363\276!?b\212d?b\274L\277\0001\203>`\023X\274|\346\310\276xg\t\277\327`\214\276\210\n\253>\337\232\353\276bb\273\276U>\003\277\375,\037?\000\\\002\277g\215\022=l\236\t>\344\302J>X\332\213?\177\312\346>\266\3168?zK\246=Mj\244\276N\206\017\277\243\021??\336l\344>\204\251d>9\014\201\276\304\203\t?\363P\210>\306\334\030?\242\257\256>r\233\206\276?\222\234>^+\304>\274\020\311\276n+3\276&\251\233<\262 
B?\207%\003\276RD\306\276\276\017\301=,\214w=d\226:>S\252\241\276\332L\036\277\307/c>&\354\336\276\310\202\336\275+\366\357\275P\361\203\276\375\321\330\276\007\372\230\276%\r\315\275\023\256\320>\030*k>\347\021\246\276m\271\366\276\200\331\035\276\322\272\010\276\324\230D\275\241aF<\361\177\232>\n>\370\274\010x&\277\274\021\016?b\245\322>r\033\007>\350\250\332>(\363%\275y\241<>(*\022?\3420\364\276\234\240\227>\276\301w\276:Z\001\277\317\016b>\307mz\275\032z\013\276?\235\r\276\201\027\250\276\264\341\t?\263D\310\276$v\246\276\r\023\344\201t\213\276\252\260\"\277)\242\010\277\024\347}\276]>\031>;\003\366\276\230`\257\276A\272\253\276/\345t>j\304D\276xu\214>\322\030\232\2755]g\276\354\r\006>@!\362>r\r\002\276\324\031\337\276\375\0066\275\302\017\033?\221-\276<\215\365\376\274v%\377=\010A\354\276N\213\374>)2\356\276\260\3034\276\301b<\276\231R#\276H[\327\276\315\226\250\276\302\031\205\276\344\310\355\275\306\2266\277\254z@>;M\230>\207\247\223=\263\356\257\276\354\262\262\276\351\332t\274\316h\253\276\352\250\352>g\254\261>\352\3668\276\341\225Q>k{\321\275e\233g=\243\356\241\275\220\216\217>\331\r\266<8\311\232=\214~\023\277\177\315S\276\343\226\256\275\322J\351\2763GR>\332\3145\277)y7\277\210[\005\275\235.\216>A\005\217<\323_\357\274\271\344\302\276i\363\200=I\021\204\276\350\243\233>\307x\217\276\317\256\362\276!\211\237\276+\224\034\277AJ\r?\255\356\237>K|}\276d\357\035?\2633\'\277\001\220\023>\231\202\376=\237\351\306>+\334E>\240\352}>\267A\316>\266\356\261>\241Z\332=T*[>\276\275\363\276\333\363\200>#\304\003\277\026<\274>\253\036\342\276\270V\342<)\264\007\277iqY\276\263D\276>\212\201\372\273+\300\255\276\264p\306\276\254\254\241\276\235\321M\277\334\376+>H\212\317\276\234\0220=\ra\222\276\277\220+\276\305\301N\275\230W\201>k^\355\275\242\347\014>U\311\t\277\0312\032=\341\203.\276\004t\373\275\033^-\277\036\"\005\277\320\235\222>\215\365\351>\335\006\206\276r\022\265>\300O%\274\226\250\037>\254\374\231\2753_\027\277Y\325k\276\215\250E>\201\320\303>\275\357\246\275\345\023Q\276\033^\003\276Kih>&\006\234>\272k6=\253WL?{#R>B\347\262>\347\3041\277\020S\247\276\005f\020\277=\246@\275]\244]>\355\374\n?\245s\253\275\202\374G\276B\240\206\2779\272\353\276E\3504>\270\001\226>\345c \275\2613x\277|$\224\2743\276x\276@\355\034\276\021\027\227>JS\234\276.\3170\277w$\315\276\225\000\330\276\342\001\235\276A\357\211\275p\362\230\277\265\373\024>\265\350\033\277a\305\240\276PJ\300=\226\376\010\277\350\\-\277\000\217\271\276\\\362 
\277\2551\026\277\003k->8/\210\276\250v3\276\352\227\230\275m\236\220=XZ\001\274)\3027<\210D\010\276IR:\276\337\rl>\344m\001?<+\200\2775\004\241\276\324?c\276\236\346\232\276\342e\316\276Z\030\036\276\357\216\233\276\271\0362\277\311\310\330\276&\250L=\260x\223\276&\r\324\274\312\021\306\276#\000l=\215\210\035?\017\316\023<\317\242g>\271\347\204=\330\324J>\304x\346>q\363~\277/\235\207=\252^\020>{\nc\276ga\321\276p\326\030\277\323~\022>\330\212\316>q,\245<\243K\275\276TG\354>\300\327\222>3\350%>\013<\034=\203\371z>j\332\t>/m\244\275\250#\017?\024\241\323\276\245\244E>\251\347\240\276\227\356\034\277\221\310.\277\223G\212\276\275hh\276\362[\006\276\306\'\022\277\235a\345\276S\242\000\276|\210\324\276\254\232\207=h\225\203\277\241\001\216>\307\333\300=\200\\T\274\276\016\303=\251_\001?\325\231\325\275:\235\005\277\372f\310\275\031!\250\276\032k\225=\317\377\307>\344\376\237>\331\372\031=v\362\250\275\200\333\246\275(\2762\276\214\273\357\276=\256*>;\311\264\276}\235b\275\\\331\010>\2649\010>_8\302\276\274\016!\277\316-\266>r\3459\272\276\026\223\276\006\356Q=\256\025\272\275p\033\314<\364\001\266\276\347 \235=\207\237\243\276Y\262\362=\340G\016>\312+\371\276L\274,\2778\306\025?/\270\033\276a\033\004\276.\206w\276Z\177\217\275\234{\t\277N\320\331>,\001A>\306\0132?{,Z\276Lw<>\023\020\322>\375\025\020\277\305\246M\276\310\210\207\273\330\375\232\276\210l\215\276Wd\235\275?\024(\276uf\033> \336\255<\177\202\023\274<\272:\276e!\217\276\366\273\177>\270GI\275\322\355\327=\014:A\276\214d\025\276\257\2339\276\313\000\345\275\205\326\303\275\325\375\005\276\357\221\307>H!\203\275\023\302\004>\235\332\244\276{\356\002\277{\242\222=\2505\004?m\210\350\276\236w\201=\013\213t\275\177\263\003>N\310R\273L\217\273\275\376A\037>\244j\274\276O\320u\274\235\345\300>\271\303<=H\340\253>g\204\337>\3672\010=\332a\307\275\256W\237\275\270.\t\276\216\311L\276d\266X\276\212I\250>Y\016\031\276\r\210\246>\202\036t=M\365\307\276F\274\230\276\327C\016\275x\241\344>\353]\255\2763\224\027\276\341\220\366>m\327I?\307\360\245\276\026[\247\017\320\346\276M&\334\276!\033\344\276\360\274\233\276\303Y\256\275\'\303\030\277\233w\r\277\020\300\">o\245[>y@\334\275\231\375\252\276\034\200\300=:I\004?\"\3139>\031\243\350\275\337>P>9\236R>G\340S>\312I\346\274\352~7\277\333\021\230=\031\237\356\276\r\214\345\276\002\025,\276\312:\001>/Di\276ej\263\275*\014p\275\267\261\322\276k\264\304=\215,\363<\023\225\376\276\352\2536\277\027mJ?\t_\243\276\345\004\313>\025-w>H\\\222\276\225\3426>\372\224\356\276\222\005\034\276\026\374\001\277<\303\216<\211\024\262\274\001\314\236=\313\233x\276E\245\342>\035j\303\274\217f\351>\202\335a\276\360v\014\277:4\257>\312\243\275>\376\017\244>`cs\267\307jz\277\226k\267<\371\036\326\275\036\0146\277\320\"\302\276\311\235\302>\234\023\200>\371\341\312\276\233\205\374\276\247\262\177\274(\211\001\277\"\031\252=\262\206\365\275\252\234U?\367Tl\274f\374\020?\326\322L\277\267\022\374>\024\263\336;\365\314m=\254\007\206>\010\010\311\276C(\027?\354cG\276\340\021\223\275\325C\232\276p\327\326\276#\0366>\312\246\024>\334\335\377\274Y/\"\276\366\270\024?)\013U\275\370:Z\276\306\004\233\2751\n\201>\340q\'?~\247>\275\244\372\250=\317\006\204>\322\236\317\276\357\231,\276\020\253\222=\242\273\240=\357\340\014\277uh\201=\037_\357>\nt\004>y#\021?\222\242t>i\304\323\275\031$\354\276\230\212\363\276\024)\022?\017\215\313\276\014\224\031?N\363\234\276\036\262\372>M\304\017>\242Lp>NB\035\277\n\341\360\276\232\361\033\275\220-\224\276N*q\276\306\235#\276\201\242H\275\020\367\231\2760\347\245>s5\247=\243\032@>\234T\344\276\254\374
5>\340\236\006>\237\006\257=\222@F\273\236E\257>2\224f\276\010\t\323\275M\330\r\277f\010\033;Qa\214>.V\323\276\371\365\007\277\345d\225\276`\377\346>\324\n\271=\' y\276\032:l\275\226\205\002>m}9={\016\234>\226eO\276D\230H>\276\306\333\276N\n=\275\255\250(\276\014+\251=Q\325\316>h^w\276NC\222\275\277C\364=\362\001\"\2766\0142<\316\002\t?\240\257\345\276\255\356,\276h\221\372>\273\200\263<\222\334\216>\201d\201>\241\341\020\274M\365\246\276[\231S\275[%\n?\224\207\271\276\ryr\274^\360\204\276)Iu\275\362\307\357>$\251\002>z\377\350>_\016\033\276=C5?\262/\371>a\372%\277\254\330\271\276\316\n2\2774]\341=\305\355\026\275\024G\224\276\357>\201=;\275\246=Z\354\317\274C#\010\276\217\271\335\276\230\2356>a\272\022\277iyY\275\343\033\226\276K\237(<\002Q\262=\2505\305\2764x\346\276\234\364\233>$\t&\276 \026\016\277\\\'\242>mmo>\326\335\351\276\364\376N\276\031\272\266\2760\316\024?\\} =p\347-\276#C\360\276\'\006\242\276T\262H?1\207\355=\343yj?\327y\311>\r\372\211\276\365\323\336\276\363\003\262\276\027\005\202?|\301n\276\221\305\240>\r\"\314=\375\233\357\276\355\266\">\277\351\013?\375n#?S*\342\276\345\220>\277(s\222\276\324\255,>D\245\300\276\327a9>n\320n>\177\313\005>_Oc\276\366\035q\276\277*\206\276\330d5=\333%\206\276\200Z^=v\334\264>\025#\306=MX\003\274L\223Q\274\246~\255\2769\332e?\013\341\205>/]\016\277\336\022\336\275&\014\347\275\202\220\371\275\3143\253>p\333\317>)2\n\2771\220\277\275\374\215\306\275\247\335\027\277\311\326\251\276R\266\004?%{G\275U\300\343>D\374\202>\025B\225\276\214m\210\276\212\311\224\274\275\357\256>@\277A\275\322L\243\276\314e\257\274\227\300\252>f\027R\2766\353\317>\373%\347>\0172\224\276\267V\033>\235!\230=\013\305\036\276\337\251\311\275a\301\210\275t\020\367=\004K{=y\245<>0\227%\2761g\251=n\331\220>\354k\226\276\327QL>\326\037\214\276\316\013x?\262\273\263\276\322\201\010\277\310x\365>\006Pc?!q<>z\214\325>\363\021\026>\226\335.?\217\321+\277\235\316\332\276\242?e\276y\365\000\277\313\023\220=\241\276\313\275\365TG\276\035\335\251\276\211\035\370<{;\032\276\361\233\257\2765\345\322>\016XI\276R\331\327>\357\265\343=^\314\024\277h\332\331\2762CX\273\363\223\344>\256f\232=\016\r>>:\022\321\276r|\356\273\313\021q\276Q\215\020\277\t\306\001?Q\265\240\276\371\003\212=1\373:>-\313e\277\220\366\215\2764\r\007>\275v&=F\324\247\276\267L\203>\353\261\351\276\207Z#\276I\030\203<\035<\037\277)\317==\275\254\021?\357ap\275\367\247E\276\330\363\230\276\234\336\013\277\0236\224=\021MG\276 Gj\276\305\245\256\2761\312\243\276%\013H=\355\362\217>I\322\024?\373^\002\276\302\317\241>1\376c< \277\207>\243\205I>%\264\222>\345\344.>%\266Z>SF\345\275\205\251)\276\322\263\177>\306\272\313>\271\367\014\276\263_\233>Y\031\314=\274\002}>}\266\005?\342\372\335\276\357)\210\276\200\315\025\277\362\027\344\276\342X\273>\021tF\276\004\361\256;\263\345\223?\017\222 
[... octal-escaped binary data (tensor_content bytes) from a deleted test-data file omitted; the blob carries no human-readable information and is dropped here for readability ...]
\351>5E\251\276\246\002\257\276\313\252\211>\337\225\334\276Q\220\255\273\022\360\315\275\347\007\027>i\nm>#\277\373>a\202\023\277\246Z\310\276\022\351!?k\350\006\275\030\3011\277\3133\211=r\0335\277q\260\r\275=\377p>\004g\r>ef\331>\005\247\225\276F\225/>\n6z?\306A\003\277X\237.=\220\216\244\276\344a/?\275v\256>\254\034\026>\036y\245>\352L)\276\275\214\244>\334\001\305>0j2\276i\330\353\276\271}\333>T5!>w*\341\275d\330%\276n\332]>[.\332\276\227B\227>\354\215Y\276\303\264\316\276[R\365\274\031I\210>Z+\031\277Ad\273\276 \037\236<\2017L=\240\352\343\2737\224x\276\250\022\024?\301(e>\r\034b\275\341f\200>/D#?\312\377\227=%\213\351=\232`\304>\345~\013>\036\230\354\276\255%S\276\320l@\276#\014\034>\354\263\243\276\2642\347=\216\242\257>B\336\235>\345\002\226>\375\014q>\366\247\001>\230\024\270\276\".J=C\035.>\231_\206\276v@\320>\204`2=\364A}><\303\310>\274\332\240\277\210o\310>n\321\035\277yf\032\275oej>\331*3?A\244u\276\302s\022?\007\022\376\276A\224\205>kSN?C:\301\275^\023\345\274\350\277\304\275\215\320\023?\370P\332>\233\366\004?%\232\366=\372\206\245>2\251/\275\241\362d>V\3427\277\025\326\232>\255\350\204>y\254^>:7p>\257\t\221\276\274^.>\032\233\367;\335\337~>\013\211\222\276\262D\032=\020\'\006=\373\267\016\276\271\264\266\276-\302\334>\311\205`>\001\316\036?\211\353\260\272\265\332\022\276rD\251\276@&\254>\0104g\275\355z\024>\200z\351\276\350\013\237\273f\316\364>1\302\227\276\206\236\361\276\361\232p\276e\220\262\275\327\177\326=vj\300\275n{<=\366\211X<\177;\264\275+\334\325\275\343\216B>\014\t$?\347Z\253\275\350\375\236\276\333\344\003\277G\345\371\275\241)G\275@\246\235\275\314\007\n>^\243l\277\273$\234<]\023\344\275\227]\037\276[:\005\277\023\341U?\275\253\033\277\207\'m\276_\004\272\276z\243D\276H%C\276\230\304\023?\237\347\013\276+\257(>\035KI\276<\004\211>\254\006\221>\377X\307>\004\355D?}\005\223\276\372\250\006\277\020!\210>~\216$\275\233\371\271>\342\270\304>\036\241h\276H\326\203=\360G\222\276Qx\234\277\366\345\367=F\377\312\275v\204\001\277wR\026>6\205\010\277\345\335\010\277\35678\277\2240B>?\355^\276\352\360\364\276Rq\234=\315\3341\276?\214\324\275\255?\333=\204\217:>F\205\341\276\364\301\315\275\270<\361\276Ez\277\275w<\247\277\0074\237=\264\341o>\014\333\227\276+\0104\276\231{\004=\254\226J\2762@\306<94:>N=\252=\002\220\217\276\214G\025?\330\357\222>\2347\325>\353\351\236\275CSS\275+w\237\276\016\227\022\277\030\252\210>@\023\313\276\340R\377>\224\274\210>\337\257+\277\032\027\t\277\313\032o\276Z\3655\276\313Kx>\204;x>;\305\021?\301\'\025\276\034\265q=\366\032^\274\245+\265\275\367Q\267\276\231\373\343=k&\016>\256\016n>\217\345X\275\341\006\233>\226\217\324=\021\021\274\276,^\037\276+\"\226\274\251!\206\272\037-\312>\246Q\216\276Z\211\032\277\244J\237\276P\244\227\275ne\034>\223\331\032>\006\355\252><%\252\275\230Be\276\254\231\005>N\350w\276t\252\256\275\357\252\343\274\270\316V=\"\3764?\205{\226\276\261\323\221>\003\023\003\276\300\037\315\276\036\366\026>\3217{<\320)\347\272x\240\345<%\r\271\276q>\302=\004\022\034\276F\253!>\"\316\271\275J\267O>\036\331#\276\247P\302\274\341\330\017\277\350e\250=\236\t\327\274\t\034\304\273\036\376\024\276t\036\007\277\331\026\025>\317/\311\276i\306Y\276\362\3136\276\262\356\335\273\027\344P\272\305Ti>O\232\336>07\210\275F\206\220>~\207\327J\325\224\273\310y\203\275)\207\354\276\255\\\033>d\376\300\275%|\302\276\316~z=\357\262l\276rQ\314=\346\2303\277\2115(\275\265\374}>]=6?\203\270\376\276W\316\342\276Bx\366\276\230S\373\276{\273.\277\224\371!;\373\n\001=\t\033\244\2769*\237\275\214\317j=\005\235&\276\303|\272\275\312\274>=tY\033?\313G\34
6>\313\337r\277\325C\247\276\003a\017\277\265\341\244\276\355lQ\276\202\331\375=\263sl\276\r\314\021\277\356\017\"\277\354\202\030\277\032eM\276Y\244\035>\241`\023\276A\317\206\276\375\344{\277\334+\037?-k\\\276`\021\232>~\027\"\276\271\232\364\276\265\3565\276\356(\331=\233+\227\275]\2770\277\245\337\n\277\263J\345\275\250r\356\276\026%&\277\211\301\247\276*\027\376\276^g\324\276\"f\007=\374\021\204\276/\367\005\277\303\207\226>\314p(>\\//\277|\330]\275\020\177\352=B~y\276\021y\242\275%\245\024\277\303\032o\275Jy\367\276\353\211D\276\354\325\020\276a\274Z\276\005\013R\275\261\035\337\276\313\204\307\276t\312\213>\020\032%=\240\313\360\275\251\375\352\276,o1\277z\265 \276\301\007R\276cg\t>\355P\304\275t\017\327=)\177\224\276\362\305^;\372\376\203\276X\261\023\277\242\254J\277\254@f>-\030\310\276\'\303\017\277\372q==\203Y\230\276\355\330\t\276\205\033\205\276\026\210\306\276\305+\006\277\233\005\262>\230b\214\277\361\211\217>\n\341m\276m}P\277\255\351\305\275g\321\027\277\322Q\321\276{\273E\277\207X\236\275\215!\323\276*`\247\276 b\272=\006{\314>J\264g>\271W\000>\031\303\252\276K\335\371\275\341\r\014\276\207\001v\274{\216\371\275mF\r?\203\016\374=J\305T\277\347\340.<\276PA\276v\277-\276\3563\244\276\326\034\240\276\312\273\213=\010.A?\333\236\021\2760\240\372\275\356\362\234\276O\201i\276\234O!=\330J\027\275~\202\251\275\220:w>*Z\025\277n\332\025\277\325\304\250\275\243\330\000\277P\3338<\256\210\202<\241}\314\276\265n\030\277<\342\366>K\243\264\276\014c\200\276\257\351U?nM?\276\261/w\274f\022\201\276g\233w>n\235\302\275\212\210\352\276\220CV\27632y>y\265\016>\366\356p\276\274\210Y=M+\326>|\364K\276*\3341\277j\257\251\276\200\033\260=x\332\364\276\220G\314\275O\270\257\275\326\306\200>\327\200\323\275r\227\372\276\252M\213\274H\353\001\277E\231\231>\343\231\014?{_\346\275\237=y<\016=g\275H\362{\276\021\001\223\276>2\305\276\027k\213\276\217{2\276\334k\347\276 \271\366<\241A\027=_\341\235\275\277\304b\276\345\275e\276\300\025N\276\364\204;>IN\232\276\356\265\211\274B\317\266>S\0002=aj\316\276\010}7>\022&\222\276\nT{\275\232\225\033\276\363\267\267>DP\277=Md\263>\260\321\365<\375P\031\276X\261\214\276Q{`\276\321\330#?\240\363\224=Kl\013>\337\254\r\276\3075<\2767l\215>d_S\2764\321\022\275~{.\277z\337)?c)f>.\223!\276$\202\036\277\241KG>\236\207\302>\t\010,?\316\027\036>\367\321\353\275\002Z\204>N\266\224\276y7\375=\254\211\005\27742{\277&\001\034\277\002\201#>0\363\215\275-\377\312=\270\370\245\276\332\217\007\277\316Hv>V\256\275>\231\310^\276\231\"\\>\335\\G>o\347u=~\034\373\276\251X\264>\307\227[\276\256\307->t\212\205\276\356DB>\333\017\252\276\333\345\312>u\245*>\353\324t\276\272\235Y\276\357\021o\276@\212\035>\014\337\017?B\274\261\275\324\245\030=\247\024K\276c$\010=\211@\031>{\332\250\276(s\001>\005]\236=\234\264\373\274^\027\007\274\007-\t\277t\277\247=\34199\277 \335\375\276vn#>\256\024F>XF~\276=[\243\276]\320\235\276\367\226\230=\200/o\275\270E~\277P@\336\2739\224B\277 \325G>v\242.>)7\377\276ZgO\276\273\000Z\276\t\310\352=\220\254K\277\341\177t\277\273\312A\276Y\373\'?\245{\326\276\007\220\034?\275$\374\276\344\016\332>\376a\244>\313\251\273\276\215\'\034? 
\264\321>i\337\210>\316pu=\332\232\005\276\251\r\322\276\"\261\367\275\033f\317>D\216\272\276\362H,?P\301W\276/\243\332\275\320\275\306\276\336R0\276\030\377C>\t\307\327=u\321\025>?0\025\276e\276\226>\323\333\256=\253\233\362>!\320{\276{,(\274\342x==\1774\217\275#{\226>\371\003\241=\371\346\277\276MyB\277O\247c=\267\010\023\277\321\0169\274\375\276\005=n\346\244>\362|\016>\200\014\304\276)r\265\276\031\225\254\275]\303\310>U\317\311\275\363mW<\323\361\250=\254~L\276\177\220\230\277\254\250m\276\311f\201\276Gk\257\276\320\305\366\276\204;\025>\312C\274\276JR\354;s\002\202\276\227\367\277>\273\243\256\276\025\254\275>\303\027\221>\330\302\216>2\363\221?W\014^\276\364j\356; =1\275\203:&\2756\373\260\275\220\303V=\364\265R>F\236\333>\033\232\035\276Y\037\001>\020\230\365>\376e\234>rU\361=\237FV\276F\364\231>0\360\217>\361uM>q\341\251\275\3403\221=\223\017>;\321\221\257\2760\251\341\276\3575\347\275\\m\n\277\213\335\270>[\345\023\204=\210#\364=\033\252\215>-\274j>\347D\211\276\245\231\206>\255l\213\276+\243==\335\307\020?\367\3774>TZ\n>>\031e>Z\353\256\276\325\227\023\276\326\300\002?\202\360\200\276\031\343\351\275\331\251\365>\211\263\003?\016\270\034\277\274\227\246=;\362\341=~L2=\317\023}\275Q*\010?\002\237\325\276\3753\234>|\273+?\213\031\207>\251\264\232\274\267\034\264>\336%\022\277\332\326k\276~Q\"\276\217`I\276\305\025\360>\001=~>\212\010!\276\323^\370\275\\\025\351<\356\347\264\276E\367\250=+\235E\2776H\206\275\217A\250\276\243\350\253\275\3656\266\276:\315\231\276e\214\356>`\017\250\276\021l.=\023\346)=\222G.>\306\312\350\276\353(\313>4\212\311\276\264}{>\327\243\211\276\235\210\033?\352*)\277/w\254=\234g\234\274=o\016\2760\201\377\360\274J>\020\025q>\006\311L=\365\2523\277\213C6\277\270b\223\275\250\377\316>\273\255\317\276\254TS>\322\367\033\276\317\206\202=\354\346\231\276N\245L\275*B}>\213\207\307=|\213\022?\347\331T\275?\330b>\266.\254\276;_\177>\355\317\031\276\236\3058\277\362\005\301=\034\271\263\276K\250+\275<\320\023?5\344\341>Zu\201\275\250\301\216m\001\277\013-_\277\237\020\270\276X!V\276a`\203>\211\244W\275\315l\022>\267\036\034\277\275E\027\277\332\010Y>L\303\357=e\r\225>P\031\215\276\237\021\333\276$-(=x\257\007>\350PF?\223\372B>\275\337\274=\345\371\303>-\210\000>\021\330C>[\304\346>\220\032\211\276\255J\002\277\361\254\220<]w\016;\303(\214>1i\226\276\211\246,>\267~?<\032\274\315>\365\243\r?\376[\372\275\016T\256\276Z\212\352>~\236\313\276\202}\246=\323\247>>\315\\6==\255\037?49\302>\245\020\215\276[n\236\276\270\021 \276\201,\201\276\315\212\215>&!\245>\026w\264=\363\017\236\276\247&C>3\201\205\275,]\215>:l\020>U\273\010\277\223\266\313\275\035}}>\177\257\325\276\010\014\021?\001\336>\275\261D\026\277\177\243\227\274q\246\210=\235tJ=*\020D>4\313\336\276\004\001\031?\325\036\002\276\254\362\347=P\341c\275\032\366\363\273(*\307=\366\370\243>7-u\276T\262z?\027\026\354=\274 F\275q\231\026>\220\307\302=\013e\014\276I+G\277J1\355>\243\2350\276QQ\307>\207\215\275=\266\327\230\276V\210\252\276\361\3415?\233\034\214\27670\021?,S!\276~.\225>\276o\277>`l\325=\217gw\275%e\037\276\212\000\021\274\313\t>\276]\235r>\240Z}\276\3167\032\276\243\246b> 
\306\303>\373\272\305>\267k\210\276\313\034\234>\034S\367\276x>L\277\374D]\275\300\220>>\242g{\274?\237\213?\245\022Q?*\375\333>+\221l?\376\231\306\276U/\234\276\252\246\014\277X\214\311>\323jp\275\016\241\242\276\022\233d>\352\2771\277$\312\344\275<\323\306\276\346\362&??\276\325<|n!\276\352?\226\275\232\243\234\275\006x\016\276\331\272\337\275P\317\364\276Ks\226>,aE\277O\r\302\276\302#\222\276#\213/\277h\203\024>\341&\212\275/D\023\277\235\246\321\276\321\200\240\274\303\360.<\307\206\006\276\202Q\213\277\203o6?-M\034\2773\3171\276B\376\374>\033\021\266\276.\201\320\276G\335(\276\005g\357\276\235\214\203\274\347x\315\275\024\271\025>\363\032\345\274(\251\016\276\343\233\267>kW\265\276\232%\300\277Vu\330\276\305\002e\276\020\254\351>n.\260=#\030\207\276l\231\355\275\t\262\325\276\227\357\302\276\237\006H?Q#\323\2760\004\341><7\035\277H]Z=1\374\214\276\212O\220\277\232\021.\277W|/>\207=_\275\007\240\013\277\203\360$\277\004\177a\276\031\234\226\276O\357\003\277\364\301%>5|\354>\246\n\226\275|\327\214\276\370\247\023>VV\016\276\357\213\031\277\262\202G\276\317\353\327\276\213\260\331\276\266\t\332;7\322T\276@\303\311>B\274\260\275\326o\341\276\215%8\277\262\335\035\277Y\245?\277\303\036\344>\301\031\240\275\234\\H>\347\246\333=av\364=\301\2241?X\252a>\363\236,>\337>\315=V\332\341>\021va\276\366\034\217>y)\311\275\177Cm\274\370\224\021\2751\2771=\016@H;\2005\357\273>\263\267\276\005\013\022=?#\002<\211z\256>_\333\255>\027o\252\276b\177x\274\373k\030\276\304\2238?\273\2778\275\033\240-=\254\237\217>\034c\341;&J\266<>\342\351\275\036\273\216<\361\014\272=\232G\232>\253%`>\247~!<\377s\316=\341\335\021?\177\323M\275~?\251=/\035\341>G\301g=\353\341\017?\207\0040?F\222\340\275\024\n3\275\342U\201\274Ss\267\275\331\371\332\275\202Y\363>\3433\320\276Q\027`\276b\231\260\276C\223\252\276\201g\005\277\372\032\300>\311t\334\275\236\252+\277d|&\277p\231\352\274\363\332q>\266S\002?G\207q>H\352\023?(\263c\276\005\314\275>\334\233:>\324\372\275\276\266\212\266>\257\275\337\275j\252P>\323\206\210\276\314>Q\276|U,\277\'R\004>\261\2359\276\354l\306>\205C\203\275\201\333\323>\357J\336=\321EB?\313\202\262>\013\004\241\276\372\305\007\277\375&\021\277c\360\326\276*uj\275r\034\300\276\007\240\274>\211Th>R;\217>\252b\330\276\332\273!>\310K\352>\366$\234=\002v\326>\253\251\027\277\200\276\241=G)f>i\003\232\275\3531I\277\212+\024>\247\343l>\031\225\370\274\037\255P\275FG\202=EW\262>5\335\230\275\301\207\346>\277\215\211<\376\204\365<48\254>NB\214\276Z\356\242>2\365\236>\276#U?V\356\033\276)\004/\276\373\215x>B\310\007?\000g\202=\004\346\222?\004\013\346>\240\354d=A\002\303>\241\032\235=\215\212)\276Nj\216=\252v\210?\002\267\223\276y$\221\276\034\230\022?y\2702?W(\370\275\010\347\021>\221\031\227>\036z\260\277\262j\312>\337\343$\274\216\275\002\277\342\025\320>\317\225\265>*\245\371>\3668\242>3\003<\276\366\016F?b\252F?\222\201\022?`-\204?$3\216>?\006\005?\216&\250\276\3356*?\363[\350<\316\201&?\376\375?\276\004V<\276\320\"\334\275\227\022\006?\227.\311=\305qI?5\304W\276\017\306\303\275\013\005A?c/\201?w\320\266>\220\036\004?m~\252:\304*@?\255cB\276\017\243\216=\t\005f\276F\260\036=b\300\313><\007\303\276\313\220\220>\355\342\001?u\232\264=!I\301>\234#&\276\275\231.?\312\206\375=\023\351?>lq\373\274\320\314\215=W\231\273>\332\254\331\275J\307\350>\314W\307\2756\371\364>\001\203\255>1D 
=\256\n\334>w*\024>\353\034I>\254Xm<\345:\270\276\311\321q\2768\331\306>\317\2065\273\3330\324\274\232\256\276=\3654r?]m?\275c\211,>K\365\013\276\364g\363=r.p\276\023\276\347\274Sj\215\276vs\270>N\2743\276g\243\\?\021h\207=\303i\372=\323\225\304<\243?\251>\364/\227\2752/\030?\230\246\205>5^\020?}^\2769\002\367\214>\002T\213>k$\255\276(\355\311=\253\207\203>7\021A\275\3023\221=\236\314\006>\330&\014?4\002|?\353H\231>\317\310\314\276\217\232\344>\2663=\277\340\313\017?\342\275\r\275o\220\361\270\254\037\276p<7\276\225\337(>\243p\035?0\345\016>\326\246\300\276@\361\223\276\"\355\226\276\027\316\252\2732=\373>#\253\t\276N\363\262=~\315\260>]\315\272=\332\343\224\275\200\3442>=}\231>\314z\020=\250xh?\025\236\034>XY\313>\343\314\227=\035Y\271>G\276\245>\215\306|;\374r\232>\034\352T?\237\233\313\275\216p\017?\257\273S>z~\214\276\270\032\215\276\035&n>&~\005>)\220\212=\014\307p>\263\203\301>pVp>dR\r\275L\254\005=\267\214\203=*\256\302>X\246\262\275g\232\307>|\006\237>\204\007\244>\346\376\325>\010\215\205>\275?\355\276\342\"\273\276\324\237\313>o;\266<\340\303\275;1\025\330\275\305\354\232\276/\007\217\275e\222\237=\033\'\027?\201\271\214\2769\301\226\276\271\026\241s\007\227=\'\261\024\276\316c-\277)\233D\275\346\255\007>\027\311 ?\030?\025\277\373\317\235\276\325\'s<\305\371\241<\353k\306\276N\211`\275rw\353\276\321\350u=\'\273\234>5IX\277\334\361\205=C\2674?\260?\274\276A\016\032?X\222\023>\013\021\027?\311h\t?\010p#\276\323\266D>\0108J\277\177=\333\276\003^1\277*Z\272\276\006\253\330=\016\363\270\2761\200\327>\260\263W?\262\373I\276\320\341\332>B\376\347=\025\\\215\276\217\251M\275\036\003\355\276\236R\335=\324\254B\276\254\016\307<\346\001\017\277\255$\255\275\322\324O=\222Y\236>\364I\243\276\374E\316\275\034\272\351=\002\326\203\276B\t\003>\t\010&?Q\374,\276}\006\325\276\250\333\266\275\364\200\177>\266\357\251\276\035\366\374\276\216\205\273\276_\001\344>\031\037\203\277V\022\222\275\345\233:>\301\220\327\275\237\316\333\275#\010\217\276\313\370\207\273\236\263&\276\204\031\205>\200\347\322\276E\325H>!\310\253>2/\301\275\245\364I>\371\241k>\365\224}>\247\235\203\276/\241\034\273\330\236\003\2766 
6>\267\256\202=X\022\320=\325\211\307=\274w\327>\035\302\223>\370,\344=\371\235\005\275\237J\313>\224\246\260>\267\223\362=,>\013\276]\3459>m\214S\277%\357\007\276J\237\014\276\322\250B>\207\376)?W\367\224>\333\345->RT\232\275\372\333\316\275\276\376\337\275\001\265\326=\270\010\216=7\222\277>\300\366\301<\336\332\224\274\'3w=\'\212j\277Q\215\222>>\266B>oU\034>\317\350\210>i\302?\276[\024\210\275\220\324\031>\363y\326\275\022\261\223>\314!)=z/\317\276\013\321\034\277\0006\025>\271V,\275%es\276\023\3441\277\017}\014\277\214\3234\276\340(#\276\262\362\323\276\236\1775\276\325\022\223\275[\004\272\276\020S\365\276\004\250Y=\216\014M<\332Y\024>\354V\201>\313\314\360>\034\203B\274j\300\022\277\326\316\211>qq\\>\001\224A\277\212\215\234\276V,\300\346\2322>\347i\307>g^\027?\330\305\003?\253Tp>\272\312\022>6\306\221\274,\373\334>\r\336\210\276\2450\022=\264\\\016?\326\322O\276\342CI>\305Fs\276\254S\360>\020\316\232>\341\300\300>QR\253=\036\356\264\276\362\366\236=K\323\322=\226\305{\276\351X\371>\334\314|>\360\347\260\275\234\313!?\357\327O?=\221\243\276\\\375\322\276Z-\333\276\370\'\264\275\336B0\277HG\342\276^/\332\276\304\307\\\276A\351\203\276\326\227\230\276\332\337\367\275\362\226H\277\316\033Z=\305\010\025\277~\343\233\276\203@\321\276\302\242\357=\354\3450>\311aQ\276\336y\304\276\202\023:?@\033R\277\2264\242\276\\g*\276\362\342\020>\300\354\303>\242\244;\275\272\261\264=\357\010H?ii\032\277\363\024b\276.8:;I\021\347\276\023\271\274\276\235\315\017?\200\351_>\372\177:\276t\035\022=\253\006\360\276\277\231v>\2512\301\274v[G?\311\205\365\2752\377C?c-\311\2750_L\277M0=>\333\336\037>\'Ws?t\212O>\317\340\331\275\211\247X>2n)\277g\034*\277\276L\232\276\0133\261=\355\372.>%k\276>\022]T?\361\232.?\367\316\r=N(G\276\264+#\276\r6!\277c\371-?\237\363\016\273\204\276\321>\300\t\031;\273\341\343>K\263\242>\362\2169\276\274\202v\277\246&\231\274o\2639>\024\306\353;aH\016?Fi\240=\356\357\317=\006$6\277\014\025?>\037E\375\276;\232\300>rN\200>\315S\253\2753o\217\276\264\t\204\276\377\232\n\277\262\277F=\216P\337>i\213\306\276\226\ti<\232\263\004>\361\365\003?\025\247\227=\342U\020\276\366\232\373>T\265\316>\257\177$>s\264\301=\2405\274\276\\m\226\276\244ml\275{\231.\275\330Ts\2763;\212\276\307F1\276\'t\312=\020\276\314=\332\320n\274D\003\207>_j;\276Qr\214\275\017\020<>!\334\030\276*\343\257\275\031(\267\276\362\341h\275\326 \237\276\230\233b\276\225\"\260\276\217\330n<\266%\323\276\r\376\312=c\224\034=\345k5\276.>\307\274\030\342\272\273HiF=G\242>=\220\036Z\276\002V\345>\333\212\257>\014\275\316\274\222\"#\276\266\254\377>\362_-\276\205\242\252<,\377E<\'\242\200\275\220]>>:\026\236\276\247X\341\275\254\342j\2770\200\262\276f\322\005\277\344\211\210\275\342nJ=`\003\364>e\213 
\275+G\025=z\320H\276\310\013>\275\255\026\005?\030\225U\276\001\337\215\276\352\337d>\227\354\261\276$\212B\277piL\276Gy\277\276\333\r\324\276Q\016\223\276\345\006\242=\005\372\350>qd\303\276\263*\326\276OG\200?\001|\177>\367+\032\277\033\230\034\277\3121\024=\225\022\315\275o\001r\276\333\336E\2761\230\371\276\221Gw=a\n\344\276l\242\024>\320\347\236\276\366\026\354=\003\256>\277\2065\256>!\332\225\2763N\201=\016\035\302\275\273\201\330>\267o\005\2765\215Y=:\035\225>%\301+?2*\330\276\321\3073?(j\321=\022QQ>C\236\204\276\334\210\005?\027\3274\275\247\030Y>6\016\203\276Z}\270\275q$T\275T\303\024\277\214bG>w\340\324=\024\352U\276J\364\246>8}\232>\344F\255\276\267\267\363\276[\227G?,\017\217\276\266\035\204>\236\006\022\277\035\303\306>\227W\232n\264\032\277P\206\212?\006\007\215?7,\243\276*\250\257\276:\231\334\275\0073\275\274A\221N\277\315\031\253\276)\"\302>\325\'l\275v:\245\276\374\322!\277_\312<\277\370@\r\276\314\242\264>w<\251\276\377\300\025?\265\343\276>\264}\250>\027\177\276>\230v=?`\237W?N63\277\021X\243>\364\350\356\276\032yb>E\320\022?\224\367\377=\375@/\276\363\224\244\276\342\242%\275(@\231\275\243\331;\275G\020\222>\340\317l?\353j\316\276WN\364<\301`\022?>/\255<1\003\006?\317\266\331=t\237\253=\306O\010>\277M\306>\010\332\347;\022^\367\276\343\321\212>\366\026D?\350\314\210?]+t>\334d\203\276\370s\203\276\005\366p=\016\321X\276\3603x\276\314\243\361\276\227\213\004>\342\347\370>\342\333J\277\370_\257>\222\376\307=\354\033\032\277\207\260\023?\334\311\n\2771:\234>Jyg\276x,\336\017<\242=\0243\036>\336):\276\224\262\226>\260\260\206=\374\203\215\276\304\356\241>1&\242\275Z\003\330\276\207W#\275\310W\233>\306c\177\276\346\216:=\225I\226>\200\2574=^J\302\275\005w;\276\0272\"\276\231\272\325>\256\\\022>\243P\000?P\251\017\276\212\216\234>[\222\343>X\321\030>\306s\303=\236\235\347\276x!\013?\275\220\251=\347A%=\377\312\233=\275\'\223\276\234\214\277\276a\0336\276\374.\025>\201\306\263>7\361A\276\375p\245>\354\356\025>a6\023\276q{>\2764\354\">\242Ws\276\234\223\246>\223\370\200>\223\357~\2768\233I>d5B<4E\027>\211\333\343\276\343\006\214\276\214$B<\276p\035\275\310g\017<\021\213\230\27654\005\2765\243\353>\363W\253\276\031\0326\2756\030g\276j\345e\273\020\362\244\274\373\347.?\005\320\006\276`\371\235<\003\317\265\276-IV?\006\342\227\312\266\234\274\207\336\342\275[1\252>}\375\211>\303\230g>\213~B\2776b\217>\021\027\357=\313\217\013\276\006\205\345>3\006L\277\245q\322\275H\270\213=\375\355\237>D<\210>\032-D\275\334\370\\?\356\360\335=T\021`\276\360\3661\277\315\372\203>\257~v\277\204\216\r\276>9\226\276\225o\345<\326\205\327\276\323\200\031\275\023\275\343>Hv\210\275y\327\361\276s\036\336>\261\242!>\366Mt\276\345\266N?#;\362<\321\224\003\277\2033\367<\232\315\336\2766L\033\276\255\006\301=a\232t\232@=\265\032\275\274`\003>?\243\214\025\277\322\306~>\307\253T\277\264D\356>\221\374<>\025!\320=*m\316\276\276\263d\276\014K\262\276\356\263\204\276L*\234>\250w??\313l\323\275\023\277\376\276\305\244*\276V\033U\276\034\241\227\276\337\211\202\275\362\244\005\277\260\231&\2750\2252\275\307\245`\276\300\342\201\276\342\023w\274\253\001\353>H(\272\274YM\374\276\244Xy>\254s\236\276zA\234>\002Z\315>\233\374\224>G\325\234=/w\004\274\241q\327\276\205\034\222\276\2443K\275\227\3150\276M\252\366>\337\262\305\274\347e\344>\266X~\276\003-;>\214\205\307\275<\340\242\276NI\004>\237\202\014=\037\246S<7\345.\277\005Tr>\020\333\354\273\3201\002>o3\005\276\241^\n?v\334M\276FcL\276\236\364\002>v\221h=\333\271\254\276\262\243\236\273\207jc?\320L\325>Q\366*\276[c\320\275\215k\246\276\243\307\201\276J\255\206>A\'K\
275\315-\336>\367q\006?\032\323\243\2744e\r>wyZ>\">\362>/\200\214\2763\361\314\276\207\237\032>\006V\313>\276mK>\303\361\262>\025M\020\276\311\371E\276\031\354!\276\326\217\371<\306\321 \275\373\333\377\276x\351\256\275\317\333\232\276\224\033`\276L\313\302\276\032Q\332\275\001\032\037\276\266\335\203>\203\177\006>=\016\013?\247P\235\274\277(\236\276#-\034\277f\364\276>w\262\013\275aAW>\346q\354\275s\275D>|\363\226\276\200F\262\276V~\241>\200\242`\276\032\2609>\252\334\352=s\257\014\276z\t\373\275tG\240=|\216\262\276\005\235\'\277\0020\222>\0256\232\276\262\213\345= >[\276!\tN\276\206B\321>\217\007\202\276\227\030\014\276\265K\203>C\227\277\276;\212\030>Pix\276\317o\330>\225ZE\275\247\331\217<\345\336\021>\262\026p\2765\327\005\277\244d)=\355\337\017?G)\271=\372\010\305>\307\246\001>\037@\211\276|%@>\202p4>\311\003\024?(\212?>\315*\301=\007\216\327\276f\037&\277\214\276\241\276b\374\221\274\226\227\246=!\240`\276\362\242\330=\033m\231\276!o\373\274N\300\275=\nH2>\001\214S\276s5\204\276w\016\002>_\272E>g\000\027\276\315\2335>\020\225\266\276\265\n/\276DW\245\276\305\370\204\273\321\361\240\276a,\205\274\332\026\013\277r\353\211\276G\320i\276\033\375\320\276v\3522\277[\370\214>Xy\306>\233\366\">+f\266>ff\352>\371n\355\275\014\337\016\277\036\266\201\276\226\\<\276\347F^\276[\034\204\276\232?\244<\312{\001?d\246\224>\021\215\270\276\n\262b\275\025A\t\275p\034\201;\0013_\273\204X|>\2305\007>\365.I\276\324\261\033\277\003\364\241\275]\353\026\277\022\362P?\321~G\277W\342\252=\006g\344\2769u\350\276\020#\330\276q[\030\277\212\3567=\321\357\305\275\367}\260\2764>\211=\333\344{>\033Z\r\276\353&\363\276\366\302`\277I\264\235=\325C\013\276\036O\246<\035Y\322\276\311o\005>\210\257=\277\323=\016\276\332\325\202>\255S0\275\261\332\332\276\"\'\253>\014\370\013<3\274\374>%\2136\277\330\256\n?\332\033I=\022\362\364\276p\030\372;]\2720>\217Y\216\276nW.?i7\033\277\3641i=\301)G\2770\224R>\313;\321>tp\031\2770\367\016\277\0223\376\276G\331\023\277\317\276\313\276\371\243M\277\347D\031\277\370^$\276\345i\207\276V\355Y\274\304\204\360\276\347\2273\276\306\304l>\371\021\036\274O\207\365>\271\210M\277\267X\'\277\027\032A\276\0060\240\275]^.\277\346*]?V\237\276>\346\341\235>\272v\213\275\347\254\260=\334h\177\276\260\025\'\276\177`U>\020\276\347\002)\330=\304\203\304\275\030\241\324>\266Wg?B\332\'=\204%\205>\362CQ>\212\356\226\27602\023\277&\237\336\276\374n\'\276\346\240\307;\307\371\225z\307\342\276\'z\n=kn\305>\312SP\275\324d\002\277\322\351P\276\230\317\363\275\r\344\236\274\371u\237\275\n.9\276T\r\025>\346\2760=T\304\214\275e+\036?wp\005>f\000\245>=L\254>\033\371\025>F\202\344=\230\023(\276\2401\351G,\021>8\034\334\274&\245\225\276t\242\255\275P\200v>\322\360\252\276\3536\275>\261\354\022\276W\240\251>\204\257\334>\247\225A>Ky\"\276}\002\326\274\213\032\026\276I\256_>\005\031\252\276\023\n\227=\035\263\251\275\r\\H<\331#\257\276\214\305\211>\262\335\273>\337\222d>\246\343\227\275\266\237\007?\224\2735\276\021\375\222\276_!\263\274\377\226\344\276\343F\"><\217\254\276Q+\361=\263\362\266>i.\035\276\350q\340\274c=\224>\366\310\271\2757u=>\350m\212>\202R\307=\231\n\332>\215A\333>\374\215\027>$\304\001=\2550\256\276\312j\032\276{\224\006\276&kg\276\3261\266>\243\353\250>\310\312\r>\351\367\206\2768\322\250>\277\003\257\276\310`\375\276V\025\022\277!J\246\275\333\347\021>\3438X\274j\231\033?\314\201\337>\177UI\276j\214\273>6\2512\277\240\300\245>e\303\233=/\330\226\276/\010\215\276w\205\220=\277.\244\276\254\202\374>\026 
^=\213\356]<\250>D\275p\245\203=\034Q\004\276J\226\255\276%\242\221\275:H\342>b\205\332\276\311\336\247>\271\020\340\275\n\030\212=\342\361Z=\202\360\207\275b^\024\275\375\201\275>\tWV\275\035\001\007\276I\275\000\277\361\334{\276^#(\277\237 \256\276\310\244f\356>&\277\nv4\276\241U\334\275\251l\202\275\320\222>\274\020\253\"?\251\322\230\274\323\247\262>\305ti\276\373\223\347>\345\000\210\276\235R\311\276\26235<\253,(>\024\201\020\275\\\034!\277\215\026\034\276\246q\036>w?\373=\251\366\314\27643,?\031\265\016\277d\364\364\276H\\\177\276Z7\017=\035K\225>\007\006\237\275\004\247\307\276\236\360F\277\365\226\277>\327V\260>\014\227R\276nU\235\274\0354 >\255.7\277\314s\374>8wV\276\255\236\226>\361\2216>\206\317\203=\2220\372\276\206\261d>\372\210\004\276\035P\"?\201\275p\276\363\242\007>\034*(\276\365M >\367!\r\27671\353=P\267\265>\310\215\013\277\377b\374=Z*)\276UqX>r;\035=\014\265\236>y\210\326\275\277\366\302\2759[\302=\005\021\232\276\367\327\352>,\222\006>h\301\010\275\'\270\351>r\270\323<\230\335\300>\371za>\016<\235\274\002\263\372\276TV\261>\027\261+\275\362z\324>\001\360P?\232\206\231\275!\026/\276\324/i>\254\251\347\275\343\330\237>MFq\276\216A\023\275\224\245\372<\343\223\331>\275\037\356\274\2023q\276\234J\334=@g\276\276]T\226>\206r\207>_\247\021\277\242D\322>\023\344\247=\265\0257\276ce2\276[\265\322\276\302z9=\\lN>\335\222M=~\024p>\360\313\272\275A\000\017>>\010\340>%\016\377>\032\257\026?H\227U\276\r\323\037\277\321~\372\274\254\231\305\276\305\374\212>\205\210\224=\206\253\017\276\030X\331\276\204^L\276fE\303\276Q\363\204\276\253\215\364<\300\250\364<\277~\n=\207\225\2469i\262\206<1Y\207\276\331+\257<\rD\204=\370]\250\275\212\223I=\255J\013\277NY\004\277\335\353\204\275\247\007\217\276:c\337\274\r`\022\277\203^\306=\030\307\032\276Z\253\221=\343+Q>\265\203\333\276\213\307\002?\365\243\316\275\n\274\344>.\3456>\250\337\246\276vi=?\355\237\300\274\217\020\345>\224\344\364\275`\370\210>8\013n\275\364\351)\277Y\227%\276-\300f\275\214\242\265>:\303T\277[y:>\310\276\247\276y\001v>\265;\014\277\336Z\245?]\362s?\201\020\222\277\357}\003?\363\306A>\323\3645\276O;\036\2778Z@=R\276\252>\353\220\267\275*\241\024\275c\256\247\276-`\037>D\204\225>\023\006\364\276\272\321\270\275\2216\344<\362\345\203\276s\314$>\347\315\206\276F\327Y>h^\025<\366\372\\>\341\215J\274^2w\276R=M\276\"1\t\277\332O\313>iU\002?C\010\344>\r\220\373\275\261\205\367=W\214\320\276\314\220\205>_\266\215\276+\341X?1\227\343>\014\222v\275\320\236R?\320Q+\277\301o[\276\\\230\262\275\347(\316=\204]\007?uU\340>0M\037>\020J\235\274\252 \301\2766N\r?D\333 \277\233\225I\276\303w%\276O\212\007\276kD\002?\014\320\363>\343\321\212>\263J\233\276\273\014#?\350%3\276\333\230\330>T.\326>X\230\271>\004\352\004\277\255 
v\276\364N\374\276\256\024\204\276\205>\310\276.\177A=\216\013\255>\230}H=\340\025\024\277\222S\376<\226\340R;\354\306&>\242\rz\270\004\277\273L|\275\256@\243\276\271\223\221>\354\251I=H\255\264>V\261\203\276\266\270\350\275\217\363R\275\375B@\276L\257h\277[YE\276\364L\306=ZI\302\275\364_\231\276\226NA\276O\234\312=\301yu=\367\332\324\330\017\276\004\"G\276\277\326\262>\207\231\005\277\345-\234>^\342\314<0\355\355\2752\277\334\275\347\010\036\277\325\256\325\275\2627\000\276\030K\302\273T\365\035?4\326J?\340&\270\276>\236\301>\005A\275\275\345&\202\275`/\2258\306\314\n?\273R%?\217\304\271<(\t\346\275\272\211\367\273\316.T\275\226\324\340=\257\377\347>e=\243>=\274\371>n\2431\276\032\335j?p\363\230>\354\305\016?\352\315\232=k:\037=fgK>\265\021>=4\350\034\276\03529\276\227}\260>\205\273\021=b\321\236\275\227\307\262\276\227\2047>\272\207\000\277\322\300\232\276\373$\013?\"\256\251>\204Jh>\263\221\227>\365\341V>\0344\025\275\326\n\016=L(\362<\242\253K=;\360\256\276{/f>\021$\246\274\227\031r\275\205C\243\276t\3777=\217D\232\276\216$\004\277\251c=>wl\217\276\274\350\324>\277j\334\276i=\313>&\033 ?\245\305V?\303v\036\276\345\353\001?\3174\022\275\347\326\014\276ZL\213>K\273\275=\004\005$?\274\031\351\275Gz\301\275!\253\001\275D\360\355\276\3770\247>R\372\331>\346z\331\276\r\340s\277\270\n\315\274\256jK>S\215\254\276\'h\266\275\361\345\034\277a8\024\276\275\035\321;\214\270%?\312\264\273>\323Z\257>.h)\275\232}\'?\234\255\201>\355k\374<\276\333\022\274\304\262\362>~:7\276\007V\257\276\376E\304\276r\255\022\276\220\311T?\200\333s\276\227=\355=xc3>\"M\033>\326J\034\276\177u\323=\211\274\271=\214\350O?}\350\347\275\323\371S\276\311I\231\275\354\200\365>[\217^?\250\003\316\275\\[A=0\256\237\276\202wZ?\351\314\243=\017\022\353\272)]\220\274\251<\234>\366\373c\276\327{\333=\265\364Y>\3103\307>ez\014\277\253\341\032\277@mF\276\230\337&\276d\371<\276\006\220\253\276\370\373\032\277\304\001\256>\332\251*\277&\023\230>\215g\275\276\223a\240>\037\377\310<\367\020Q\276\326\"\036?DY\002\277\346\324\205>c\205C\277,$v>d\375\030\277\013\315\333\275\271\035j>\324Te\276\257j\312\276N\203\331\276\347\267\206\274\263Ce\276\025\331\007\277\332\036\365\324>e)\027\277\021rP\276\344\333\230\276\334=\256=\302(\203>\371\362#>\357\361\264\276\347\010\001\277\216\3004?s\035\200>\302\025\267\276\206M =\371;-\276-{5?t\306\030\276\r\265\254\276\375\272#>A\020\346\276c\356\013\277P\2130\276|\271\342>\014\177\005?\361\"\326\276\221\034\242>r{O\2763%\002?\254\210\030\276Jl\230>O\203\312\275\243\250\010>\224u\000>\272@\036>($=>A\010\372\276\030\276:?\234\375\374>\233\360\024\275KV`\276\205\352\372\275\213\013\367>-:\320\276\343\363\006\276\307\305b=\253\234W=\244\270\315\276\212W\346=>\357\363\276F(\304\276\231\314^>h\202;>\207\311\305=\252\262\t\277\302\354\366\275\026\331\250>B\261\323>\336\023\024?\252\241a?\013cL>]\005_\277\350-\007>h\317\361\275,\274R\276\302\327 
?\024\030==0\217\366>\261\361\235\233\262\326\2753\375\r\275\354^\226>\206/L>\217W\345\274\242\275\351\2758H\214\275\251\315\207>dg\202=\265q)\275-\353v>}S\023\2775^\336\276\'\001\337\276\372<\237\275\326\245\350=\204?\221\275\201V\351\276\221x\r\276\020j\366\274\3342\255\2757\277\024?\260N\010>\357`\223=\211\307\035\276\214\037\302<\372\t*\277|\217\254=d\327\032\276\030\375\327\276\000_==M\373\021>+\240\365\276\277\246\201>\337\004\">L\332\221>\241\002\234\274\207X\306>9-\024:\225%\265\276\350;{>\326\337\371\275#\177\024?\273m\016=\t\337?\276u\010\332\276\327\246>>\231\200\241\276\277\362%?\r\223o==\033\352>\323\350\200\276|w\021\277\n\372\007\276\005\253:>\237\204\312\276\275\310\220\273\266\214\267>\275\340\355>\276f\213\274n\177\306=M\300\347>\324%9\276\310]\007\276\200\342Y\275\237P#>\263\223=\277Xj\341=\330\035)>\255\220F=8z\022\276\025C+>\231R\301\276\274\201\017\276\206\300\000\276\303\2342>\235Q\331>T\204\307\276?\336\271>\362\343->$A\010\276,`\037\276\302q\230\276l\327+>,\230\330\276Id\335\276\342\227\304>\0058p\277\260\242\237\276\350t\034\277\201#\215\277eg\025\276\220\357\014>qC\234=\313V\204\276\265u\231>y\365\202\276P\005->\207\'\272\276\355{\000?\271o)?\\#\016?R\300\005?D\324\227>\005\251\260=\321\020\014\275\326sk>\266\334\210>\264\376\355>W\300\345\276s1`\276\244Z\365\276\342hN\277\264f\032\274\024f\001\276\004@\"\276S\364\244>\327G\257\273\373C\014\275\241\036V=\356\320\014?\301\010\300>\'\210\026\276\230\271\020>7\236\342\276Ws\312\275\250\330Q=\324(\006\2768`\327\276\345\360*\277I\260->\020\364\003>\203\267+\276B\001A\274\377\250\366\275\353q\204>7\0332\277\320\213l>R\261\025\276\317\321\226>t\0164?\026\013\037\277\306\321\342\276\210o\334>\2759\201\277\232\344\300\276\013\224\310>O}8>\261\201\362\276\036\323\265;]K\247\274\234\017\025\275\242\365$\276%\370\307\276\222\272K>\376\":>4\333\213\276\331\345\010?\226\270\252\275h\364\334=\320\367\250\276\'k\234\274\232<\322\274\177@\002?\2009\202\276W\343\013\276 eU\276\210\230\362>\037\265\211>t\335\t?\217\254]<\260\253\005\277\221\202Y\275\177\323\270\276\024\372\262=\326\301)>e\353\250\276!\251\251\276\210HK>\274\351P> \217 \275\251-\365>s\355\201=4\330\322\276\'\223z\275\370\013\321<\301\333\367\274Q?\021\276\320@k\276%5n>LBb\276\323\r\335=\013\033\253\276\016e\225>\035:\265=K\341{>Y\365\264\276TM\037\276w\342\271>\356\223\326\276\271\311\272\276\306.4>Y\300\233=\262a\201\276\246\211T>\221?&>\254X\007\277\314i\033\275\367\374\234\276\224#\t\276^y1\276\313\013\316\273\3564\341>\3310\210>\206x\266=\364o\247\276\317]jQ\036\314>\316\271\264=L\263\023?VH\261\276\303\034\246>\035qd>e0\202\275\310\312]\276FJ\"\276;\374,?\274;\000>GzO\274\024<\213>\273\244\270\275\003\312\204\276\2523\227\276\270-\006?\326\2101>\270\265\240\276\255\020\224\274 
\037\267=\373\371\212>\206\240\366\276x\354\214>I\321\252=\221\000\n?H\367\202\276\230\273\014\276$\337\251=\035\246\006\277\205\246\263\277C\023\272\276|\242\000?\275\307K\277\213\361\222\276\373\331\342\276\216\002\323>\253\037\234\275S\350\207>\214qA?\202\254^\275t\3308=\245\215h~\032?>\376b\256>j\353\364\276\300\0308>\250\276\202\277m\366|>\212\373\200;\202x\003\276\265\2152>q\305\217\276\0054\222\276,\2226\276\320\030\234=\n\213\377;3#\310>\340\320\273\275\254\377\224\274\314tA>\216\207y>g\242\033>\0006\252\275D\320\345\275\036g\223\276\207\325\232\276\013\372\237>\324R0=\350\001<\276\356\363\242=\323\225\006\277G\256h\276\234\343\000>\004\t\367\276\367\312\204>\324;\232=\214\353\003\275\315\"\240>S\245m\276\030\265\341>\355\220\351\275@\255\225\276\273\364\324=-v\r\277\261\266\326\274NfV\277\366\037,\272\037\374\265>k\320J\277\226\224\377\2750\342\276=\252\030T\276\022\3430=\221LJ\277\026*\030\277\235\003\"\277\366\010m\276\3522y\276\234\033\275\276c(\231=\r\004\250>\357\032A=\'\313\006\276G\335\014\277j\304\231>\214\025\225\275\272%\236>:aO=\336C\360>\206qx>\353k\322\275$\215v\276\026:\020>>\366(\276!\2678\274e\\5\276\274\266\003>x\301}\276K\325\356>\017?r>\031H&>\212\027\002?X~ =\234\312\240\276\377\350\357\2756\311;\276?\016&>\231\361\227\276I@7>7\031\226\276u\217]\276J9\302\276\031\014\001\276\314r\304\276\365\246]\275u\366\236>yO`\276U]\270\276\037\010\314>\245\344N>N\337\025\277J\352\253\274\2760\375>\377\013\204\274\224D\305>\001\002\275\274\2012\030>a{\017=\252%E\275\234\036\213\276\022fV\275y\350\275>\235!j>\351R\244=\364!O>Xen=\254\376\027>G\315\233=.\026\270:\034\004\277\276o\305\n>\226\0143\276:\212\310>\205\301\200\276\214A\272\274i\237\216>\244\2267\277\225$\260= K\343>\270\260S\276J\303b>%\353\217>S9\n>\207v\004?K\352\017\277\211\203b\276\243\360\302\275<7U>\372P\205>\007\2618>v\371\237\276\344\254\246\276{p+=\325\365@\276\210\032q\276\362\363\236>nb\256>\302\203\r=\221\353\323>\365Y2\275\211Z7\276)).?\031X\026\2775:\204>\336Z\212\276Qm\006\277\350\026\222>\225W+\273\334@\255\276\001\213\342\275\334\016\222\276\257&\327\276{\037\033>\3202\211>|b\245\276\224A\331\273/\207\322>\237\224\300>\222k\201>\366\223\217>\370k\340\275GZX\277\032%\210>\346\241T\276\2478\020\275\341@0\275\363\265\262=d\207\026\276\214\302\261\275\370\"\271>\323G\'?\035\000\205>\255\221|\276\274\270\200\276\251M\002\277\270\310\\\276\345{4\275\004\275\205\276\237NG>_T\243\2765X\273>\317cx\276\274\231\354<\265\207\365\276\2367\345\276<\265\213\277\031t\236>:\242\245\276d\005\244>\220[\251>I\344\'\277)=k?\302\240\"\275;\210\225\2766V\234\276\003\237g>\310A\352\275\361V\'?Z4>\277\242\210\340=\241\027\266\276*82\275\347\303\243\2760X\223\276CA\246\2765_\034?\355\232\034\277\373\346\027\276\030\207=\277d\034\204\275\214\333\337\274\331\203U\276\35036>\204i*\2773A\210\276\351E\004?\035\315j=\200\210\327>\330\203\231\276\335\275J=W\3110\277\323+\323=\327\220O\276\347\204R\276\034wq\276\002.=\276\376;\356>~\230\230>\371\302\302=\322y\010\275\023\005\226>.\305\333<\323\322\353\274\243<\025\277\033\336\211\276\027\205\000?\247\262\350>\351\202<\2764\235v\277]\006\277\275\313\312\037?!\215\337=\264\231\267<\225)\276=\271\3256>\032\027G?\326l\236\276\230\005\021?\263\023\210\277\217\r+>\337\200\323\276P\177n>V\3322>\315]\237\276\007\264(=\335\'\235=\\\226\250\274\247p\323=\346\230\254\275k\360A\277\026\270\257\275\274\207m=m\215\275>\024\265\307\276\206\277\n?;\307\375<\204!\255\276G\362_\276}4\215\274\004\000\200>{\260\325=\200\3505>\200\231e\275\366\177q>j\321\223\276-\2641\277\331\221\222\275\036])\276\227W\0
24\276\262\207\n>\242\035\201\274l\257 \276\365\352\252>>\327\307=\271S\027\276\344\333\202>)\254\213\275q\010\215>\033]\300<\272x\306S%o\274\021\316r>9\226D>\013\312\360\276\374\202\270\276\223\013\r>\336\025\337>t\024\025>n\025\256\276\363C\220=\324*X\276\230\232\246\276Nj\231\275\221\302\312\2741\271\026\2761\032W>\304\256\270=\352\243W?\235\353\345\275\357\346\352>\232z\207>;\316\254\277\273~;\276\310\013\237=WV\267\276`l\037?u\225\211>\024u\212>\001b\021\275\027\242J\276\210\350\235>\252\003%?\366\245\301\275\177\377/\275\031~\341\2758X\255>s[\247>\316\267\232\276\272\251\024\276Y)(?\322\235y\331\033\317>\014x`>\223\224\253\275c\036\222\274D\264\206\276M\237\245>\344\002U\276\345\261\267>*\362\350=\224>\223>c|\231:z\304\027>\213c\200<\223\203\224>H\222W>K.\373<\274\270\224\275\263\251\203:\204\275\331\275\251\016\025?\010\333\245\276Y\020(\276X\\3\2760)\252>\346\r\006\277AT\303>\005c\330\276\331\352\225\275\232a\333\275\305\364\302=\275\301\217=)76\276\243\010>>1\202\335\276\217\r\273>aQR\277\307\225\034>\317\233q>\031\202-\277\226\365\241=\226\262\221\276#\325\266\276\251\373\340\276\230\371\201\274j\033\223\276\225\326\352\274\013\312\374>\345\300,\276\031\372\237\276S{\024>\207U~>\026\374??\316\342\n?\005\316\212>\0016\001?\363d\026?:HP\276\337\354\247\276RD\301>U\013\366\276\202\\c>\302\233+>:\250\226=\262\2327\275\322\010\200>4\326\374\275\226\270\276\275\002\224\316=V\362i=\225\326\206>\367\245S<\021\245\000\274f\221l?H\236\333\276\207\021\301>\256\303\230;\014$t>\033@o\274\016\t\243>\010\321\360\276N#\270\275u\026\\<\212\202\252>n\254\324\276\274\226\361>\230/\364=\373\017\311\276[nx\276\262\013\000>s>\"\276\212\360\224>p\252\214\275.\003\366>T\317`\274L\026\'\275\365\331\251\276}\t\245\276 \343Q\276\362\257\211>\204\000\316\276\333\215\266\276\360\260\214=\n\003 >\023x\254=>\016\253\276\241\363\333>\272<\311\276\021|\210\274|\021D?\201\022\002\275`\243\207\276\005\034G=\237s\245\276a\225\266>a\332\235\276\304\\\304\276\206m\253\276\263\"\350\276A\232\004>\273\n\206\274\225^M>\r\266U?\247\rE\275y\031\221\276\352\240B\276U:D>_$\226>?\312\215>\235\324D\276\342\302\207\276n\225q\275X\010\224>=\3162\275i\344.\277\226\006\351>\256\212h;\275}u\276]\023\243\275\016\030\302\276D\030\205\276\212N\210\276\013\306\372\276P\220\247=\315\250\027>W\217\016>W\016}\276i*\034\276\257\361\235\210y$\277\207\373\360\276\n\000\300>\2437:?\256>\233\275\265\247\327=\303B\217>\002\260\200=\344;$\275\324\3253>\221\217\277\275\263G\336<@\210\214>5\312Y\276\377\n\342>\241\364\265\276\253\342\226>r\225B>\026y\212\2761\244\361=\346\211\024\277\217&\201\276\002Sv>\223\275\016\277\371o`>\203\345\260>pw\332\274\177\231B=\305\274&\276}!9\277\233~\342\276\325\t\323>\261Z \277\311XZ\276\241\277\257\276\313\345{>[\210C\277l\036\321>QQR=\336\266\031\275\300\354\270\275\026\310\212\274\343P]>\307\274\306\276\277\227X\276\314a\226\276}\327\025\275|\256`>U\032h\275\346\026\034>\"\264~>I\036\202>B\221\276\275\t/\026\2760\023}>\013fQ?\037^(\276\312\006\321<@x\305\276\354\260\235=\214nq>\361\017\027\276]q\357=\340\201L\275\023f\331=\267\275\207\275\203\006S\276\353=\335\276\232|9\273\314\216\210\276\235\007R={\276\035\277U\225]>w2\217>6\325m\274]\022\014?\031O\017?JL;\276\031\351^<\2670{\276\217\324\014?\031m\307>\250\225\036>\221.u\276\331\006\033?\331\nG\276\232\216\222>\317\026Y?p\356\360\276JU\023=\367\241\271>}\266W\276\344\335U?\235\325\204\276\332\016\215=S\030H>\336\001&?\r\230\302>\203\233\205\276\305\014\202>m\330\250>X7\004?\222;y\272\362\231-\276T\2135\276\026 
6\277\353\234_\275t\3651>\'\245\323\274=\221\250\276\025\377\337<\273w\352\272L\242\254\276\001\366\220\2767\"n\276\223\026J>S\242\325\276T\n:\276\374\206#?\305\330\367\275\271\256\271\276\360\035]>\351\341I?b\246\305\276,\014\267>3\344|>\'TN=U\214E?9\366M\277\353\312\025\276\274>\016?\260[$\276\322\223\370=\301m\276:\217\016\271=\2529$\277\252=\201\275\364{\000?\271%\004?e\352\023?\342w\260=\t\026\316=\231l\031>@M\361>\002\334]?\327\266\365>\217\013\304>\336\314\305\276E \374>\326\023\254=(9\253\276\371\373+\276\2129\000>hYr>\233 \026>\217\253\222>\232.\024\277@\277\300\275\001\210\211>n\246\230\275O]\377=\275\225\377\275\037\026\224\275\221Y\246<\032>\026?\237:3\275Y\300\230\276\031\303\263\276\r<\362>\\s\303\276\365\343\263\275\3431\034\277\247p\260\276u\267I\277\261\336c\276\332\212l\276M\334\274\275\027=\210>>q\250<\233\212?>Z\341\032\276\276%G\275\\\233\225\275\242\365\220>\340\221c>7%\023?!\226\372=\302 ~\276\030\365\310\274\3201\022>{\237\207>\321S\210\276\262\244\333=\002u\265>\331,6\276\352\257\t\276\215G\221=S\221r\276\005\265\354\273J\253\336>[\245\270\276jw{\274\361\003\035>\331u\251\275\340t\003\276\274\013\344\276G\342T?\373R{\275\300\301D\275\034b\255>\357\321\027\277-\310\362>\247u\221>\262\253\245\276\267\344\246\276$\333U\274\301f\367>Si\330\276\332\320?\276\231\335\206>l\r\020?A\034b>\334\221A=V\350d\275\377\205t<\225\355\362>\261\306R\276\336\246q=\rm\233>S\007\303\275\337s\202\276\301\026\320=2\2714\276\212\255\001?<\206\317=~\214?=\001\0256\275\276t\002=\362\332\274\276W\'\301\275d\371\345\275bi\257=\240H\t\277&\025\372\275\024\261\253\274ZR\361=\034\331\025>\007;&>|\"\263>/\212P=2\254\363<*?\001<\370\275p\275%W1\276\010\246\242>\357]r>LI\316>\001\376\t\2761\303\251?r\341\351=\3749~=M[\325\276Ja\020?7d\202\275\230=9\277\350\367\317>\310\225\265\275I\177\360\276\251@\200;\n\006\225>\376\261)>\201*\246<\354\273\230\276\261=]?}\017\206\276=;\255>z\227\366\276\244?\304\275\247\310\314>B\0068\277\374\245\231\2765\253\254\276\310@\253>\022\316=\277\355q\037?\315\366\000\276\357\375\177\276\245\213\277\276`\237\034?\274:\327=\220\310\353\276\214:\t\277y \347=\361\330\221>\261\307\036>\351\'\245==K\367\276@n\351>\225\247\232?\270a\345\2733:<>!a\224?\226=\004\276\207H;?\342&\036?\317\021\326\276\272&\013?\217\231@?\350\226\215>HT\016?\276\177\351>pfk\276\207\345\266\276\345\333\206>m\261\\>\334_\371>\376&\220\276_i\223>9\024$\274WX\344\275$\r\242\276\274\233\025>\300\t\006=\314\233E>\265&\322=\255\312*\277\330\372\237>\006\274\357<:y\326\275G\206}\276\306\260\336>J9\326\275S\306\357=\363\234\320\276\326;\345>4\320\316<\262k\221\276\\T\274\275\0169\014?B\260\225>5A\347>\r\336*?0V\260\276\263\235f\275\230\217\226\276\267\373Q>\227\256\321\253\276\025\345\356\275\037\304\257>,\237\215\276\2703\233\276\351U`?#\340Q>x\034\230\276\356\352\304>\177*\361=f&@\276OK\364=\033\225D?+\356\351>\232\014\316=\010U4>[\310\177>\365\344\214>2B\277=Py\323>\324;\212\277m_\252=n\346\003>\216i\351=K\347s>\247<\027>\252\275\301<\340\250Q=c\222\214>\230-\242>\272\253\240=/\332\375>\323e#\276+\002h\2760\271\341>\331\3728=P~\224>\340$\232\374\240@?\262 ^>\030\227\210\274\2675\003>pF\257\276\316\354\261\275)\016\350\276\334Y\225>\256\014\013?\276x!>-{\273>(\031\251>bt\360\275\327@\320>W\313\267\275\341\342\263>\347o\303\276\260d0?\005\326\346\275 
K\335=9N\331\275\225\322%?\307\262\254>\370\367\007>\227\373B\277\001\033M>_\353\342>Ns\264\275\234r\020>\373v*\276\305\034\362>,\262\212\276BgO>\217\230\325=\272\243\340>+\\+?\327\305}\276\224w/?\037z5\276\243\023y\276c\246{>,\027\335\276\025\217(\273\003\246\002>\203\355\376>#2]\276\233T\035\276MB\000?\2135R\276Z\340\355=yv\272\276\256i\316>\330\321\360\276 \227\323\276\034K\032\277$\277_<*\"\032>(:S>d\312\265\276BY6\277r\237\257>y\325,=\\\270\305\275r8%\276Q3\337>\344p\234\276\257j\006< \274\256>\215lL\276\034\211x\277W\357\271=)\025\343\274[\261\236\277\310$\244>S\336\030>j\205:\277\016\342\213=\332z\341\275#\253\375\274\324\226\005?\003\201Y\277#\317\324\275e\307[\2763H\361\2745, ?\334\377|=\354\335p?0\263~>n\314\034\276\322\266\326\273c)S=\375\256\002?)Y\251>\210\275\273>\236}\363>\035\'\200\276\254\t\264\276\222H\276>\\\317\215>\002\241\312\275\031\264\315\276\356E\035\277:\t\023\275q\346\330>\271.\035>\310\243~\275\362~\n>\210\002!\275.\t\332>\027\277\267>\317\263\227>\230\235\276\276\246J\014?\274\253\250=\342\236,\275e2r>HW\226>\212\037]\277\321\000\257\275\334\374\313\276\201\220[>\272\225&\277\342\344\322>\234\310`>\342\017u\275A\211\233=\310\316\032\276\1777b>\257A\320\276\373\237\032\277\350\316\221\276\306\376+>\237\205\222\276\333\032\002=\226G\202\274op\010?H.1=\246>\000?C\324Z>\370\265\207\276\3326\367<\220\221&=\356N\344>\244\345y\276y\025\271=\316\224;\275\245\201\036\276\333Pk\276YO\351<\257\330H>\242\301\371\276\374\210\236>\327\245\270>\231\235\345\275M\034E\2761\2078=\'\021\316>j\244\200>\317~\223>7\\\177=@\037\177\275\346\257\000?q\320\'>V\371\236\276]\212\254\276\010,\210>\345G\264\276\211-\322>\207\233\t?\333I\226\276\264\211\371>\030\214\321>\270\032U<\301\257:\276\022\221\313=\324\037&?`N\227\2760\272\215;,\036c\276\211\002\311\274F\365\251\276\027\347\013\277p{\007\276\255\315\026?\335\346\274\275L\370y\277\275pE\276\027(\243\276\254\230\325>A[\276\276I1\305\275gq|\2768\275X>\014LB\277Xv\327\276F\302\260>H\315\232>Z\'\344>y8\367=\351\320\t>\240\270\362\273\027<\364\2768\026\221>\341\2567\276\330\230H\276%\354\342\276p\242\203\274\315Jt=\332\317*>n{\035\276\372\261\261\275a\020\210>\313\273\241\276\304\351|\276p\013\223\276\271.\201>\034W\252=}\301\016\277\217\356\207\304PF?\013;\276\276\363\267\207>\370\363s=*A\034>\177Y\275>l\224\227>x\212}\276\351\337\376>\251/\324\273\240[-\276\204\257\364>\212C\243>n\323\247=*\240\310>\206&\226=\236+\324>\036\352`\277I[\342\275\302uQ\276|\220\"?\206L\250>\202\223\316>-\352\013>\372s5>\037\337j\277.j\265>\264\366$?rO#\276\27693=4V\361\377\342n>e\004\271>!\250\n?\2069\350>\210C\252>.).\277\337L\322>6\270\216\276\215\366\013\277>\234\233\276j\305K=P\005\350\275U3\000\276|v\037\275\360i\255\276M\022\n?=\311+\276\332\215\035>\321\314\241=\342\202\341\276\233\356\235\276(61\275IU\372\276m\266\235\276\027<\224>\256\331\360\276\207I\333>\213\346\024\276\013\261~\276\227\373\236\275AQ\320>\255-2\276B[\347\276d\236)?\262\342\002?\224\330A?\266\027c\276\342n\237>\254[\346=\243\265!?\370U\255\276\374\021\247>\322VJ=\274@\245>]\232\010\277]\3258\276\242]\347>I`\037\277k/=\276b\211\223\276\360%\225>Dz\204\276=\030u\276\303\326\364;\315>\014\277n\017\0339\312\3020?B[\r?Qc\341=]\337\210\2764S\337\276\200\213\204=HVo>\345\307\204>}S@\275\302B\036\277\371\257\257>J\251\336=56M?\304\243A\276\304\007(\277\251\317D=\313V\221\276\310\226\036\277\202t\261>02\007\275\254\037\367\274^\203\001\276i\265I>\016\256\247\275G\351\244=/\213\327=>o\234\276C\310\003\276\370\210\254=\322\224b<\372o^>#\310\256>>\375\342=\303D\325\275^\010\005\275[\317\351\276
[binary blob omitted: escaped-octal byte data from a file deleted in this diff; not human-readable, no recoverable text]
n\227\276a\202\224=\313\270\322\275vp\316\276\233\321\246\276M\237\224\276#\024E\276\254jM\276$\323\t\272\243\010\340>\221~\271\276L\246\221\276\227\234[;\342\221E>\203P6\276\201\345\314>\332\r\"\274\241JS=\233@K\274v\210\331=\367\213\200\275jiJ>\334*\352>\014\221/>vU\321\275\257mH>\310MM\276\300&\341\276M\274\247\275n*\362>\366\344\313\275\310\323\374>\330`\375\274u@E> \275\264>G\"\177;`)\301<&\251K\276\273m\207=\271\233Y>\275\264\243>\031\275b>\326\367\001>D\316\224\274\241}\226\275\177\225\216<\344rI>l\367C=\376\")\2754L\010\275\303\r\253\276\341<\r\277\"9\333\276\225BR\276\032^O>\361-\r>\017\376\330\276\350\253\014\276\345\353\203\276\334\206\336\276\233\272\352\276\317{\261>\215x9?\'\365F\276p6\214\275V\360a\2765|\226\276\305\350\374=cA\265\276\317{\211\275\326(\356\275\013?\266\276\236\302\210;/\3213\277\307\007\265>*\025\247=\"I!\276l\027\013\277v0\251\274vr\241=C*\007=zl\003;\202\300\337=\356M\263;G\307\204=\247c\031\276\220\013\345\275C\210g=\265G\003==\376f\276&\2255\276\347ur>!\021\372=\2304\255\275\000`\007\276\334\211\022\275e]\210\276a\277\230>\371\tE>\233\032[=+n\021>\312\274\305\276\212\222\233\275\201\016\307\2762\0323\276u\323\221\276\224\347\245\276U\351w>\024Nu\276w\236\034\2769\234\030>\264E\267\275\266\372\036\276\241\034\277\276\212P\331\276\n_\250=*1\242\273\210\020\340\276\021l\243=U\t\346>%\231\020\277\334f\034>\332\201\014>N\024J\276\357\245s>\347+\305=\314\251\361\276\354;\035\276\3445\027>\357A)\276\022\023{\275\360\243\327\276\345\003\261\276\376\342\231\276I`-\274\335\337\030>\233\274};\311$\324\275\201\222\204\275\033\215\206=d\254\226>G\206\361\275\341\2133\275\320\345\274\275*Q\216=)\362\014\276\036\031\213>\260\361\370\275\377)\263=/\035/>\253CB\276\211E\225\276\247M\004\275\274=\323\276\273\260S\276:H\326\276\247\271\252\276\311\237\352\276V1\307\276\307\023\207\276\236_z\276\323\'\257=\242\2103\277\022x\020\276U\206\266\276\350\236\310\276\205\360I\276\332#\305\276@\277\332\276\356\030\323=\327\230M;\344\223\267\276\230\234.\276a\311#>#\352\361\276;\030\244\276\264N\375\276Vh\022=\200\017\264\275d}\231\275c!0\275\272\036:>\322q~=\177;\236\2769h\022\276\230\003\322\274\236z\334\276\\\251a>Q\240\246\276\254ck\276K33\275/\350\240\276f\263\230\276\274\346+\276\212\203\\\276#\n\236\276\331\340\376\276\255\276b;\335.\252\276\363\002\r\276\373\357\037=\231]n\276e\202O=\221\206\366\275\367[ar;\323\275(.\331>pE\261>\033 \374=]\257\265\275j\261;\276\220\'/\276\263\224\003\275\355\340\"\276\333R+=\021\354C\276Y\334\003\274\326\326V=uk\253\276\202\244\303\275]\023\250\273\007\026(>v\260\302>\301h\027\277\254\253\001=G4\275<\314\374u=g/\207>\365m\323\275\371\010J\275COY\276\357\315\252\274\203k\242=\037\215\024\276\374\2716;\016\007\203>}\315O>=f\016\276\242\377\365\276\010\357\265\275\230\211\356\275HS:>T\270\036\275 \305\212=\230\207\027\275@SF=\212\246\240\276\300\230\013>\010\034\377\275\271\031\254\276\020\357w\275\001zP=\212\200\001>\207z:\2769\256\364\275\231tl\276\244\0231\275\302\255\304=K\246\310=\202\257\022<\027\364\340=\ni \277RE`>.{\017\276\253\262\311\276z#2>\257\274\t>\342!\006=[ \213\275\366\r\200\276\034B\223\276\355\310\256\276n)\331;\022\370\004>\315\240\331\307\345I\276z\212\3719 
2\206>j\203W=\\\301\026=\252\r\304>\375\000F=\271\017\226\276\332BY\276E:\017>\341#(\276\270\326\243\275\317\337\212>\264\025\231\276\311\220\243\275\227\026\\\276[q6<\266\312\311=z\267\352=\215\254\277\276{Xk\275!\206\324=\220A\r\276\351\240\326\276l\2730\276y\221\263\275Lwd\276\022`\211\273ZwK=\265!\032\275\225\276\001\277\247\330\371>\265hG=\273\335\227\276\024u(=\341\265\310\276\026\245\201=\016\016\340\276\330\273\036\275\224\273\317>J\0004\276\216\233\367\276\310D\336=\016&\301<\260e\013\276^\000h<\321\006\002?\n\1774>\211 \326<\362\322\275>\347J\366\275\335\227_=\331\000\337\276\260\204\033\276\224\340$=\177\3077>-\304\034\276\324\254\235\276\363\272\201\275\267-\021\275\264\371{=\200=\230\276L_\227>\366\021\301\276\312\246M\276\252\206\326>\360\214n\277\216\n\345\2752\364b\276k0\203>]T2\276T) =`\357\235>W=\333>\340{\333=\364d\252>\322h\204\276\355\330?\276?CW>\014?\225\275\364X\212\2755\260\300>\334\225\'?\364md=\350n\263\276\342\211\240<\313x\360=\021\"\n\277\323\251\177\276\264\247\254>3\373\033>\302L[\276e\002\273\275\202\306\225\2763c_\275\210L\314\275\007\372\007\276;8&\275\261=*>SrJ\276\'w\002=\022\311\331>\364\034\215<\242\374]\276\242(\002\277 t1=\347\352\013>%\243\372\275]\033\360>\205\r\213\2756,\310>[z\002>\307R\322>\010\360\235\276\263\310\306=-\021\305=9-\036=_\021\212\276\333-\215\276\221\373m=3\243\245\276\356\031y>\260\340\030\276\036\367\266\276\025R\254>\272\201\276\276\227\207_>\327\233r>77\213\275\3231\214>\261\307\210\276\034\320\376\274\265\242\241\274B\315+>\334\244\006\276\356\302_\276\005\322\260\274\222\346\341\276W\217\352=(]\210>\"k`\276\341\200\016\274#\032I=\270x\022?\013\313P\275j\336\204=X\223\324\274/\311\023\276Y\037\242\276\312\206\365\275&\017\231=\016W\031>\301=:\276\330\376\232\276\236H\247\276\315mB>\017)\257>\372\t\265\275%\026%\276x\314\216\276\324\354\201=\241\037\364\274Q<\322=E\\\223>\316[X\274]\364\017?\310|8>\274t\332\276~\375L\275o\340\222>\037H\326=\257B\236\276G\221\251\275\nB\233=\354\3749\276G\360M\276\212qo\275\376\340\335\276\227]\332\275\n\261\027;\210\271\230\276b\273N\276\324~\232\276\351\241\203>m\022T>\253\233<\276\0377\243\276yvy\276\230\264h\276\215\003>>T\313\367=}\342\007\276\377\277Z\275%\353\t\276Z@\241>\261\007\030>C\na\276\375\030\235=U\301@\2768Q\350=\014\263\310=dA1>\302,T\274\\\322\344\275{5\016>\243\002\320=\341\320\263\276k\021\266\276\237ec\276=\240[=8m*\276C\216\233=\\\316(\276\363\2116\276Y\037\022?\277\316F\275?\023z<\305\\\337>\"\3474>)Z\t\276\246\232\303<\361Q\362\276\231]\030?\223\333+>\215\001\351\272\305w\215>\021\265\370=\211n\361=\037\n\252\276\214Aa\276\022\264\n=\242E\013?KMR\274=\317\005\276\326\254\344>X\032\265>\237\336@\274^R\375\274\221\305\230>\325\177\002?\327\263\212>5\207*>\357(\206\275\222\212\005\276\350\300\014\276\272\301m>\215\236>=\000\244\352\276\272\253\365=\301Q\253>\212KD?\201\014\303=\2744\272>\325\203\200\273\251}\220\273\300/\352>)\361-=|\302\t?\004\374\327%Z\376\275\213\251\362\275P\026\226\275\361Vd=\251\302I\276\366?\035>\346\305f>l\363\260>\223Y\343\275\277\352\352=l_:=\334%\306\275\376\001\260>Cq(=Q\003\"?\030\"/?\022\206\225\276V\237;\276\270N\320>[SD>\234\255\306<8\375w\276\256\020\201=\334\005\003\276R\2339>]g\205>?\335\261>\260\223\026>\275\315%=\272\377P>\017\243\206>\036\215O\276d\037\034\275M\257\202>\037*q=\334\336\342>\350\372\307\276\244\202\021>]C=\276{\035O\276\036\242\323<_\177N\274\\5\036\275\232r\252\275\265s\302>\004\256M>E\210\204=PM<\276\034H\253>\014\021\346\275\337\333\243\2759\277I\276*IG\276Q!C>\370PD\276\255>f>\216\002\257\276\243\r\326=z\220\020?\025
\032\352\276_e\370\276\034\"\272\275\226q\277\276[\206\207>\207\317v>j\366\335\276\315T\202=\250`Z>l\025\273\276\256\007\212\274\316N~\275D\377\323\275\203\215[>/^\375\275\336B#>\3403G\276XL\202\276\353\301\267>W\021\201=\322C\270<\337o\347>\347\335h\276\331\276\313>0\016\252=]\261\354\275qA1>@pE\276\274(\275\276)\016P>v\266a\276n\t\341\275Bv\264>\225I\213\276\230>\205=(D\327<\275\206\210\276\366M\214>bQ\273>i\002E>\341,\234>\372\357\236\273\340\033\037>\361\237\340\276\235B#>\r\331\213=\254~/>\216\237\007>\301\315\260=J\024\231>(\364\017?OC\213=r\335\277\276\203gK\276K\322\275\276\334\206q\276\302+1>\362\334\236~\321\224>\036\333\336\275\301\216&\276\300Y\377=ZF\325=\220\r>>\363\277B>\334\305\007\276g{\034\276\017d~>\221\350\325\275!;\241>6\256\223=\266m\200=Gp#\275\247]@\275Tck>\233\256\251<\177N\020\276\340\263z=`}\254=X\361\236\276\310\036\367\276\2103\327>e\305X>\037d]\276\036X\227\274F\010\035>\252\346t\276\241\222\207\275|F<=\014\2124\2769\336\">\322 \022?m\340\260\276C(\221=v`\007\277zV=\276\367!\244\276\005q\241\276\r\201m\276W\224\242>*\364\232>\014\215\312\274{(Q>T\301\267>\024c\231\275xi\264=8\035$=\233\321f\276f\275\252\276\033\350\333=\371\352\302\276\017\342\366>gD\033\276q]\3479{\\\367=X\314\021?\301\323\314\275;\302\355=\007S\300>\3611\204\276\262\357\337\275%\217\346=\300\327\200\276\254*o>\260\233\365\275F\250\215\276\021t\234\275!\032l>\"%\010\2752\031\352\276>$B>2\210\212\2759D\200\276<\3527?qgD\275J\357\227>5O\213>\215\327T\276\242;Z>\317\224\030=\\\270=\276\002\211A\276\364>4=4Z\237=t\331\'\277\272=`\275.\334\210>\266\311\314\275\252\262\206\275I\032\270>q\265\036\275q\373D\276i\240\204\276\270\354B\276\023\002q>a\001.\276\222\031\230\276zZv>[\315\263>\372J\273\276\371\363\303\276zT\320= \367\353\275\343\336\264=\000\263\277\275m\314\266>\'n\007>97->\221\340\241\276\206\261t\276K\036[\276\2402\030\276\262\260D>\"\253$\276\314\276\346>p\372\335\276&\316o>|\243\314=\301\220W>\251_\020=\304\317\216\276 \002\031>\020j\'>\327\020\327\274\023\304t=(\322\232>w\327\034>\363q\243\275\204\265\r>\013LS\276O\236\'>\304Q>\2740\303,\275\225x\257>4\244\263=\227\264\312\276\267\315\253\276%\026\205>W(\221\276\nL\274>x6\250<\247y\213>G\200\303;\352E\216\275\310\200Z\276\373\253\347\275\341\376\024>Z\255N=\344\302\341=\206\357\271=\256\332\247\274z[\022\276w\217\241\276H\002_\276<\320\014\277\213\2464>T5\370\2758\035\255\276A^d>\'o\331>+j\214\273^\241\024>G-\221>H0C\276?\256x>\275\314\217>\214\262\355>\226\340\300>\301\320\017>G\352\">\032\t\200\275\032\251 \277\271a\r\276<>n=\002\032\267\276C\241K\276\263z\200\275\325r\005?\'&\004\277\'\002l>\261\332z\2765\302Y\276\300M\225=N\245\343\275\026\372\267\275\214\267\260\275F\t\312\276\275\253\213\273\000\346\343=\252)i\275\237X\223\274U\311C=\325\3753>\307\316(\274D2l>\243\227\316\274\'\001\346=2\022=\274\260\255@\275\351k\220>\ro\246\276<\241\225\276y\207\276\276\321\241t\276r\312m>a\023\270=\274\215\230>\222\013!?\220\244z\275\215\027\345\276\214\r3>\277+_\276\320w\"?Jn\361\276~N\214\276\0247U=\352\245\272>\274\236c>P\330\362>\021\262\332\276$rF\276\006V7\277\267\3509\276\014\001c>\213\263\325>7\370\343\276}\305\327\276>\3739\274\205b\326\2750\230)>i0\231\275\371\177\003\277\357\371\250\275\246\242\247\276\250\220M\276\300\207\241>R0\224\274w\251\361>F\367\275=r\rB>n)\326=\260m\016\275v\342U\275\334%9=\022\363\334\2758\204#\277\344/\032\277\333\200\275\276\n=\213=\227/P>\032\342\203\275\023=\033> 
\030c\276\236\375\013\276\026U\317\276\263p\202\276\253j\245>\226\257\314\276\005\236\211\277O\023\003?\341\310[>\te*\276\035\261@\276\277G\233\276F\177\t\276/\031p\276\204\177\324;{s=\276\216{\025\277\343\303X>dK\'\276@e8\277>\253\305<\267\t ?\275\021H>.6k>\326\326\'\277D\315\205\275Y\325\240>\277Z1\275f\230:\276\274\300\221\275\322u\207>nQ\027\277n0\355\276\372\215\310>\335\252I\276\\\233\n\276|?\322\276U\203\014\2777\376\202\276,\365\206\275\025i\335\276kA\324;h-)\277\305\353\314\275L\244\250\276\035\337\021\277#\nv\276\007\330=\277\364b\220\274\264/\376\275M\346\027>\363\013s\277\001\322#>\356[\367=\362\371];!*T\276\343\001\207\276.\026\212\276\214!\242\276r\223\242\275/F\333\276\221~\355<-3Z\276w\'\350<\267\322\331>cS*?\233$\272\276G\233d=g\264z\275\233\325\322\2758\363\370<`\264\352\276?\221\303=\017\324H>^\305\r=\014yl?\021N\333\\>\021\006j>\037\364\255\274*+\t>oL6=\324[P\275#eT?PI#\202\376\301\272!\314\301\276$\2337\276\250\347H>G\355\004>d\3167?\265\225i\274\263@\014=\362\202m=\353+H\276\317u\377\276\376g\000?\366|\371\275\032\355h>,\2456>b\344\240\276YG\351\276\321`O>%\271\305\276\236\026\262>JN\355\2768jq=r\000\274>I[\007?]=\315\275\227T\361\275\300\304\315\276\336\327\220>\276`\233\274\005\017\365\276\236:\264\276\201.\220\276N\351;>\030\212q\276n\312(?\370,\256<\221!\234<(\000\355\2752O\212>\316\\\017?\265\246\373=\327\312\265\275\3239\024>7\2144=t$\222=\277l\273>\022o\034=?\366\356\276\304\366,\277\033j\222>%)\333=\351\270\263=\200:\000?3\203\357\276:1x>\356!\016\276MJ\014\276\374\024\177=\203\365\274\276\303S\276\275\307\210\030>@\344\"\276\263\245j;_~\036\275\210\246\006?\r\277\200\276\024\036\2718\220\3763\277\261\267\016\276B$-\276\366%,\2771\201D\277\264C\333>\3568\353\276Q\034\224=\004j\004\276\233\177\327\2769K\332\275\351u\001\2766\221_\277\031\353\361\275\300\311\226\276h\205.\276!\037\250\276/\304&\2763\353\260\276\256\353\346\274\316\322\363\275\037\3614>\016\370\305\275\332r\227\275P\347X\276\216\344\177=[\312&\276\205\352\331\274\271l6\276\321E\016>J\336z\276/JT\276+\253d\276\320ai>\025\025\320\275\227q\336\275N`\375>\036U\033?g\311\335>\213,\013?\255\355\233\276\256\311,\276v}\304\276\363\227@=\374z\226>p\242\'\277\266D\210\276\215\320\253\276\245l\010>R\030\341\274A\'\217=\315\375\214=\234\324U>jk0>4\346\023\276\242~;\274\024?\016\277\271m:>\253\343\035\276\014\036\336=`\233\314\276\252\221+\276\206\327\237\276\017\222\036\274}\027\207>\177_\220\276\366\241\337\276\313\222\274>\370^)\276\361\350\340>\345%\274>\266O\321\275\263\276\317\276\362\220r>\326\t\255>\304\003\272\276\376\014\030\276\356R\033Js\250>b4%=r\275\002\276\333\340\000=\177\000a>\\K\330>6x\234\276r\204\006\275*\373g\275.#\335\274\202\034D=:R\034\276\301\256\270\276y\0140\276\r\360\202\276n;b>\236Y\037<6&\224=3U\347\274O_\026\277\264\356\345>\365\261@>V9\276\275c\362@\276\265\351\262<\004<\001>\213\026\024\275\353\251\204>#\r\230\275\266\201s\275\251\302\211\275#\347\211\275\370\204\275\276cU\270=S^}\276\372h\033>\335\373\225\2762\360\211\2769\215\005\275\007\231\241>\300\254\231\276i\004\276\276.\3646\276\022\206\027\275i\"\243>\3340t\276\361 
\204\274\007\312\225\276\300\272\365\275\240\265\027?\312\235\024>\244#l\275\375\323\304\275\035T\340\275\3015\236\276\277B\222\276_\226\024\275r\3000\276\276+^<~*\360<\0324A\276\271Sv>\364G\276\276IP\250=c-Y<\203\334\016?\303(\273=S\0272>A\243\236>\373\346\275=3\020\254\274\227~p\276R\300\335=T\326\306\276a\303\303=y{\242>\3358n\27533\205=\323\032\021\277\250\264>\275\370\362\211\276\241!\332=\201s\243>\231\001<\276z\306\213<\266\262\235\276\230\321\213>\367\371\223=\243\361f>\036\203<\276$\333z>]\307\003>\364c9\277\036Gm\277Z\'\361\275\340\2251\276\177Hz\276\343\"\214\276\333\013\207>\232\317\016\277\325F\230=E\373N=\232\013q\275SIn\275\2732\221\276f\356\200>\275\270\305\275\277\225@>5\211\221\276\234p\272>\202\031\002\276\320\251f\276\347j\005>\006V\370\275\277J\336=,P\370\276\313<\007\275p\350\344\276\276&\004>\354\322\220\276\235\267\325>\001\007\320=\310Xb>\351\311\014\277\272Y\257\276\353\350\216\276\316L\222=y\035<\2769`\303\275\361\375#\276\300\361\271\276\014\327\363\275\t\332\205>\245b\301\275\271\216\032\276A\307\257\276\261\002\246>E\304\245\276\371~m>\3565P\276\3157>\276\276\007\257\275\005\010\243\276\246rt\275\234J \276\027\250\247\276C\0031>~\366\315\276\tz\201\276\301&4\276\234\257\324>\321\340\240\276\014\300L>\234\206\267\276g\302\337\267o\347\276j^\241>\004++\276\247@$>\317T\233\275\306^\235\275\303\222\377\276\357h\000\277@y\001>&Aj>\263{\233>\210:\367=[\207\001\277\366\332\246\274\345\007h>\312\301\326\2762\377\246\275\214\356\311\276\252\336\335\275\022b\2358\232\374\004?;L\021>]\361\201\275\264\3768\276\2646\221\276\233*X=\322\032X\2761\376\357\2764-\331\276{\221\013\276\2119\322\275s\256f\275\032\2637<\003_1\276\223\013y\276\321\366\327\274O\343\301\275\3468\373\275\363\310\255=\220\"1\277\210\364\301\276\332\304\t\277\261mK=\203OS\276\221d\301=\342\344\253\276A#8>\364UD\276\356PE\276\216\007\265=\356W\014\277]\351l>A\032\004\277F\210Y\276\274/\303>\217\024c\274\243+\035=\037S\206=\223I\317=\326\324\253>.\345\007\276\266\366\234<\235\000`<\256_-><]F\275(\022\021>X[`\276\016\271\001\275\\8\264>\300\371\203\276\017n\007\276\350D\006?\323\237\027?\346LI>\024\335\210\276v\275\205\275\177\372\261>\353\351\024\276u`\336\276\345Y\337=\216\316\261\276\241\340\022\276\037mp>EM\035\277\222Y\214\273\022\327\214>\265%\t\275S\263\267>&\030\231=.\0317\276W|\201>\205\312A\276\352\333\267\274\345\371\270=\246UT\2767_\333\276\256\014\n?O!\211\276\234\277d=;2H\276s\3433>GK\216\276^\272v<\233\343\016\276C\005\245\276\206\351\332>\201P\006\277L6&?\303\355\313=\014\007\247\276\000\203\010\276\205\241`>|G\013>\033s\300\275\272e\262\275\221\022\336\275`\003\210\276\t\320\021\276\r\274\241>\277\nS\2767\237\003>\347\273z\276\334\"\221>\333$\200\276\030\233\273\274\304\n\004>\252\256I\276\266a\277\276\323\242C\275\221\225;=E[\311>\336\260\225\275LK\273\276t\306\227\276\245\275==\300\336\244\275w\312h\276\306+\244\276QyG\276\000\345Z\276\233\356\031\276\235\372\304=\263`\235>-@p\274\0053\030\276H\026\030?\n\357\211\275\000\322\257>\311D\264\274\342\221\305>U\354\035=\246V\224<]8\303>\230\273\313\276}\246\214\276z\336S>\350N\202>\214\035A>\234\n\237\276\362$\374\275\036\346\301\276r\272,\275b\370r\276\313\234\r?\321,\010>\250\302\341=h\246\207\276]\n\r>\201+\343\276\035\203\363=\023\323\261=\n:M\276n\014\224\275\013\\\313>x\353\222>)2\231\276F\245\273=`\243\023\275\314s\273\276N\301(=u\244\206>\010n\224\2769\244\241>\321\332\367\275\2266\000\277\237\375,\276o\3372\274\360x\263>\327\354\366\276!b\204\2768\324,=v\361\335\275\346\245\030>h\033\353\275\306\241\262\276\230\003\036\275\252v\261>\31
7\026\334=j,\007\277\360\240\251>X\353C\276E\356\206=\375\025\252=\355\002\216\275e\354V>j\256\234>M\264\030>0\\\273\275\352&\204=\002Ye<\312oz=A\250\001>#\235\036>`57\277\035\365\363>v\030\242>\235\004\212\275x\214<;2O \276\347\300&\276\350\0162<\214]\270\276\017B\344\275MJ\023\276-\241?\276>\322\020<\313\236!?V\005\005>e\323\000=\2263\210>`[\220\276\037K\246>})\034>c;;\276\036\264\301=\2459F=\202\247\303\276\232\r\267\276\343\231\342\275SRT>\023\311F=\242\256X=\024.%=r\343/\276c\231\276\275\247y;\276\256F\237>.\341f>\313V\365\275\020\363\201>\336%\205\276\327\251\275\2763\252P\276h_\203\276E\355A\276\274\345#\276\306U\247\276,\267\t=\325\315\002\276d2\320=c\007l=\217\262\007>\021\007\024\274\024~\267>\0311i>\305d\322\276\203\005E\276g\017\270\275\260b\032\276\237S\020>6\220\321\274\302K\023\276\ny\3679q\215\260>8!\321>\264\002f>\022\355\037\276\000\224\340\275\211\013\203\275\021\201T>\227fB<\210\360\326=8\007\005=\023\\r\276\035\252\257>u\202\251=\344\300\200\274@D\225\276\221d\213\276\006\363%?O\324\253\274\243y\254>[\376\024>\266e\227\276\301\272\215>\247\301\352=\226\366\214>\307\003B\276\265\362\000?\247\016\305<\253\247\330=y\270\375\276K\256\007>x\332E\275\216\257\035>Y!\277=gc=>\242\273.=AX\001>:\034\365\275\020\342\177>\203\236\005\276\231q\275\276\223\246\221=\226\205]\276\202FF\275\000\356\000\277\2061\250\276\321\031g\275\007\223m>&\3079?h\302l>\256IW=&\302\020\276\203\315\347\276\256o\205>ff\225\275!~\270>7\235i=\263\353\210=\306\014\315\276\253\030\177\275\343\347f>f\037\020\277\034\327\247\276!H\234\276t\016,\277\2542\000?\204\226\034\276}u8\275\250n\372>c\344$>B\233\365\275\312\203\351\276\037\031I\275\211\320\036\276i\305 \277\3743\343=\330@\010>\032\366\266>\367H\360\275\363\355\323\275%\367\036\276\031\0018=9^j\276+G\342=\263\022\205=\363k\366<~\256a>\317\215,=4/\031\277\346\005\007\274\306O\004\276Yq%\276\326\"\036>\314%\037?q-\345\276\355\335\301\275Q\252\237\276J\341\031>\277!\030>u]D>!\2632\275\025Br>a\306.\275\017\277\273>\034\031\217>\035IS\277\"|\363=2\014\252\276\255%\232\276\'/\255>e\024\243=\361\021\210\2754\232\274\275\010\212\322\276\350?\223<\021\214\222\2766\310\214\275\271\240\336=t4\237>%\355\335\275\246\320s\277\222.e\276$Bi\276\213\224}\273\'\377\214=2#\324<\240\tr\276{\323\336\276\363\205\323\275\277\237\367\274\315\036*\273?\305\275\275\026\\>\276\005\226\331\276\263\r\324>oI\005?\267\221/\275\004\002\347\276T\022\261>\026\177$\277J\177\243\276\3478\201\276\324\355\310>`q\352=\370L\260>\206\360\"\276\370k\212\276\037}\235>\232\020F\277\235E\221\276X\206\274>\341p\312\274\201\331\177\275Fh5\275\226\337\367\2766\254\224>\025\232\237>\314I\022=\377<\251=\265l\204>.\210\247>ig\r>\367\352\261\276P\237.\277SC7\276z_\330\275\240 \004\277\010\334\215\276\214\355\007\277\3051\234>\370\202\233\276dvG\276\003T\203=\327O\205\276\"\330\327<`*n\275\031\271\244\276\0223\013\277\214\350\001=t\306\330\276z\316\031\2764\266\014\276\324\231T\275\375\"\366=\270\t\014\277\373\311\354\276\237\'\311=\032u\300\275\020x\n=AT\223\274\327a\013>l\020X\276\2028n\276;\351k\276Xmt>\254U\346\276\023\217J\276\377\243\n?\030\312\'\276\031\2179\276\217u\241>\204F6>\346\022\315\275Mh_>\\\017W\276\026\241j=g\353\'\2774\346\231>\021\201<>\237\253\301=d\207\202\276w l\275\037\374\221=?M\241\276\"FW\276w\250\316\275e\272\215\275\036\242]\276\250\224\215\276\241\211v\276\2575*\275-|\243\275\002\217\336\275v\376T\277v]\006\276A\035\t\277\006\177b\275a\0328\276 
(\034=~\256\351\276\030;\316\274?\350\316\276\210a\236\276\275\n}>\003\006\262>\253\245\267>l\241\264>\255\241\267=pH\277>{>i\276>\255u>\005\313\026>=\257\007>.$\241=a\253;\275A\232\221>\313\362\325\275O\242\272\275_[D<\367ft\277\257\004E>5\306\325>\rH\337\276\372\352\304\274+\350\306\276]\007P>\262\213*<\367;\252>\233W\262\275\373\274P\277\241`\203>|\341A?\202\263\257\276 -\223<\2773B\276\271\207X?j\230P\276\354`\304>\226\372D?\003\024E>r\217 \277m.\023?\330~\031>\030J6=\352}\n\276\270\022F\276\360A\301\27694Y\276\033sz=\213&e?v\\\003:\347\355\224\276\303\314\235\337\\\353>\306\370\001\277\'Z\323\276\327\206u\276\r\250==\275J\303\274u/\024\277\336u\210\276\n\3121<\263\300\202\276\370\211u?\356\265\251\276r\233\035>L\322\003\277\003\031)=\030\277\213>>y\020>\014k\223\275{\025\030\276\260\262\027\276q\263I\274\260h_\277\354\242\237\275XL<>\210\016=>\355*\000\277ZE1\276\377\264G\275z\201\">\222\262/?\020\265\355=\241>\203>\240\363\353\276&\023\346\2755\306 >\273?\300>\345\270+\276\3764#?\362\250\003>njB\277\332[\333=4\334~\277\253\236\002\277\255\301^\276\035\344\310>C\236\"?\232\354 >\313\236\210\276\334\214!>\206\362$\277\242\"\230\276\311\243\365>\211Eb\276\017\335\214\2745\023\025\275\357\261L<\345\215:\275+\212Z=\032\312\207>y8e\277\210\343\005\276\312\243\201\2759f$\276P4\270>e\240\245=\305N\024\275d\320:\275\203V\034\274(\352\204>\023o\354\2743\360\001\277:D]\276\273,\032>0\363s\275\02714\277!\034\276\273\363\216\255\274ox\264\276\357I(>\227\030~=*\310\027\275-O\017>\326\353\025<\217r\333\276\'z\312>k\335\260>?\246\320>}\'\241>*X\310\275h\262j>\036\266\020>f\325\023?\306h\231>*\374\025\276\340\304o\277\250\305;\276\t\330z>\226\232\306>\374rD>\302{$<\033\341\204>`\206\321\276u\347E\257\364\356=X\307C=\035\230\354=\212kE\276\277\372W>\007x\020\274\013\031\314>2Io\276\224\272H\276\221\216\024>\356\035\232>\213\241D\273\374\354\313=:R\364<7\213%>\345\371,\276I R=\211\202\003\277\300\363?\236<19E=\013\324r=\202\324\211>\374s4>\303w\022=\006\253\257>m\365\217>j~\254\275Im\313\276J\264\226>\243\020&=\344\321\273>}h\271>\n\362\025=\352;\005\277\252\326\014\277\013 \316>\333\313&\276.h\211=(\330\336\276\317X\223\276@\251\021>\021\224J\275\005\337\252\276(\324\327>\031q\322\276\014\237\232\276\2547L\275hmc\274YO~\276\347\227\004\276\325\014\333\276|,\324>/\236.>\034\363\306>\364\271\242=`\305^>\3629\324>X\370t\276\004Ms>%\325S\276\204S~\276\346\365B\276\261C\340>=\247Q\276z\233K\276W\300\263>\016I\026\276\352\246\305=\rp\007>\353G\246>2\200\321> 
\320\234=\237b\364>\252H\030?\327\302\004\276W<\276=\250\231\206>k}\237>m\202\204>Kc\252\276\276\312\371\275/\267==\014C\305\275\266\200\306\275w\302M={Ju>?M\256>$\365H\277k\267\272>(\360\177>\212\037;>\237\230\302=\213nf=\3448\334\275f\352\257>\315\017R\275Hw\353\276z\352\267<_g\242\276\013\206\362=\2408+\276\230\302\247=~&\254>\253`J=g\247\312>\246\231#>\035x\217\276\216\2249=gj\313>e\244\242>\251j/\276)\207\371=)qD=\204\240c\276~Pu\275G\275\001?\177\243\266>~\034\277=\227SU=\301\010$>V\237\253>\335\000\237\274\\\307\210wo\001\277=\347\377=\352\261\253=Qm\321\276\301x\037=\177(\275\275\2116\214=#\265\262\274\"\274\010\277W\204\260>,\277Q\276On\331\275\243\353\375>`\301\336\276\027[\361\276\321G\354\27595\023\274\245*$?\370\210C=I\014\035\276\0172o\275#\306\272\276n\246\270\276\362\203\211>vv\037?{xH<\301@\000?@M\370>\231\361\244\276\r\261\246\274\211\343\270=\230?\310>Z\037\200>\230\2664<\377\221#\276\321\343\030>\257\310\263>\236\233A<4\t`\276\263\336\020\276Lj\254>\034\376\321>:\276\373\275\211\005\253=[\332f>k\243\305=\306o\033\276\"U\230\275zZ\010>Y\327\">\244@\000\276\355\035Q=w\351j\275\333J\210=w\202-\276\273\006W\342\302\345\275ro^>\213{j>\005\310?>\252\235\277\276\231\225\237\275\331hF>\361\334\002?\316\356\000\276\320\201\205>\006\210\251=\037\200\226>\253~\207>;jV\275\271\013\005?D\217c>\346\215\035>\010;\216>\205\027\266>=\037\260\275\366\336\300:\353\257\235<\3441\'=b\020\214>\370tU>ap\215\275\376\204\207\276.\261\212\274\314\370*\2749W\330=w\215\347=7\213\226>\010\"\312=\223E\226\275e\021\004>\344\316\000\276\021\215^=+\241\217>\367A\330=\371\357\256>\306\300\\>\254|\311>r\225\\>#\"\350=\333\r+\2767\265\247=\303M\226>\316\246\252=\204m\000>\335\r\363=\253\207\255\275_\r\037\276`;\275=\252\366\215>:\013\255\276\214\033\240>\322\021b\275R>\013>\202e\202\272-\266\203>\241p\207>\356\250\260>\037\233\315\276\321$s>\025\346\255>\217\004\353>\241\2477>t\243\217\276@]\334\276\231K\253\276b\203M?\334T\255\276h\200:>K\257d=\320\341\254\276P_\031?\317T\267\275\035\270\232\276\374t\322>\004*\216=\267\t\203>\034\235\307\276\237\354\233>\323\273\024=\036Z\023\276\217\244(?\366\016\360>5kf\276-\002\245>Z\033\233>\307\277\260\276?\320\r\276\222|\314\275\310\227\312\274\010\370\202>\301\222\261\276eZ\206>\275\313\346\274\305\r\207<\232j\301=\201~\366\276Q\301\021\277\306\350\201>8\0255>\\\252O>4\361\206< \262\006?\"\016X>\207\212\273\275Ji\251\266\267\311\275\273\177\342>\201\301V\276\025L\275>}\022\010\277{\264\225>\005\251o\276e1\r\275\r\262p>\335PY\276M)P>\252\366\234=\024\236\355\276`=Z\276\203]V<\357@\214\276Z\222B=\342\304W=\331\020\006\276\\\t\337>\201\372\272\274\027\027C\276U \007\276)\355\301>\372S\272>\277\016Z\276sEu\276\375\325\300>\031\360\267>\207\372\236=[tG=@\034Z>>\217\205\275\034\376X\276\275d\314>\244-v\275\360Z\t\273\222\020\337=\013iO\275S\351\215\276\326\313\315\276\017v\035\276\340\\\206=vV)>\024\2776?b\221+>\035\327j>-\222\215\276\202\347\271\274\217\014\345<\034\032X=\374\0370\276\352\212\227\274\263\016\024\275\372\330\034>\222 o>\372u\203>\253\356i> \336H>8\210\022>5\023G>\214Vj>\242\010\244=\032qQ\274sDB\274\333\370\262>|Z\257\275\301t\201>}\014h>G\331\013>\274ch\273/zu\276\034>`\276\022\344\257\274\027N\347>-A?\276\235dv=y`\247\276j\016\334=\320\024\242\276\022\0019>\367\014\335=\n\t\272\274\231\351\017\276\246\035\214\275\025\326\230\276.zP>\210\033\r>\275\275\000?X\230^\275>M 
\277\330\323^\276\330\020\251\276\233\272\225>\324\023\207>?Z\">\306\216\037>\ny\261>\226\031A>\032\0366>\341\r\247\276+\'x>\020?\\>\346F\'\276aF^\276\275\355/\276C\213J>:(l>d\027;>j&D\276\373.\304\205qa>\211\215h\276Q}\251>\334\232\r>m\201H>\260_\217\276\016B\265\276s6\207\276ON\023\276\243f\344\275\303\326_\276$`\370>\031Y/\2761\316\301\276d\247?>D\351\372\275\377\306C\276\004\342\277=\305\r\001=&W\206=t`\364=\312\024\017\276\240\334$\276\300>\005\276\316q\275=\353=b>3\340\342\275\252\275\345=9J\245<\202\005\321>\353V\215\273[\241\263\276\273A\206>\217\251\345\276a\204\332>\234\361\236>\"\244^\276\236\372\341=\223\"t\276\037\276O\275W\235\210\276\333\252\257\275\313L\'\276\022\351\352=\'\014\267\275\321\021t\276\030\343X>\0169\301>\214\'+\276\245\206h>:#$=iw\017\277\220h\264>\201\206\225>\263\306\003\276\371>8?\321\232J>\375e\232>|\240\t?\215\356\274>?|\243\276\267\336\001>\215\233J=\207\254/>\340\3100>\224\242\237>@\242\345\022>`q\026>>4\227>\347\306\355\275t\276\023>\334\223\320=\201\305\303>F\371><\034+\235\275~\212\363\275O>J>.pC\276\245\2750\276\315\206l<\355g>>\224\316\323>&\225+\276\275\0341\276@\212\263=\203\211\272=L\276\304>%\237m>\345\373\340=\331\027\262=$\322H>\317\342\007>\2702k\276g\344\212>\220\3773\275/H?>\232\275\017\275\3554\256>\247B5\277L+\322\275\013\374\230\275\025\221\242=\373\0244?\327\376\362>\330\233h>\210%\001>|\274\n>\034b\004\274aU\207\275=\315N>cqp\276f\333\263\275\335W/\276\334MZ>\375)\201\276$\352\343=\251\372\316\275\345R\362\275[\321\211>\033\022\210>3\203\211=\345\372\230>\367{x\275b\252\343>\216\340z\275\347\241\360;\223\360\246\276@\221\356\275/\256\371=\027\004\301\2751\233\027\276\030a_\274\277\300 \276\330\271&>K\334\231=b\330\276\275\366\004\220=h\357\304\275\344\226p\276\246/\306\276\t\316\267\2762\324U=\260F\243>\354\335\n\276\276v\000\277\200\302\243\275O\000\241>\000\260\226>\032\246\231\276}\230\030>\236\240\014\275\237{c>g\252<\277gR\001\276\001E\235\276\017\253\036>\377\314\247\275\'\272\014>{\265\204\274&\327\276>\277\3378>R\312\346=\334\206\177>\003t\203\275f_\366\274Hd\303>{2\003\276]\tT\276\236\271\237>x\355\346\275\"\212\302\273\2312\305\276\370\341\255\275\215\0025>:8n>o\232\316\275\360\372\t=\267\355)\276\367\342\216\276\013\033\276\276\361E<>\336fs\276\003\245\244\276\205\250\001\277\277f\317=\335(\371=\304*b>\026?\243=\031G%\276\333&\200>\257\336R\276*\257\000\276\311\000!\276\010t\230\276G-\020:\370\330\006\275\320\006`\275I[\355\275\316%a=\357=I>)j\333=\017\t\317\275\3115`>\322\2650\277&\t\205\276\360\227\310>Z\225\221>>\271E\277\277bD\277\346\363`\277\022\375V>\005\225\267=\'\327\032\275\245X\023?\274\013\t\277i\'\021\2760\362\324<\356\rz\275\252\241\314>(\312O\277\327\305+\276\244\307(\276\035\227\202>--\214>\035u\275>Vw\026\275\036\201/\275\333\nM\276\211\314\r\276*p+>\377\342#\276\261\3742>!\323\240\276\356R\177>\013\2168\275\217\371\360>+\367\006>\303\207\226\276\311N\034\277\242O\n?\371\210\320=\242* \276n\255\021\276\340\035c>\266\024D<\017\375\226\276z\331T\276_\242\261=D\023\225=1(I\277\374\226\001\277\033d\002>\r\005\262\275\241T\246\276|\013\366<\310\320\314\276\361\001\236\276\020Z\020?\017\3726>\342\336\225>n\225\022\277\346\021\010\275\243m\267\276\301\334\234\276\376\2729\277`\275r\276\220\n\013>lS\321\276x];\276 
~\222=\211\240\314<\373\207\327\276G\206\235=\037\344\031\276l\355\231>\013\366^<\347\010\020\277\177c\r\275*\035\350>\277:\030\276\243R\016\277\222\2639\276\224M4>\260\312\022\276Q\251*>yU\360\276Qft>\347+\321=\272\256f<\255b\'\276\234\010\032\277\247\004\331\275\260\177\016?\206P\346\275\264\215\331=\206\215t<\235\237{>#\\\025\276\343\244\305\276\244\276G<\204\324@;\310\263v>8\221\201\276\030\261\001>\266\320\203\276\364d\243\275D\177\273>\037G\223\275\361\\\362><\230\222\275\323\265a>H\023\256>\033\270\221=\3330\004\275Xh\014?]\336y>/\335\t\276p+~>\234\234:>\361\243\307\275+]}\276\235\226F?\373\3441\276:\263g>\365mg\276\257\363\207\275\327\016s>\366\312\327\276\242\316\242\276\361\236\270=T!\341\276i\020\026>r\276[=\205\251\206\276\273A\226>=\251f>\002\220\256>E\313l>\177\316\036\276Ib$\276\232]\306\275\222\036\005>A\316\211>\024e\234=4;6\275|!4>\343)\225>s\305\346\273\333(\223>\375\001\251\275\334J\355>\3444q>+\315\300=\372d3\276Z\335V\276Q\033k\276\356\274\316=\033\361\302\276<\246\373>\264\333\322\276M\311\031>X\231\027>\220;\376>\24452\276\372\353\266\276(\322H>\253\250\025>\214d\221>-\344Q>%\305\034>#\"\n\277E\024\307\276Y\314\202<\022\371\t\273\345\332\312=\273\317\304=5<\206>\030/!\276\213\n\331;(]?\276\031G\037>\006\313U>\211\221A\273\213\376\006=\312\334\021>\240\260\340\275}5\302zu\242=\217\324\003?Z\273\303\276\345\036\260=p\254:;\312\034\234\275\006~\002\276\202\272{\276U@q\275\243m\030\276\362<\212\276\313\257\013>\314\346\255=\305\3638\276\344\202\231\276\343\323\313\276\023\350\021\276\216^\377\276*\034\212\275\22013=\036\007\365\276\363E\211\276NP\356\276\263-\031\276\265\201\351>\370\372G\277_\220\032\277\230\020g\276\340\241\272\276\000\036&\276Q\250\373\275I7\211<\317 5\276\337#q>\341\320\304=ac@\275\207\2152\277\231\007\224\276\n\200\271\275\317\017\001=\215\010\327>\345<\212\276~\341\232\276\354\342\271\276\345#G\275\343m\260\276OM\307\276\302\242\202\276\274qz\274\364\234j\276\207\275\203\277\354 
\277\244\314\222\276\226\037H\277\235-\323\276WB\200\276\211\214^\275t\357\254\276M-\212<\r\032\253\276\274\n\024\277\344\270\302\276\340E\023\276\024\026\325\275&\325\007\277_s\306\276,a,\275\3703F\277\221~\235\276\273p\260\276\203\326\035\277O\277\242\276\034ny\275\025\257\234\276\362\321\256\276\'4\355=\026\273\236\275\366\330\277\276\357u\216=\256\033\355\276\300\006\235\276\251_\257\274\356\345-\276\317\333s\277\353\357?\276\224\212\343>F`\023>\216=:\276\356t\214\276\362\003\227\276\332\014&\277\014\314\262\2766dZ=T\'\007\277\241Q\254\272\311\0243>W\325\372\276\037\030\306<`bv\275\316$\275=\216\335\325\276\323\023\362\276KB\257\274#F\215\276g`{=\363cQ\276\260\254\241>\253(u>\034\356\003\277\271\351\300>\343\261\344=\274\343\261\275m\306\207>l\345\037\275Z8)\277\320\251\357\274W\274\210\276X\300\201\275!,=\277\222\333H<\257\366\234\276\204\330\345\276,=\276\2764\337\275\276>\225\204\276N\3643\276!=\327\276\201\224\003\277VjA\276L\353\216=\367\024\265=\210\211\226\277q>\376\276{z\213>:\0269\276\367M\237\276\215\243\343\276$8K\277H\322\254>\375\346\270><\020H\275\360\277\020\276j\000a\275\305\335\222\276\234Ym\276\300\201<\276\016\327\325>\357&\306<@\327i\276\214W=>\037\2614\277\374^\265\276\303\372\242\276\220t\245\275\351\222\331\275[\022\340\276sZ\217\275o\363\027>\014\344\233\276Z\205O\276\266\363\221\276\336v\310=Z\215\370=y\242\235\276+\242U>\344\234\260\276\026\264\014>\270\276\014>\037\203\313\276\222nP\276P\006\267\275`|\374\275G0>\277Z\217\024\276\242\225D>\024|\213\276\\m\200\276\201\324y>\212\201\373\276\355vp\275\242\315\260=\354O\225\275*\026\245\276jw\205>J\331\321\276m\312\023\276\343s>\276\351oU>|n9\275\201)q\276C\261(\276t3}\276C\257\302>.\244\231\276\200~^>\003E\031\276\013\0101>A\236\321\276s\357\353\276\302\264\220\275\202\316\251>>\267o\276\247\021\257\274T\372\265\276\222c\005<\223\2210\276o\346*\275 \311\267>\214\244^\276\302f\250\276\340T^\276\262\306O\276\\\321\n>~\250~\276kYM\276\232(\233>\353\030\356\273\005\250\222=U,\024\276*\231\304=\016\246\326=\327M\223\276t\210\274<\337\304S>\363f\322=.s\010\276\312\256\227\275 
/\251<\275\177\336>Z\3367\276\206\204P\276\277\334C>\340{\360=\227\206\307>^eB\276\277\375\204>\304R\262>\255\375P]F\262\276\206\t\320=\207\346\"\276s\211\333>\250\177&\276_\370\323\276\205\313\251\274\321\315\004<\275q\207>\264\303r\2756ok\273\r\202*\275X\362I>\334\025\227>\227\0359\275\3532\237>\234\351\001>i`\231>T\212W\276\027\252^>[\030\001\275\'hf>\237\251.\276\016SJ>\335l\227\276\030y\206\276\346\025b>\020?N\276\352\364\006\277\013\226(>\026\023[\276\341\202d>.(Y\2730\223\224\276\0310c>lL\026\275\033\337O>\223\220\362>,\322W\276\024@\031=_$\035=\252O+>\360o,\276\305\263/><\326\274\276\327~V>&\001\343\276[\3253>\304\177\213=$n\000\277u\3319\276W\366\245\276\374\360\216\276\217b(\275\327\354\366=\3009T\276\273J[>\t\355\233\276\357\216\244\275\016#D;\336\351\033\277.\315<=Qgg\276\230\262\244\276\017Y\232=\337I\220>b\330\326=\340[\306>\312\203\240>\270I5>\367\036\311\275\325(\262>\3077\240\276\321\264\270>\311:\344\275@\243\224\276\212\004\304>\014\261?>\316\2424>\271\325\210>\365\025\327\275\201\223\330\273o~\004>\007~\271>\222Z\315>\235\324\235\275c\364\010\276\202\370F\276\331Bj\275\354]c\273\262\251\235=\251\365\272\274\352\356\267>\344\343\217\275\025b,\276\020n\270>\017\232\007\276]E&=\020\362\222\275\212Q\357\2746X2\275J\251@\276\355,D\276\013\267p\276\204\355\231\275\014vn>\265Y\300=U\253%\276\223\254\244>\304[\003\275/\356\204;\262*\n?\263\267\017?w\2109>\022L^>\344\330\222=5,\340\275\272yw>~T\014\276\210\314\262>1\263p\275\246d\225=y\347\t=[p\323=\303\345\t?\335&\232>\355/Q>i\256,>#\377\223=\337\035{\276\236[\247\275\307\257Z\276\237\357\341>C\247S\276\256(\264\275\225/\304\273\"\325\221;\274\320&=tZ\260>\207\326\352\276\306\210\251\276>\333\316\275\272\300@>-\216;\276e\221\025?b&\r\276g\260z>\"\024\257>\321\030\307=\345D\247\276\207\240\274>G\206\345\276\230\021_>\211\010\233>\322\023X>\333L\021\276\361\264\314>\323\022\005\275\327\323\247\276\017n\306\276\210\271\374\275z\205\024>\275\357\375>\314\356N>Ti\302\275%\014\n>\257\n:\275\344\3224>\270G\215>\224\366[\276\213\346\217=\035\265N\276YlL\276\273\351\213\333\007\307\276\270\306\266>\'\356\310\276\346-\246\276J\034K\276\366\313\035\276e0\020>,\203^\275\204W\022\277\254\200\320\276\255\307\300>\002\332\375\275\354\236\312\276\257\255\216>\263\271\314>\021jr\276\234\302a\274\225sf\275\303BF>cE\234\276\301\325P\2762w\252\275g\t\202>\'\nR\276cA\256\276\356\345\273\276P\270z>!7:>\210\224\321>\177\204\303\276L\250\214\276m\271\236\275^O\021?\205\337\340\276\301\311\035\277\303w\274>\356\233%?\354~\033>40i\276=\000\207<\255 
\276=\322\206\252=>77>0\224\346>g<)\2762~\201\275-\247\020>\251\230\204\276:\206\340=)\242M>\024\244\316\276\367\350\341\276\350+\205\276L\277\004\277\304\"\271>\030Fq>\220\232\251\276.4\333=U\331\033>S\376\364\275\365\340\210\274\007T@=\374\r\216\276Ni\241\276\244\022\004?\230\024\224\276\201\240\325\275,\207\034=\177\026\211=\203\316d>.\026\231\275\227\337\222\276\r\034\210\276\335\025\363>\370\363\001\276^\370\210>\\\347\034\275Q\206\226>F_F\276T\343\211>\030\266V\276\321\027\213>A\313\236>[\026\203\276y\324e>u\305\345\276s\017\026?\261_\321=\"\023\254\274|\210\205>\277\273~\275\030\002\275=\024\243e\274}P\177\275\365n\301>:\253;>\002B?\276\270\262\010\276%m\227>\333|\365\276\226\2624>\272\007\030>\227\032T\276\035\222\216>{\252\027\276p\376\303\275M\257\005>\3509\002>\216\2739?\230\005\275>\032I\313>mFp\275\311\356/=\371\324\335=s/x\276\027h_\274;\003\212<\006k\n?\003%\022\276\235#\254\276\261U>\276O\302&\276\220\033\251\274\233\'\277>\223\226\n\275,)\335=-\017\007>\027\370\252>\033-#\275oc\367=]\336H\276\225\350e\275y\362\036>hCq\275\350\2476>\313\200\224\2743\035\006>qC~\2765\336\242>:WS\276\352\256\213>d\035B>\204\303\263>\211A\032?\"\026\017>t\245H\276P\244L\276\333\200\030=W\314\355\275S\013\323\276\030\000\330\276\312=\344:+\365\014\277\ti\366>\\P\201\275i\274I>\2146\235\273\036\006\367=\005&\266>cA\020?\321\314$\275\006+X>qEn\276\333\307\354=\322\236\206>U%\330>\331\211\023?\340%;>\233\351j>\026\360\323\275J\240\251>\231ee>A6\">\372\t:>\3652\246=\321\300\006?\326\n6=\036\323\013?uh->\2266N>\342\361 \276\306\264\262>\202\356\017>\026j!\275\366\316\016?2\\\256\275\357\232\311>\034t\257\272K\017\\=\216\235\213<\177\242\201>q\243\370=\303\n:<\237\212#>\373\315b\276J\315\234\276\243\031\377=|\211\306>(f\220\275\360\263\237\276X\014\260>\356\032(>\361/\203\274\361\204#?! 
\024?\211\256\277\276^\253\215>\242\037\244=A\032\007?\346\304\371>I\306\337=%\232\335=\207&\013>\342K(>\356t\177>\333\251\233>\312\310L>L\274\022\277rl\231>M \214>\367\317\r?dT\360\276F-\334=q\224\002>\0106\217\276[\3119>\237Y\247\2735\274\232=\327\253\254=\200\306\234>K\nM>\352\010\250>\233\265\242\275:\274\230=\377\2351>L\251\305\275\3513p\276M\204T>\026n\344=6\223\326>\007\372)\276a\334\004?\303\335I\277\346\321\005>\241#\257>\002H\231\276{\007\237\273W\342\340=\207S\264>\231i\260\273Z\230\005?\270\371\346>\ri\353=\357\\%\2761\251R>p=\201\276\274Rj>\237\220\t>\261YL\275qd\035\276\242\344\243>elN>\026|\030>\350s\364=\351\365S\276M\r\317\276@s\206=6\345_\276\030\232\000;z\031%\276D\244\013>\316}\">\002\244B\275I\355\221\275\365^m\274\260\0327>h\263\217\273\275%\216=d\341\325>=\035~>\002\2108>&\322\360>O\376\000>\365\251\326\275\211g~>W\317\333>\224\001\362>\350\332M\276\2358\266\275B*\002?\305\302U\276\304Z\347\275\332We=!\222\347\275I\177\'=\251;<\276\2477F\275\346\345$?m\334\253\275m\033\303\275:\t!>\352\363*>\225\362[\276\025\275\210\276\315!\235>\244\377\013>@E\254\273Sz\205\276b\351\310>\374\352\241>\250\230{>\306\273\262\275p\352\275>BM\343\276\2104\264\276\377H8>\n\225\352\275\225d;=F\2163\276\313q\305\276\352M\037\275\2630\024\275F\240u=\307\342P\276da\334=0\331\220=\'\362\253<56\230\275\346\003\252\273\356\377\350=^O\023>\265\001\236>_\357\033>\301\331/\276t\353\211<6\010\304\274{\3321?p\332\373=\222i\330\2755[?=|\033R>\214\366\303\275A\000\322=\344~\024=\240\200\216>\035\\\206>\277!\220>l\344\206>4\025\343\276\220\017\025?R\271\224=\227+\237=6[t\274\315\213\313=^\201B>\247\357\322\274\004\000\314>3&\322\276\020z\222>\314\257\211\276z\177\356=\352\022/?\335\331\001\276\007r:>\004\312\346\276_\001d>\217C\037\276.\233\274\272\352\361\332\276\222\327\325\276C[\304\273\260-\254=\270\267\267\276\265.\240\276|\344\215\276\202\212\265>\t\241\251>.\n(\275\233k\007\276\226~^\2768\352\255>\257\354\'=N\021i\276\027r\251\276\370\214\342\275.\215\r\277#+\276\276\'\373P\276\317\272/\276L\301\345>\255#v\276\215)\205\276N\346 
\276+\311\224>@%\210\276\331\361\237\276\274+e\276\234x>>hy\001>~\211A\276\367\367\272\276\227\220\340<\262b\313>\334f\r?}D\n\277|\371\322<\235u\206>\177\266\257\276\344Q\013\277\345\025\306=\002u\335\275)[\210=\362\252L=\005\230\010\276\"3\222\275|%\303\276\365\375\201\276\2371\225>h{\036\277\364\242\243>\213\006\020=x\273\006\276e\375\201:k\257\217>\006\371\033\276\252\326\n\277*\202G=PC\014=\035\274\217>\237\252\233\276\207!\020\275d\201x\275\212!\014\277\270\352B>\203p\254\275o\245a\276\353-\301>U\353\300\276*\231+>\2206\234>!\363\020<\201WZ=\215\277\334\274\250_,\276\362~\365\276\n+\204\276E\017\327\275\351\t\347\275\231\005\251\276\341\210\304\276\274\325\016\277li\362=\363\240\200>\217\310\220\276\312^*>\306]\253>\341\310P\275\212\201\n\276\220\301D\273\276\324\237>\013\211\320\274\200\344\363>S\363Z\276\364\007\\\276~_\242\276\207\352\212\276\322o\257=}\265\376\275\220X\035\276p\030\243<\312\340\342\275K\337+>\013}\260>\002\266\003\276\345~\272>w7\205\2755R/\276\213\024\240>\222\216\016\277A\273\246\276\260L<\276\353\277j<\234R?=\261?\303=*\024\371>\266Z\030\276\253\311\266>7f;=\276\213\341>[y\272>\224\2538>\376a\372=\265\013\223\276\267\000\347\275\017\302\200\275\304\3341;\014>\254\275\322\344\002\276\374\2327<\213\237j\274\364\354m\276\360W\340\276\357VH\276\310\226\361>=\031\210=K\316-;\301\021\376\275\'\221\330\275\201x\036\277\376\230\211\276\035\372\365\275\322\272\034>\357\t\241\276!\231\375\275sv\247\272\026\031\223\276\024\020\021\276\2004\005<\206\221\024>\370\306Q\275\343\037p\276\245 \024\277t\037\027?\276\n\302\275QeE>\260\375\032\276/;\277=r\243/\275\3710V\276\036\314^\276\036\002\251\274\200\356\241>\013h>\273\314\260\250<-\203Z\276\314S\246\276\300&\227\276\t\361\313=\331.\222\275)|\n\276\346\246\n>\356X\\\276m\0066\275\346\243\030>&\366\240\276\201\261v=J\353\255\276\342\303`\276K)\352\275h\325\235\275\263\200K\2769\234\324=\352\274\023\276\007\356d\276\001\365\364\273i\331\244\275\227/\267=\227E\\>\2648d=\340\033\306\276z\313\340\275\361ek>\000\266\204\274\217VF\276\\\322\315\275\360\322\r?\034\312?=\375\353\333=\300Q\005?\345*\362\275\203\016\301>\356\'\320\275\r^\017>{\212k\276\324:\337\275\2124t>W\222i>\210p:>\263\326\r\276)\002\321\274\374\2407\276\342=\200\275\004\337\214<-\232a\274\371:\273>X\371!\275\207\033t\276T\301\213>0\267\235\275\237\022J=\357\234\t\277\257\212\254\275=\336)\275\037@;\277\226\257\241>\232\277R\275y\347\237\276U7\n\\\276\204\307\213\275v\3622=\235\250\r\274 
WR\276Z\'\267\275\315\331\316\276\353\001\260\276\354\276\341\276\264H\016\277\320%\220\276\372\370\360=\312&\346=\312\377\306<\262/K\275E\3111\274c\317\217>\343\020S=\204\371/>%Oc?\n9\355>\'\022M\275\275\177\310\276\227\375a\276\202\361l=\363\033?\276V\035\343\276\333\201\364\276\267\324L=:Q\242>\317\032\342=G\213\236\276R\353\244\276\244~\273\274M\264N\276\257+\236\276+?\232>~\363\256\276h\214\216\275=\275\221\275\314W\375\275\016\252B>\022\203\244<\377\343R\276}\255M>\321\311\373\275Tp\001\276\020\353\306=\022]\240>\314K:\276.\001\221>\301\240R\275F\324\277\275\223K\325=\211\2170>\367t\310=\026y\250\276!\216N\275\233q\006\276P\305\365=\342t\210=B\333]>\'x\323=1\'\245\273)\305#>H\334b>\336\272\272>\325\215`>\245;@=\363\2477<\212\342F\276\324\"\220\274\215S\017\275D\212\250\275\244\354d\276\027#I\27417\013\276}\214\027\276\245\033\373\2758\243A\275\225\212\217\276Y\263\305>?\246\243\276P\241\354\275F\032R\275\237m\223=\315\271\343>Y\226<\276\344}V\273\240\334\255>\220.\357=^\245\333=\204d\n\276\224\310\304\274\3546\210\360\212\030\277d\273\220\276\252\221\261\275%\237N\274\367H\262\276t\205<\276Fd;\275|?\251\275\250\205\220\276\177\033r\276\014)\014=7R\352\276\252\324\255\276la\216>\376\364\317\276H\305\326\276\263\224\215>X\370\365<\206)\253\275\220\212+?H\202\277\273C\356\253\275\234I\321>\312\2427\276_\"(>s\346\374==3\257=\032b\034\276F\034\342\2748\233\243=\366\343\207\276U\204\022>%9\271>u\325\234\276\210\312\202>Z.\212\276\026r\261\275\212)z\274S\310\370\2730\0209\276H\035\251:\024\016\376\2739f\323=Wv\034?uc\256=ee\217\276\353\031B>@\365\353>\213a0?\207\002\253=\017\316\206\275\025=\241>\352\366G=\236\214\003\276a3\026\276\304ug>I\331\217>\023\204\225>f\314\200>\334\035g\276\002&\203\276\240D\364>A\031g\276W[\347=K\300~=\341\037\340\273]\313\224>\025\020\352>?i\242>\201#\262\276U\201\351\274s\315\230<\315\362O?43\320>\336\\\313>\235\252\330\276\371\274\363=A\3715>\333\334\207\276\014\360\357=\266Z\206>\014b\341={\307,>\205\001Z\"5W>\032L\342=8\177B>\010\022\'?\004%R?NQ<\275@\2766>\000\301I=\217\275\202;>J\236=\325\300\207\276\254\350J>\237\254A>\260e\027?Z\307\022?\257\032\303>\010\364\316>m6\255\275\364\365\270\276%j\266\274\361\215\305\276?\036\005>\014\017\206>\003pt\275|.\267>\362t\244\276\030~\305\257\255\263>L\n\301>8\027#?\225\337\347>\'\340\235>\242\016\263\270\213\324\024?\347\020\020?\236\034j\276\177\255\343>\304\375\031?V\222\241>:\306\371\276m\005\257=\235\246X\2766\212\n\276hY\256\275\000\271\222=\271\305\340\275n)k>E\244E>\342\352p=\366\267\016\274\267\237\214\275}\316+\276\375\363D;w\016\202>7\327\266\275\276\274e>\376,`\275\031es>\'\306\322>\217E\263>\210%\000>\320\357\213>l\263\271>u\200U=\216\272\237=Q+\337\275\370aT\275\342\307\214\275+\342n\276\350\275\272\274\374\367\344\275\352\324\264>~-\200\275\323\253\371\275\221_\326\276\"\210g\275)lD>\371\305\005\277\273\362b\276w\246\245\276\342A\225>\352\232\317\275\355\222\223={\202L>WmV>^\002_>\361\346\350\274P\0132?\355~\236=\035\232@\276\267i\224\275\035P\341=\212Dz>\365\251\323=ZQ\006?\241r\035=\323W6>=\302\006?GuO\275bq\001>\374L\360;>vQ\276\343sH>\376\230\316\274\230\321\010?\347)\007\276\\\264\346<}-}>\265\006\247>\263\330\202\276\034p\367=\326\307\004<\rc\027?\314L\201>{\021\340\275\345\312\274\274+\230\277>\242\2677\276\336\230\221>#\231\032\275\260]6>\030+;\276h\330\254=\252\300\252=&m<>\346x\002\276\322\240\373\276\202\021\345\275L.T\275\205\003\226\273`\007\232>7\343\027>>$\224>\254*\002\275N\0064\276\324\220\256\275\311\344(>\221\370A>\363\247\347>^\221\036>\r?\240=\246l\242>W\026\013\276\356\230\203\276Y\
3178\2779\342\263\275\345;\204\27587\266>\217_w=\204\025:>\320\007\316>e\304:\276\2274\302\276\334\232\026\276\033\206%\276\0062\222=v@\375>>\335^\276\304E\013\277o;\304=\372\346%>p\3467\275\225i/\277\231\002\263\275\324\330\352>H\'\207>\245@\200>-\235\221\275\366>P>\314\035}Ok\325\275\r6H\276NC\013?\023\243g>\321\2417\276\232q\r\275\203\251\270>s\247\357\276\323\314\014>A3W\275j\302<\276\332\034\257\276GZ\037\277\362J3\375d\223\276\377\235\206>\302\342\214\276\377\226\353\276\362\003\323\275\375\223\244\276\260\315\036=\264\273\t?\314g#\276\r\373h>\223\222\201\275\355/\264>\364NV\2769\376\002\276rc\313>\3764\210>6l\005\274\000)b=\243\240\234\276\2121\266\276\342\335\264=\2532G\276H\323\373\275d\346\350\276>ON\276\317\354\375\276\242fQ<\205\327\023>\"9\343\274\264\225\314=5\252\274\276\202\340\362=\000Fb>Q#\225<\335q\320\275N\020<\274\277~\374>\340:\314\276ov\203\274\201\006J>(D\234\275\335\322\201\275\002\205\301\276P\260\337>x\241\002\276\262@\341\275\377a\373\275\331\022P\275\200J\206\276t8D\277\007lh\275\003o$\275\332\261\n\277J\340\310<\241\004k\276\\\201\034\276n]\220=\265\004\205=8\0316>\270\352c>s\233\004?\017\201&\276\315\336\307>u_\247\276\t\t\217\276\357XX\276\200M\022\276}\302!>\364\316\330\276\301m\334=\342.\020\276\316\323\371\276v\376I\276\3709~>F\205a>\326j\022\276C&\212\276\3663\315\274>\263\234=\20239\276%\242\244\275\350\300\202\275\261\3178\276ozM\276\001\'\375\275W|\017=b)\253\276\210\236\255=Y\271D>\372\315e\275Q\327\255>\256m\302\274C\314\031\2765\3357\276\033\350\323>]z\232\276\277\205\375\275\004\306L\275\243\222-\276\347z\202\275\251\301\326=\344s6\274<\314\212\276\242\264\335\273nXq\275:\356\327=\2534\002?\rl\230\276&\212{>\321^\n\276\274\031$\276P\373B>\374\307\334\276\366\223\221>\241\357(\275\2000\207\276Q;e\276\2748\334=\344\252\257\276\277\r\014?\177\'\212\273\371\237V\276#\032\356\273\263-\251\276@d\013=\013\361x=\374\304\276\276\357\225\005\277[^i\276\311\330\221\276X3 >\210Q\214>!\n\232\276q\244\316\276\206\213\301\276 \201C\273\254\000\204\275\245\345\021\276&\241T>\325\320\022\276\323\021\371\276\342\267 <\037\031\367\276\3565\306<\016\t\351=K\032\214\275E\306\204>\373E\217\276`\303\002>%zQ\276.\310\226\276C\"\210\276B])>\341Q\237>\270\304\234\275\254\337\215\275qX\217>s\203\207\275\035\222\001>6\344@\275\347\315$?\277D\247\276;\225u>\263\264\007>\366 
[~20 lines of octal-escaped binary data omitted: raw float weight values (a "W=..." tensor blob) from a binary test-data file deleted in this diff; not human-readable]
L\336>\246\374\365\272\205qu\276\317\265\351=\356\234\262\276\205\312\300\276\007Vk\275\374}\242>\036\314E\276^{\247\275\331|<>/\355-<\302\265\217>\276^\245>W5\236=\026\276\271>\253\000A>\337\273\344\275\203\236\215\275\347\n\330>\265c\032\276\2770\326=~L\241\276)\304\005\277\022\010\267\276I+\267>\223X\035\277\274G\263\276I\3432<\251\265l\276\330\207\350=\231s\202\276\035\355\024?\225\330#>\237\240\010>:\305\013\276\036\372\227\275\340\322\202\275\365\356\303=\277\033h\276\270a\365\275\317\345\355\276\203\207\240\276e\207\217>\336\237\036\276\n\204S\276%Ck>B\254\241=\212Yq>\374d\317\276Or\373\274n\221n\276\023\256\334=\265i\">\311\0276>u\224\230\276\317:\034\275f\337A\276~Y\356>g#~=\232,;\276\346&(\274\001\200M>\231\263i\275\003L\264\276\346\224\256\276\\\273(>\021\306%\273\201\025/=p\025\315=\016F3>\371\241\203=\327\035!>\211\210t=q\355\363>Y52\276i7o>\177b\377=\375\267\205=[\275\245\276\020\211\024\275oqV\276\226\227/\276\347\306\260=\357*\016\276\331\275\255\276M)\305=$@\326\275\014\361\237=\016\022?=\027\274\252\2759\346\031>\030\353\206>\222\347\021\276\303\315C\275\373\302\233\275p3\321\275\373i\236>\2243\253\275K\037@>+\227\'>Y\023\351\276#\211`>0\214+=\202=\377=\274\013\266<\200\232{<\000Vf\273\300P\266=<\352\023>\336\001\023\276\274\251\341>\024\324\235\276\253\360\326\275\322&\227=5\240\224\276\256\206\240=#\370\r>\245\304q>\225X\267\276?%\211>6\033*=\256\353}<]\206\202\276\272\005p\276BM|\275-\221\237\276v\275\230?\202M\266<~\260\257=(\353\373=\374\003\366\276r\305\212=\311`\267<\320\322\304=X\261S>\370\253X\276JY\213\275=F\032?0\340{\275\270Cy\275]]\235>d\337\352>P\027?=\322\334\317=oK\345;Wae>\365\030\243\276\225\324\277\275\023S\271>i\277u\274\356R\241=\233\270\017>\242S\342>:>\211\275U7$\2772+\025?\3754\355>\202_\010>\360W\356>\205\2332\276\335O\013?\367\321\003\277\306t\374\276\322rg\276\241R{>*\217\241>\030\212d?\237g\324\275\331SU?\222\3657\276M\027\323\276\333\202\267<\337\305:\275\266\377\331\276\2100i\276\243x\200\276E\310\377>\257\241\233\275\300\230$\275\347\202\235\276\246\023\n\277\356\351\000>/]\254>\371\210>?[\370\327>\261\014\263\276\177\241\034\275\233\255\370>^j\237\275h\024x\251\255U?\265l#\276\361f\257>0\266.\276r\003\246>0\272\245>\276\027\'\276\034\324\244>\314\244G\276I\206\232=\030\035\007>\223\035\210\275\371v\246\275\000n7>\331\230\343>\315\350\260\276\211Xy>\275\0249\276\016\236\006\277\027l\305>,\361,>\027U\017\277\024\307\223>\311\260\002\277\304\216\343=\272q\357=\003\350\322\276\223j\314>\302R\213>^3r>\253H\344\275 \006\024>\300h\003\2753\344d?\020\263\034>\272/\272\276\2269J\275/\236\226\275n3?\274\261\200\357\276\257u\032\276lU\002?R3\226\275p]\306\273\372\ne\276\336\246$\276\2331\366\276E\231\226\2767b\322\275]\271{=:\273\371Z\375\013?\032\204\016>\261ES\275\226\245\305\276]\253=\276+\324d\275\301\236\327\275\243[I\276\222\246F\273\342\266\207=\000K\250>\004\1771\277\323\363\000\277vmJ>W4\265>\270\210\344>\243M\313>D\331&\276\312\262\"?Q\371@>kX5\277Y;:\276\340m\267=\214FJ>\n\245r\275\337\343\021>9\261\205\2767\000\312\275\177.\377\275f\000\205\274\024\331\211\276\263\206j\276\220\004\220>\217\001%?\275\343\266\276\215g\027\277\227\032\334X1\301\2761r\036\276\t\276\255>\363\253\272=\232~\300>k\\\026\276\367\365V<+\"\370=,\373\004\274\340\034\202\275\021\212)>\254J\355;\322\263j>\005\316\355\276w\301!>\243\316\325>\255\333\325>C\333\013?Q\220A>b\271\322>\\\251\220=\022\006z>g\256\373\275F|0>\246\265\231\275Y\206G>\301\374u<\347\206<\274 
\331K\276\240\220\350;\345\233\026\277\355\226\246=M;\001=\027\020\323\276\331\365\014>\001~\204\274,\013\316\276\t3\226<\274*\006\276\207\005#\274\232\202\302<\371\260\213>\243K\213=?v\324;\342B\010\275\243:\246\276\375d\344\276\005\240\230\276*\304H=^\324\201\276\000J\006\277\221\031C\276Q\210\r\276\027\224;=\270\233\375\276\\\370\250\276\266\224.\276\302\203\203>\374K\026\276rC\006\276\317S\343\276Wa\343\275\365\317F>~xA\276Lmj\276s\373\350>\252\257\253\275LEA\276\372\027\245;\363\005\336\276\014\004\022\274\241\221V=\204L\305>\351\004o>J\334\026\275\331\354\325\276\364!\257\276d7\210\276\257L\340<\230\365\304=\251\324\321=k\\\261\276\217\353\020\276\250\211\031>\232\220\016\277V\246k\275E\327\242=Q\212\261\275\355\026\224\013\223\257\276\363\037\234\275\205\234\373;\007x\357\276k\301\244\2763\330\034>\2704\254\276g]\021>\345\324\035\275\031?\363\276,\310e=\305PR\275\211-\336>\364\260J\276\234\372\323\276\232\227\301\276\234\003^>\330\2776>\255\201\322\276\204\363\026=\307\341$\275\374\302\227\276\237\367\344>\200\022\013>\302\260\031\276P\250\265\276Df\276=4]j>\274\326\021\276C\373\240>\217\0039\276\207=\001\276\254\203\266\275\3301\036\277\266\211\341>>\342X>^\0005=s\213\'>\2478\253\276\340\366~\276\201:\251\276\213\022o\274\324\344\346\275#\342\251=XP~\276\223&\266=]\203\225\276\326\325\016\272\004\303\230>\246h\301\276\3323\014\277]\253\377\275\310\256\021\275~\007\202\275\206>\232\275\324\277\302>\004\001\353\276\236jh>\263+\305>U\210\206\276\007\367\332=\004\\A\276}\005\006\276&\335\234\275r\306\211>\302I]=7\225\022>\0246\200>\332\033\223=\315|P\276\210\240=>\266E\275=\243\002g?\325p\216\276\332\340;\2750@p\275\247\177&>d\272\254\275\361\304\307>\2762==`u\374\276{\200\244\276\001_\315\275\376p\301\276=uj>\346\002\332=\204\035\272;f\2313\276\212\271}>f\346\275\276\004\302\314\275C\007\333=\257n\006\276\2259\210\276\027F\274\276\264%\204\276\236\266\271=T\361\202\276\253\327\252\276\324;\020\276\352~\261=\322l:\277!\212\223\276\276!4\276\343\360\177\2760\"\037\276x;\256\276\355\354\250= \356\000>=\274\354\276\225\032\312\276l=\007>\\\003L\276m\367\220>\277\272\353\276\354nx\276\323\372N=\301\315=>\200\314\356\274\342\016\344\276A}\317\276\030F\254\276/lF>\250\246\027>\204B\334<\0376\373\276\023\177\211>\205\014\340>\233q0>\232\325\304=\372\212\365\033?\204\323\345>\261\'>\274\372\037\332\275&\222\225<\246\267\251>)\253#\276\254\022\004\276\223\257\377\025\217\216=y\345\211\275\256\330\314=c\037&\275|\216\204\277_\301\214\276\311\n\266>G\301\006\277\376\014\036>\030k;\277\203\310\355=\245\235\005\275\002%\226\276\326dc\276\362\255\017\277\242\020\372\276\222#O\276\212Ti=K\210Z\276\021]\014\277\201\203\022\277\372@\235\276K\241\030\276Y[\255\2760\245\325> \214\217>\016\223%>u\324\"\277R\r\373\275\006\024\265\276\272\2312\277\220\001\265=>\314\024\277\347\210\317=\377\013\372\2745C\255;\357\242)\2765$\000\276\277\320\374\276!{@\276E\360\213\276\275\201\253\276\300\364\237\275\023*b\276\022\355\216\276\246\032\222\274M\220\211\276l}\017<\351_/\276\347x\034\276\037qM\276+\375\332>p\t\033>\203[\233>8\370\031<\212\t\311\275\321\215\340\275o\356\335<^\t\227<\254\2724\276_\235\364\276\252\020\245\276\253\000\202\276\3473w\276\260\222\227\276\336\206\227>\020\t\342\275\241t\343\276H\351\366\276\256\306\226=E\330\256=(]P\275-\254!\276\311\003\330\276\031l\366\276\225\361\335\275\234\336(>Lj\220\276\241\366*\274\235\2725\276\313\036q\274a\212\270>\210\251\010?\223\032\023\276\271\371\037\276 
r]\276*\300\031\277\035cS=\305*\010\275\000\367\t>\343\235\002\277{&\314\2761\264\177\276\203|f\275B\266\350=3\362\364>\014T\225<\313\352\275\275\r\206\263\275\to\004\276$\'\004\277\271\250\277\276j\022y\276\004O\314\274H\351\027>SZ\215\276ns|\276w\252(\277\356\220\217=\375\177\347\2767QO\276\370L\306\276k\006\276>\234O\005<\372_\033\276j\206\312\276\330[\254\276\377\301\253\275\353B\215\276}4,=\010\330\251<\366\353\367=\n\034\307\276\020\235?>\225\017\022>\332\241\305=\3352:>}\230\027\276\255\002\245\276[\254\264>\017\254M\277g\333\n\275_\306\230\276\303\251\234\275\370ig\276\204\031\260\276(f$>\3173\377>qB\261\276\265R\005\276\005\255\241\276*\\\317\274R0\036\277z\001\222\276\247R\032=\353mU\275\331\213\013?\355\0237\277\343\332\'\274\010\006n>A\341\210\276\207^`\275\255$*\276\026Q\371=\354?\330\274,\312\312\276\'\345R\276W\270\217\276\211p\307\275\222r\247\275\320a\217\276\037\330\337\275\255\213\260\273\276e\300\275\\6\256;\025\251\266\276J\003)\277\242$u=i\252\335\274\300\\\r\277lZ\035=/\330\240=@\325u\275\340w\302\275\222E\030\275Z\225\254>\303\201f>Q2D\276\233\236/\276\314\356\250\276\260\263\225\275\241.\360<\032\304\020\274\203\211q\274R\010\265\276\346\225\364\276\315\331\313\276+Q8>\340\203\r>R\342d\276\020\'\350=:/{>\351\032\267>_9Y\276~\333\263=X%\255D\004\333\276\363\263\212\276_\3654=\367\022\013\276\377{3\2764K{\276\331\000\232\276\032\344\377\275\014F;=\023\235\322\276Je\034=\021(\"\274v^\200\276\261v\n\276\212>\013\277\026\271\256\275M\272\026\276\214\216+=\3405\331=\362\243\256\276\235\355\031\275\236\222(\276\307\270\276\275\221\251\300>\237\275\234\276\226!-\276\221\246\223\276\316\344\363\276\201\201B>\003\252 \276&\363+\275!\251\203>K\271$=\272\257\255=7\234\020\277\005b =\036\221\206\276\254\335\334\275\22462\276\260|\200\276B\000\007\275\203\342\227\276\020\2376\277&L\204=\"~8=\016Y\343=\016\305\321=\347^T\276\200)\212\2769\223\370\276\355\272|\274:\336Y\276\330\277\237<\2302\r\277\201\032\016\277c\3570<\233X\251\276\006o\n\276\022\\\202\276.\377\261>\361\313V\276\014\272\017>na\334\276,\031;\276/a\361\275\326\344\025\277\302k\020\276\267\271\256>?\004\233\273\010Y\270=*w \275\317\347\033\277\363Jt>\020\343\326\275YS\235\275\240>t\276\344\241\202>g\217\232\276t\322J\276\224\257\034?\313US>`kq\276>2\261\275\375\306:=\354\014\004>a\210\217\275\271\356%\276P`~=\225\022\231\276\020\270!>\317\0163\276\361:\034>\260\301K\276\032\245\252>\344\263m\275~\001\352\2769R,\276\315$\214\276\023\271\332\275i\212\255=\300j\313\275T\021\203>_\003\"\276Hw\341;\267\365.>/E\261\276\213\007\032=\207\221\r\274\206\340\316>\356\243\242=5\346\234>\204\225\304;\244}\217\276\255Y\273>O\'\037\275\221\305\221>\351\373\250\272\276n\304\276e\324C>\254\000\336\275\343\013!\275\326\230U>#\220\245\276\361_\003\276k\376\321\275\323\"\210\275\315{S\274\006\004\212>`Hw\275\226\222H\275\031\333\263>\025F\264\276\252\357\004\276s\243.>\002W:\276<\300\023\276\025(\223\276/\003\317>\305O\313;\354?{\276\033\223\266\276\t\265\372\276o,(>\343T\336\275Y\030\300\276\275\022?\277\251O\t\276*K\311\276\007X\322\276=3\311\276 
\347\211\276\372*\273\275\371\211\200:\354n\352\276\243\330#\277\003\354\246=\30498\276\211\363\342\276\363\220\225=\244\221\031\277$\307\204\275l\372f\276-Uh\275.\234\266<\200\362\303=\0231|\276\221m\252<\333\376\220=2D]>\265<\245>\273\371\231>5\265z<\366?}\276\251\3053\273\324i\001\276`(\347==&\205\276\223\014\240>[Y$\275\345c$\276i`\244=2\213\251;\320\037\303\275\020-;\276\333\233k\276\303\207\217\275\320-\310\275\271<\217\276e\337R\276\275\3459>5K\\=\373\204\304\276yK9>\316\317<\275X\215.=\377Yl\276p\006B\275~\3606\275Q\031\262>ik\204>DU\234>\177\262b\275\250\027\306>\377\331\007\276i\231\004?iV\350\275\234\274\322\276^\317\325\2766\207S>\367#\033=9\260\241>\257}\027>\363q7\276\335Z\213\276N\177\342\275\341\230/>\205\3012\276_\375\030\275\317\330\344>\030:!;-\331\312t\315\010>\212aa\276\021d\037>{S\216\276WR\214>1j\030?\307\'\311\276\225\024\210\276$\333\311\276\306]\230>\215\270|=JU?\276\003\303.>v\2011\276\272\316]\276\261W.>\3021\316\275\277V\202>\340`\250\276\242\033\351\276\211Rl>g\220\217=@\\\234\275\365-0?\276\027\322\276\220\203,\276\304\rr>\322\207G\276\350\343\234\276d\356Q>z\220\361=\355\036\316=\316\233\002=g\025Y\276\227\247\366>\354\026\355=\317\223\247\275w$\010\277\277\262\243>=\375\231=*\026\324\275Z\320\010?2L\024\277\334\345H\276\300\213d>6\300J>R?\036\275\315S\250\276\210\236\031>\tWX=!&\003?\002,\217\275\270b$\276\257>\034>v\362\200=\357\220\263\275A\275\215>:\336\033\276\245\353\212>\245\014\331=s8\203>F\275@>\316)\240>,^t\276\031\241\377\275\310j\254=h\363I=?\263\361\275XKH>\225\226\271=+)\r>\336J}>\374d\347\275\250K4>\201\233F>To\030>\024\033\030>\320u\265\276\377\202\014>I#h>\203\203N\275D\301\334\275\314\177\276\273\263\030>\276\244W\260\276\334j)\275\333\215d>\264m\325\275\304\331q=n\343\206\276\336K\207\273\205\225\362=\271\'\321\275(#\244\274\n\364\261\276\337\361|=\364u\246=`C\003\275\2479^\275\213\351\243>h\032\024\276\375\373\305=s\234\233\276A\262\201=\301U\331\276$}\207>\203\216\205\276\213\373\266\276<\003\207>kL\202\276\202\251\361\275\031\303\243>\001\373\371=T\350\305\276\010\304\320\2768i\032\275\250v\350\274i\226\373\275\030\302\303\276\345\370(>_\247&>\265\344\367>ku<>Q\255\246\275\010\014>\276\377\026\037>L\037W>W\307\301=\020k\352=\177I\250>\352\214X\276\270\334#>\344\202\321=ry\021\276HO\270=\"\025\311\276\301\337l\2764\201\366\276h\301\266\276\336\020\200\276J\303\204\276\3269]\276\221!M\276%\221\037\276\261\304\327\276\203\273\363\276\\\205\252>\253F\311\276\236\304\034>\240\017\257;\236\302\236\276\267|\271\274\353\re\276J^\212\275^\025\201>~\272\025>,F\304>Z\241Q=\234\2356\276\227\250\212>$\265{>\002\227\221=0\r\357\276\250\312\r\277\223\226b=\007\232\367\276\365\230.\275__`\276\204\017\020\276H\267X\276>I\223\275\257\\\217\2753\275K\276\245\3555?\013>\320\275B\352\275=\3516]\273O\331\250\276\360\265\347>\253\305\215=i\205`\276s\031O\276\246{)\276o\274\317\275 \0168>\032b\t\273\267\331\362=\244>9\276]\375.>Eb\204\276\371\275\323\276S&u=JU\334\276\014\362\370\276\017+\206=g\270C=\002\024\230\276f\037\247=\220E\341\276D;c\277\340\002m=$\037\177>\035\n\017\276\234z\304>\223\354\351;qbV>\222+^\275\037\273 
>\001\274|\276i\376,\277A\021<=\251\353\217\2763\210\305\276\345Q\006?\336\315\220>\251\261\257\276\'\271\240=\224\256\325=\217\035\236=\343a\214\276\207u\370\275@\353\240\276\'\002\333\276p\201k\276\256/U>\177^\\<\311~V=\305\312\376\275\243\230\013>mcJ\276\207\237\265>\203\265\357\276_\345\333\275\255\333\346=\244\232.\2763:`>_#\026\277\303\226\315\276u\351\021\274\321\235`\276\377\024^\276\260:\t\275\002|\227=\036\014\003?\272j\002\277P\033\304<\267*t>\243\236\021\2764t\226\275r@_>\347\030.>:>\252\276~\355Q\276^(\036\275\t\362\204\276E?\340>i\003\022\276X\033\251=d>a\276\344`z\276\311}\024\276c\305\346\276\343h\004\277;\024g\274&\203\320\276\023\303==b&T>\265\205\305<\222\324\340=[xI\275u|\360\275/\321\\>c\232\262\275\357\376;\275\373\r\"\276,\016\005\276\342\371\257;@\024\330=\034\251w>\252\262\224\276d\307y>\373\006\001\276\223/,=\330\302\326\275\311\2413\2762\332:\274q\221\362\275p\\d\275X\201\327\276D3\334\276\360\240\317=.F\360\275\212,\202\275\203\010\022\276S\244\241\273p\305\211;<\2248\274E\\d\275J\223\241=o\002\031>\235>\371>\227J\210\276j\236I\276}\342^\275[\035\t\276S\360[>>\330\207\275\317\345\277\276\340r\242\276\017p\242\276D\373E>\023\330<\276\213Q\201\275\331\225\355\276\310d\352\275\010\326\333\276\001\t\200=\267\003U\276\232\241\305=\312\275\301\275N\177\224\274\001\330\017\276?t\216\276\371\253k\274\206\277\310>)=5\275v\211\372\276\245aa\275\203lX>\236\271\253\273.\321/\277\'|\t\276\332\347Y=\272\036a>Cqf=kj\205\274ZV\004\276n|7\275B/a\276B\306\346\275\253\357p\276\004G@>\336/\361\275\236W]>\tN\016\2773\236\351\276\242\031\014?\316<@\276\t\332\030\277\224\254\342\276b\020\311\276\276\253)\275\363\3237\276),H\275\355\322\'\277\006\347\337\276\323\325\030\276H\3343\277\311\202\020\277L\374j\277\240\306\021\2768\372=\277M\230B\277\305\371\261\276\216\241\243\274\027:\016\277\2436\273\276\273\213\014\277\021\366\021\277>\367L\276\202A\010>M\177\314\276j\262\'\277b\2551\277\024\336\006\276\306\030\030\276\364<`;O\1772>\34216\276\231\374X\276\266\351\036>\010I\312>\224\313\003\277\347v\352=\367\026e\276PT\302\276\331!n\275!\267>\277;\r\311\276\304@\001\277:\372\355\276.aV\277\206`\026\274\344\237B\276\331\005\303\023=_A\313\276\02213\277\334\250\241\276\324n\004\277\347\017\351\276a\326\354\276\235A\t\276\313\032\022\277\344\006&\276\316\221;>\"\355\337\276\t\\\r\276lMU\277\\/\212\276Q\344\325\276\312\345+\276\213b\300>z\371\n\277\204\t\211<\3403\301\276\266N\246\276\310n\323\276\353\016\240\265\'\206\276\216v\272>\240\030\345=\325\227\330\275\3566\311\276\204u\032\277\320F>\272\222\334\323\276\333\022U\276\231Z\365\276\261\237\031\277\200\340\266\275y\026\207\2770\235P\275\223\346\003\277=\322\026\005\237\227\276(&\204>\002\003\001\277\352_\237\276o*\322;$\2648=\010\365\210\274\n\336\254\275<)L>\031\270\330\276\346\243Z=1\2540\276\372\022\307\2762\353;\277\r\271\207\276{\230I\2767\326R<\r9f\277\323\240\260\2763k\r\277h\336#\275\216\177@>J\261\363\2763*\234\276Z\237\210\276o\254\\\276\356\326\237>\r\272\335>9\226\212\275\003\221\343\276x\312\200\277i2\355=\233\272\014\277\345\224*\276\245x\233>l^\236\274\035\257\006\277;\000\245=p\303\n\276XB\336\274%\300\320\276$\010\312\2768\202\331\276\307S\203>\035\3333\277\317Q\201\276\366\245\261\276\036\216k\276\270H\266>\025\321\314\275/\024\023\277a\377(\276\344\357\002\276\031S\243\275\024\275\355>\376V\363\276\305b\003\277uX,\275\036\213g\275\3662\345\276Wj\340=U\314\276\276W\2002\277\225bo\276\337\031\256\276\237\016G>g\"\257\276\017\317\001\275\241\353\335\276/\346\316=\365\253\240>\333\222\237\276J\"\310=i\350\210\277\232\266\21
0>O\377\007?\320\251\347\275\331\003\026\277\322,\220\276$x/\277\177W\225?\271)G>$J\271\275\243H\220\276H\277\024\276\244\337Q\275=\356\374<\321F\002\277a\364\305\275(\2210\274\214\372C\276\253\370\007?\201-\351\275H\312\255\275\033O\001<\023Y\312=\035B\251\275M\324\214=,\354\256>\215\234\214=\326\0346>(\250\352\276\202]\r=v\240\345>\007\005\340>\007w\312=\2516\274>b\033R>\021z\005?\002\013]>t\225\370=\257t\231\275\345\226\367>\322\220\247=\243\201,=/R\343>\253\025\225>a[\232>|\304\227>\364\340\033=\331\026}>\034\247w=\312\216W>Z\200\366\274\243|\211\276)\321\354>j\342C=]\032\226\275\025\200\205>\313\215H\276\317\352$\276\272e\264>\034o!>`\331\224\274\206\335<>\210T\315>\\\204S=\355^*?l\376\027?z\346\325:\035^\263<\233\220\226;\316V>\277/\000\013?(\225\017?\342\371\212>j1<\276I=\372>\020F\201\275\2060H?k\220\201\276\'\245\033>\352\330\312\274#\201-?\003\261m\276 \316\316=\260~\213>\240\235\017>h\240\354\276t\372E\275*\324\237=\220\rO>n\242\277\276\027;\302=\373)(\337\366-?\033\000\205\276\334sw\274\373L\216>n\312\367\275K|\261>\363\310\217\276;\371\001\275\202\330\265=\310}\333\276\177g\207\276Jq3?O\006\005\274\026\r\212>HRY>-\017N\276\360\376K>n\016\253\275?T\311>\366\342\267=\346:\246=\023-\217\276[\257F>\360.\304\274\234\246\271\276-\246\342\274\307\020_\276\243\355\323>[\245M>\376;\206\2767\377r\276\227g+>Y\3545=\354f\017\277#J\r>\030\275\272>\n\2352>\031D\206\276\r\024\214>\310\332\027=\177\\e\276\000\267,\276\026M\273<\314\322\203\275\347\353\261\275\201\273>=\003\300<>\234nb>g\021\035>\022\356\275=\302j\000?6\270\265=\234w\357\272\025\207\245>\246\266\274>]+\245>ub\007=)\201X>\340*\251>\224\211\266\274\374\275\035>\357)\276>\241\274Q\275\374\275\370=q_\222>\301\034>>\331\335p=r\177\273\276t\3274>\r>\212>\277\314\354=\360\272\260\275F>\177>\307\365\037\276\n\030\230\275\215\210Y\276\275\263\335\274O\310\343>\270\237\225=r\353\247\301\007\314>A\005\303>\034+\301\276\034j\266=\035k\303=j\212\000>\267\204\317=\316\036\255\275r\037\253>\262\242p\276PJw=\"w\200\276\262\344\226>\374c\204>.s]\276\025\323\364\274:/0\276\327\317\327=}\310\304<\222\274\005>\033o?\276\313\343:>)\341G>-M\327> s\374=\363\026l>R\203\020<\271\"Q\276\300WM>s5\207\276\376Mt>\303^h>\002\243\226\276\037f\221\276\021s6\276\262KC>\206\335\351>&\026Q>\273\014\356=9\000\253>6\r.>\344\237\000>\244\221\210=\252\271\307\276\355\235\246>\372K\302=\343\336\301=\321\020\203>\321\240d\276p\211p>\365\351->[\306w\276\016\357=\275\224\203e\2769\367?\276\267-\271>\247\220\032>\357}\362\275`\033\376\2754\'#>\'\347\240>,\307\227<\0254\261\276\240\341D>j\377\010>\245%\357=\216\203\340>;\367\276=\250\370\002?\200B\343\275\326\244@\276\265\276\305\276p$\200=K\354\227\276\374V\016>6\234\202>\200\235k=\204B\304>AE\356=w2@\276\211\002\003\276=\323\002?|\346\037\2740Q>=\377w\204>\262Z\314\274L\030\267\275\021lG>*\327\017?\213\007\357\275\226;x=\347\303=\275L\250\253=\244\312N\273\372\020A?\352\031\335=S\177\204>t\177\333;\273\010\001\276\020\250\n\276\227:\331\275\356\315\337=\201\r\017>\300\024w\276z\244\244<\'\200\364\274}\037\310=m\3616=\327\010_>\215\326\\>hc\300>\025\001\020\275<2j>\207\004\272\275\326%O>\034\362\202>\300\345\213>\2250\020\276\340\305L\276\030B\010\277\306\010\026>\001\245\026>\366\333\246=\247\321`>\027\217\231>=\376\217>\002\255 
\276\302\310\004?\257\214\372\276\235{\007=\377\311\236<\367y,>f\030\035=\335\274\206>\253g.\276\'\256\263=\213`5>\250\014\270\276F\021^:<\361(>]7\220>\207\217^=\370\227\232\276?0\267\275\317\233V\2757\\\031\275\214\251j\276\226\313\344=\225\201(>\305{\340\276P9\207\276\006/\222\2769\307\203\275\0318\341=\322\035?=w\262w\276\243p\\<\204\3413\276\251c\027\275\032`\237\275Cmp\276\344\346\211<\222\375\244=\335\022\007\276\356\222\304=K\207\271\275\227P\322\275\240\3570\275\242\312\020>\203n\312\275\323\373m\275\355K\374=\031\353\312>^\256\311\275C<\304\274\231#&>W\343\301=\010\253\245>\216\274\020;|\300\342=\026\'\303\275q\\\023>\315\335\013\275\031\375\325<\003z\356\276\236\370\254=\273\231A\276\374\321\265\276\364\334\273\276\356f\004=q\215\237=\346\3730=\374d\301>\330\317\324=\036\316\201=cl`>\374\374\350\275\035\350\302>\307\273\036\2752\234\313\276\300\366\343\304\272\373*\233=8\224\002>2ef>\032S\207=U\260\317\274\014`N>\272\217\220>\223\022\313\276\266\354\221\276\352\257<>J\334\246\275#\212\276>\010e\205=\216>\217\276\370X(\276\203\033\211>3q\204=\312U\202\275\346\234A\275\336\223\n\275\247\035\024\276\323\307\301>\347\350<\276YL`\276\323ul\274\235\260\342\274\340\211=>D\272X>[3d>rb\356<\220\021#\277\301^\n\277\375#G>\322z\336\276\241\367-?I\325\240\276.\330\373\275r\343\367>L\211\014>\3378\336;+a\345>N\014\234\276=\265\204>\003v\273\276\352\032`=\224\001%\276\027\\r\276\247\310\275>\376?\232>\260\014`\274\027o\021\276\214\223\210>\r\365B=\277\362\006\277.\007\351\276E \300>\244BD<\023:\215<\243\033l\275t=\344>\276R\237\276h\0340\274GJ\025=\024\263\224\276.4\017?8j\370\275\005;\302\275g\271\235>\230,\246\276eD\031\277\316t\363=\217\263\277\276\366\267\333\2766\210U>\n\371b=\327&\222;9\203\336\276\300^E;t\201\215\276H=\260=|O\213=ARK\276\017>\036?\335\312 ?\363d\002\277,\243\377>\235\3503>\324\r\216=*8q=nA\351\275\271\366L>\350i\210>H\324\322;@a\327>BF\243\275\002\215*<-\216e>\235\233\236\276.y\004\276\347\014X>\315\270\027\276n1\340=R\367\313\276\035(6>.\177\030\277\256\361S>wV#\2761\230\231><\256\217>\226kd\276\217m\304=\266\276\342\275\315`\344>Ur\240>\342(\265>\271\3460\276\014\213\203>2>\376\274\320\370R\276>\234\345\275\203\365\t>\262v\347\276\326,#=\236\303T\276\214L\212>J\013\202>\004o\006\277_&\273\276\032\177\001>\367\313\266\276\367b\335\274\322\320B\276\362\223\021>\037\327\246=\262\356\002\275 \327U>[*\265=U\347\n=\337\331\304=\251\245\'=,f\311\274X\351,>f\026\032\275q8\034=\303S\203\276\272\021J\275vJ\030>r\375\246=[YD\276[\271\325<%\347\222>\1775\243=l\002\235\275\320 
e>\361\304O>\253\350\267\275\3203\343=s\000/\275t\001\201\274\373\252A>\006hx>\376\270\357=3\374=\275\024\337\206>\006\031\303=\300\002L\276QX\033\276g02>5\333\354>;\355\215=\016.,<\317E\005\276\352\n\221=\233\221\233\276Lj\202>\240[b\276m\027\\\276+L:>\313\203`\2769$\206>j/\013\276jp\224\276\301+\331=\341\2252\275\265\235,\275Y\354\210\275\"D\031\276\347\235[>\343%\222>\006B\232>\272\212\017>\021$\352\025\031&\275\250\331l\276_\267\275\276\016v\276\276R6\222\276\t\ta>S\000\006?\272\327f\276\027<\252<\014}\331\276-,\205\276\001s\213\275\177i\212>\201~\362>\001L\272\273\3736\340>F\304\227\276\0373\355\276\277\326\346\275G\344\352\274\275\243?\275C\243\253\275r\377\311\276\207\237\306\275JL~\275\247\203\253\276\267\014\220\275\261n\024>\215\372@\276\331F\206\275\345\265\352=|\005$\276\0076\277\274\263\002\013\277\035(i\276\n\350\207\276\251\245\007\2774\363\266\275H\317\336\2762\024\247\276HH\211\276\375\251\343=\362\010\346\276kf\255=n\346\224=V\261^\276^\255\205\276\344\347\276\276\256}\013>T\361\344>?\377\276\276\0275\301\276\323\250\034\276\365_\346\276\022J\203=c\350\372\276g?$\276\313 \250\276\373*h\275 \207\345>\374\257X\277\335.\364\276\346\303\001\276\021\222\243\276\263\2404\276\016\323\257\276\331U\274\276\344\306\346=\221er\275n\221\261\276c\215\010\277\020\007\304=Y\302\363=\247~\025>\213?\340\276\255\013\303\276\236\r\017>\237\250\225\274\026O\224=\215-\345\275\220\3250>\253\355l\276\232\257F\276\365\364S=\207\251J\276\327\200\357\274r\334\243\276\376;@\276tW\206I\333\234=\243^H\276VI}\273j\243)<4B\246\275\\\023\365\275T\253g>\006R\231\276\223IC>F\373G\276f\370\231\275Q\340)\274\365t\203\276i\014\310\275Bn\257\276\334U\272\276@W\272\275\363\324K\276\306\235\254\275N3\200\276\003\215\261>\275\373\222>\032\227\017>\301\244\234\275\266\306\313\276\214(\270\276Z\272\204>}\376\023\275y,G\274\315\317\230\275F\353\014\277\'KQ>QB\310\276?\200\353=2\370\241\276\352w\220\274\277\324\"\276l}\361=`\373\034>t\327\303=\302\365\264=\230\376\225\276\367\0255>\243\0077\276\205\2015>]\027q\276\261\034\035\277\373\030M\276\243\0320\276\362\003\315\275e\010\031\276\263\325\320\276\030\010\020\2776T\252>\253\266\366\276\327}\014>ux\225\276\316g\312\276\340_`\276F\210\343\276\233\276\342\275TdK\276\016_!\2770\213\006\276\213\363\222>\221\t\207\276\005m\'\276(\272.\276i\212\000>[\271\003\277\302\370\267\274\236\226\343\2765\342\245>9y\355\275\010cc>b\271\017\276z\347\246>\326\334\025\274\330h9\276\231&[\275\013\021\256\276\362bS>\234s\370\273f\345L\274h\013C\277d\315\215>\343\'H>\002\206\200\274\231\371;\276}>\356\275\030\204\254<*\177\316\276k\"\323\2754G 
\274\017n\203\276L\271[\276/+\314=\0354\035\276\004e\030>\271[\001\276NJ5>K\303\360\276av\007\276\355@\035;\255\210\270\276\212:\266\276S\203z?\325\202N=>\321\302=\rm;<\227\255\266\2758\217X=\346\275\242>\344R\231=\217T\361\276BJ\203\276\362\315\310\276\214k\320<\022Z`\275\215\\\230\276\314\301\260>\245\000\241=\305R\227>\372\263\t>\360\177\030\276:F\274\276x\310\332=\317a\353=\254\367\274=\265\033\200>\001\002\204=\342\036N?\267\216\037;\302\006\213\275\000i@>\222\324\'?\2337i>\032h\211=\307\001\202>\006\230b\274\226w\263=\037\235G?^e&?)\271\312>&\316\370\275\252\007\\\275\315\251+\275\014Y8?TY\353=\3730\\\275\342+\266\275\217\274\004\276\270\275\031?S-\337>\177\205?\276\r\371\261=g@\326\275\345\004\217\276\364\360\347>3\033\003>S\332<\274\344\330\014>\315sK>\037\217|>\235\213\243=\327\3740?\320\213\235=\267\232,>8\336\244\275o\322>\275\307\263\343=\306\007\347>\250\251\023>\3039]>\376o\203\276\274\370\373>\032\020{>\272,\034=\210:\021>\271!\031>:p\341=\347e\260<\200y\234\275\336\211\024>4i\307\276\350x}>}\361\n\276\262!\344\275}\263,?\356b\035?\rv\266\275\272!U\276x\377\363\2766\333\201\276\252<\310>\274D\312\276\036\224!>/\033\010?\374`\356\275y\234\014\274\375jm>\376\370\312\276\357\305\243>\261\243\274\275\305\213\360>\256\347\215=\267\025\017?\373\025\276\036\203\207=\315\277#\276\234U\337<*\306\320=\327\3257=\3114\354>b<9>\r\201J\275%y\237\273j\207\250\274\315\303\304=kQ\211>\215p*=\212\311\230\276e\\\014=\\\202\350>\303V\236>\370\025\201=\372\246\326\276\233\326\250>\376d\234>u\317\320\275O\301\205)\024\311>7{\004=\270\351\362\275M_1\274\266\363,\276\271]\371>\021\323o>\003\013\243=\316\242\234\276\235\256j>`\375\314<\213i\365=\322\254\023?\377_\024>\367=_\276|@b>v^\341=\254M\215>,\3432\275~&\355\275\352\020(>VX\032?K\233\221<\216\235\231\276\366\343\215=\235\242\304>jm\032>7\024\315>\263v\350>e\207\005?\014u\254=\217\031_?\370\245\332>\356\210\243\275\nw\222=\366\256\237>\300\223;;Nt)\276R\032\016\274\200\353\005\276\303ks>d#\023\276?ZF>\000m \276]\235\311>\271\345\347\275\370bN\276\343f>=,u\363\000\010=\276\247\r\210>Z\244\004>:\377N>\327\265\033\275\207E\367\2754\323}>\262\327Q<\230U\271\276d\020\037\275e/\014>\374\314F\276\363Q,=5\276e\276\376\261T\277\222\265>>\270\2176\276hVA=r\221\267=\272\252\360RB.\276|\246\275<\256L\330\276\257H^\275\r\215I\276\375v\210>\237Tm\275\363\000G\275\226hf\276W_\241\275\303\324\312\275RzL\276\264\260\n?\270\273\035\277\322\n\361>\033\356\010\275L#T\276.g\257>\321\253\246\276=3\251\276V\212\241\276\224\004\267\276uq\203\276\374w\201\276\340P\261\276\347\247\177\2760\310\252>\314Ud\276~\357%> 
\211Q\277\360I\240=\327\334\241\276\013T\360=q\004\357\273\2738D\276\t\207\210\275U\350\333>e\227\310\275QI[>s\220!\272\236\237\213>\034g\177\275\"\315\000>\217\231\003?e\233\\\276\343\253\327\276\033\231\207>\322{\261>\371\212\022\277\221B\261\276\006Y\255=\264\002\027\277~\342\277\276\271\242\222\276\307G\305=\017S\331>\375\315\">\331\260\275/K\004>\027\305\024\277g5\212>\243\262\337\2763`\207\275>\245i\276\272\017\225\276C\225\346\273\0000V\276\216u\347\273\017>D\273M\200\247\276z\227\355\276\322\202\356=\251\326Z\275=\026G>S\017\342\276\312m\034>\t\252\233;Y\227\333\276e\001\215>8\037\377<\254j\250\222\227b=Z$\315\274\245\365*<5k\201\276\023\375\365\276\220\033\'\275\355\334\337\275u\313i\276\372\247\212\276.\221\335\275\366\377\327\276&\311d\276\235*\211\276\026\336~<\240\305\346=\020\266\030\276\203\216\010>;\005\221\2767\373\206\276*\333\020=\033c\373=\246X\302\276\007\260F\275\367\256\335\276\'\312\204>I#U\275\024\003\315\273\306p\370\276\t[+>{R\004\276\306\002#\276c\243\024\275v\312\247\276\272\244\347>i??\277\275p&\276\216\366\r\2777p\224\276^\220\020\276\016\273\370>\365\270\336\276\363\247\347=\2718\230<\214\352\212\276\355\320\203\276Z\r\236\274\3718\214\276\224\234e=\233\320\345\215\223\301=\207\273\032>e+\001\277iu\312\274QO\210>\322\204X>\254\345g>\315\257\022>\225\235\233>\342\250\227\276\r\177\330\276\271%I\276\356\301\273>~x\247\275\030\036N\274\334\t\350\275\204\236\373\276I\2449\276^\252\005\277\346\037\005\276:\234M?\310\202\016\277+`.\275\240?\345>\315,\307=\275O{\276\266\303\241=\204\315t\276{7\334>\220\331\376\275\233\345\036\276Z&\254>\r\210 \275\356j_\2758\357\331=\230B\313>\216\274h\276\026\256x\275\201\325~>\234\2309\276\034Y\221\275\336\005U\276\024{\373>\230\206\035=7b\'>ml >\231\362:>Sm\n?\005\3428>-yK>[F\223>\365\233\237\276\r\301\257>\n\000\364=(\353\374\274\207\257\270\276R\225D\276PGo?\361-c?\311\217\232\276\260\357\337=\024\220\027\275\322\034\007\274\002\303\210\276?\322\323=\036\276\314>\246\354h>\246O\200\276\344\332\204;\336\377\t>\323\270!?n\347\001>\261\006\331\276\374\004\277=\001q)<\255\374\214\274\206\234\205>\346\002,>\037V\307\274\263\2550<`\302\305>\024\210!?T\"O>\211\345\026>\333\263\205=\262\263z>@qo>l\r\311<\272\235\237<\n\272\314>\316\005\202;\035}\307=\356\226\371=@\237\324>\254\016\251=\336\307u>\240\326K>zp\372=.\217\017?\014D1\276\265\301\300>\371\037\010?\3245\341\276\313\363V>\007\360\373\275\370\236\251<\263\364\210=D\n\007\276\361\233\251\275\035\206Y>e\332\002>\363\335\221=\233\266k\275\234X\215>`)q>u04>\003\376\375\275\266\317D?\33157\275M\037Q\275X\007\353\274C]h\276\354L\355:\306Y/\276\216\344\244\275>\202\311\275\016C\351=Rt\206\276s\266\r?\351\036\302=_\354\347=Z\266\253>\312\263T>Y\224\017=\025\"\234>/_\232<\260\207\030>\335\347\276>\271\335\003\276B\200\374\275\246\027\216>\226\326:>\323\325\316>\023\356\207\276$q\300>f\316\373=B\032\254>\3577\007?F\352\235\325\323g=\224N&>\303_\202\272\200\3658\276\205\3060\276\337\243\027\274f\225\226>]_%?\364\205\233>\364\006\337>\332\256\331>Z$\233\275\246\034\020\2764\313\315=\222!z>\302\217\007?\277\3706>\216>\200\275\377U\000\277\266$\021?\252\365\252\275\203\013\023>{%\217U\340\336=\330\013\201<\333,A>\334\315\255\276\321\274\025=M\024\205\276\202\024\034=\365\356\316=\260\\\'\2760\245\232\275C\244i\276\276>\323>\211$8>\341\377\246=\234T\204\275\020\2462>h\003\256>jd9>\306\2702\276\215v\255\276\263{T>T\250[>\033\336\364\273\010\244\375w\236\013\276\202d-\275c\250J>L\346\"\272 >\230\275\260\206 
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/gates/kernel/read"
-  op: "Identity"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/gates/kernel"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/gates/bias"
-  op: "Const"
-  attr {
-    key: "dtype"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "value"
-    value {
-      tensor {
-        dtype: DT_FLOAT
-        tensor_shape {
-          dim {
-            size: 192
-          }
-        }
-        tensor_content: "[192 float32 values, escaped binary payload omitted]"
-      }
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/gates/bias/read"
-  op: "Identity"
-  input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/gates/bias"
-  attr {
-    key: "T"
-    value {
-      type: DT_FLOAT
-    }
-  }
-}
-node {
-  name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/candidate/kernel"
-  op: "Const"
-  attr {
-    key: "dtype"
-    value {
-      type: DT_FLOAT
-    }
-  }
-  attr {
-    key: "value"
-    value {
-      tensor {
-        dtype: DT_FLOAT
-        tensor_shape {
-          dim {
-            size: 192
-          }
-          dim {
-            size: 96
-          }
-        }
-        tensor_content: "[192x96 float32 values; the escaped binary payload continues on the following lines]
o\262\276\365\341\304>\000@\347\276\021\027\334\276\254\205O>\035\n\304\273\303\226\272>\017\206H>=\376\t\276N\3155>H]\316\274\376\231\262<\245X;\276\256_\242\276(\367\227\275#\336\370=\336\221\354>S\343\235\276\217\317\211\276\325\302J\275\257;Q\274\\\255K\277\006\350\037\277\302\301\224\275\274\010G>\215I_>xE\255>\262\216\"\276\"\223\201\276\17793\275\265\226\302>\275\264\357\275:\'\321\276\205\3437<\010z>\275\353\274\017?;<\373\274\2175\254<\003\237@\277\325!\322\275|\225\003\277\323\010\210>\244f\017\277\216]\211\275\207\217\243>h\275\233\275\334\247\223\276\314!\315\276Y\232K\276n\342\337=\177\335\211<\315\201e>rc\372<\213\0241=\365\206\350\276\376\241\001\277\374Di\277\346\204\210\276\202\250\350>\321I\350>\004\265\026>\243\327V=\221\024\277>\333R\036\276<\326\237\276\r\261\341<\231\023\256=t\226\341=\312\364\255\275\303\003\245\276u8\017\027\036;=\031\337~\276B\217\034\276\304\247}\2767\341$>\036w\275>\232\315\276>\356\336\326\274\017x\373\276\255\036\376\276\0220\233\276 \264b>r\210\226\276x\207\305;\016\223}\276\350N\020>\316\271\027?!\351{>n%\026=\t\021\020\277\335 \225=\024#\215;\242&_>\367*7>\275\344Y\275\271l\024<\260\242N>*`\271<&\252\221\276\367A\211\276\336\301\302=\247\310\363>%\310\240\276\322\327\335\276\225\302\204\276\300\363\217\276\261?\230=\320\210\373=\354\r\241\276\327\323\342>F\217w\276\r\357\265m\016\246\276\361\373\215\275\321\234\307=\020)E>\024A\t\275b \220\2751\334w\276594\276>\350B=\210I)\277\334\032\032\276\275$\245\276z\260\246=\352\010{\275]|\242\275dfG;H\365\235=\260hN\276\246e\322>R;e\275\rb\211>\307\021\001?T9\225>JO\036=\213Q)\276\022\036\261>\266\203\025\276j\361\366=z\004\017\275ER*\276\365\030c\276\323\221\014>\321>b\276z\324\210>\224P\000\273NR\004>2(\236\276\006I)\275\374\277\361=P\374W>\201R\276\272\342$v\276\275\262\003>5)\353\2750\3308>\245b\r>\323\256\022?\036\367\323\276\376\217\030\276\223\355\306\275~\320(\277w\004U\276{\366\002?V!\037>\330\326g>\302b\217>\357M\244>q\301\270>\244\374\240>2\315R\276\2664\244<\017\371\275>\343`Z=\245\306+=\264\037\013?P\276t?L3\r?\000\307\352<\257\333k>a\336Q>\242\006\246=\347e\t?5\341k=\302eR\276\252Cg\276\221lr>\312\361\322\274\275g\001>a\244\203\275\361V\026>\260\256\223\276*Y\201\276\004\324.=\r\272\213>g\330\001>\020\300\261=\360\025\204>\235Z\271\276\344 
\375\275~2\316=\027\016A\276\022\221\323>gjO\276\364a\006?\310|\t<\22169\2777\277W>\335\033\022\277\342\203\007?\375\366\000\277\254\344\252\276\371\247\214>\352}\364\274\362F\234>\362\2119\276i\323$<\325\343\375>F+5>\370x\240>\337\334\021\363\205\331\274&\001\014\274_\242\245\276\367z\203>k\305|>P\310A\276cOg\2769\267\242=K\256\301=>\265#>V\n\017\276\274A\347\276\262\007\222>\007\033u\275P\316\362\273F/\002\277\222\003\003\276\276\342\303>\220\006\210\273\370Y\r>\321\264\216>_x\303\275mH\021>\321@\037\276G\346\214\275<\307\272>\361_\020\276j\207t\276\320\327\223=\244\0268\275\250\317b\275Z\205\340\275\363Z\355=U[\217\276\351\363\210=\245\335\336\275\223\256\211\275\2316n\275=q]\2768\202s>\374\002\271\276l%\345=+\001\257\276\321\004\233>\231~\032?\322\305\215\2764=\221\276\200\002\230\276\250r\355=\337{\214\276\346\361\263\275\307\276\215>\236\216\227=M\360\331=e\216\264\275\333&\204>\254\315\226\276\205\214\035>6\244\231>\235\021P\2755\362\265\276\177\037\324\276J\002\360>\263c*\275(8h?2~%>\206Z\340\275e\034w\275\317\320\231=\242\261\035>1\334\357>.\262\300\276\214\014\205>\022\024\353\275\006\367S\277\306\342\350\275VD\216\276\207\324\322\275\375f\341\274k\n\377=\032\327\216\2758\034\303\275qc\267>/e\264>\016\306\306>\020&\361\275\0355\020\276~G\300\276\310\324\307=\022u\266=\374\202\262>s\312\017\277\031\335\225\276\372\014\371=\312[\010>\306\330\002>\202@F>\212d\002>\252\024\321\276\222S\226\276#T\255\276\363\213\261>\306K\241\275\007\337\230>\356f\225\275O}\213>\"?8?\265/Z\276\254\330\314>\204$\226>\033\203\306\276\225\024\003\276\246\263|\276Nb\215=\376\222\372\274Y/\223\276\235M\023\276%\335f>\265\3539?\316\206\212\276V\347#\276I\343\373<\304y\201>\205h{\276\026\277\314>\367pq<\333\351\305%\346)>\305\325\334\275\356\323\372=\376\353\021?\242\231H\276i6o>9/*>\226(\304\275J\345W>D\303\022\276\375\273\340\276\262\332.>:\351\027>\3538\014>r<6>\303\214\013>\305\037\264>\037\037/>\027]\204>k\302\323\274\275\332\217\276\221\235\241=o\340\260\274\347W/?\321\353\207\275HB\214\276:\240\234\276\216\222\321>\035o\006?~]\367\274k\276\246=A\316\357>\001\237&\276\334m\304\276\204\tM\276\301Y\\\276i\001j>q\362\346\275\003s\225>\352\236\310\274\207\n\244>\310\237\331>CQ0\276v\252\256>\222\030V\276Z_\254>\035\364\326\275\240/M\276\342\034\317\275\3102\323>\227\336\254>\345\000\031\276[l\302=\326\315\200>\r\016\341>\271\246\211>\360v\361\275c\237`<\000f\204>\3703\035\275\231;\350\273\303V\251=\227vs>\353\375I>\177\200K\276P%\025\2761){>fO\005?\035;\300\276\022I\354<\2142\031=W\027\304=\000 \251\275\231\337\212>\370\265\020>5/\030\275\200\030\304>\036\017q=\r\"\317>Y;\255>|\325W>\343\246!\276\321/\242=\320\335\364>\254L\212=\362 ,\276Q*\300=)\362\264=\372\263\317=\027\262\004=\331\272\206\275\217\337$>\257\026.\276-\025\361\276;\374\320>8\307\206\275\361\256\000?\241\2770>\312La>F\022&\276\353\267\253\276m\002\342\276X\344\270\276\200s\254>\335\201a\276\367u\265\274\021\360Q>\347\226\243\276`\251&\274\215\346\371>\263\333\177\275y0\200\276\334\301\026\277&7\310<\345B\000=\277T\037\276R\\\356\272\302\014\334>x8\320\275z\014\240\276\005BI=K\242r>\265\025^\275\000\231\331=wt\235>%\203\254>\305-Y\276\251z\313>\212\261\027?<\327\003\276\241\241\327>v\324\236\276j_\016\275\232\374F\276C\035\t>\017\372\235>\014\342\350\276)\002\272\275$\353*>\205?\032=]\027\256\276(\210d=\300f\314>+\242{=X\310\006>\330\200E?#\354\214\276IN\014<\365\333R;\313\020Q?:\t\271>Y\341}>M\213\264=4\316\366>\241\272\345=\371.\036\275\006\227\312\275\356\333\036\277\346mM\276 
U`=6\177+>\207.\255=\333h\006=L\353\242\275:\0233\276\3663\001=l\006\337\275\342\320\205;/}\245\275\233\310$\276\010\234\372\275\231\033\255\275p1\235>\315\343\230\276\332\367\223>\027\255\000\275c\256C>\271\344\320>R\3554\276\332\317\246<6\334V\275y\230\210>VQ\360\274\233\216\242>)n\374>\303\n\t\277\354.\243\276\024\364\273\276\305&v\276\2219\250>\352o0?gSH>\250\324\235>uzP\276Li\274>\326\327\007=\177\023\006>Yg\331=\027_\260>\340\337\020?5\223\273=\305q\303\276*\250\r\276b\373:\276n\n\027=\216\377\370=\004O$\277\302\353\270>\276\255\305\276\357M\330<^\237\267\276N=\206>\352%~>A(\301=\344]\201\2763\276-\277\214\255#<\371\316%=\356\320\232=u6\222>\023\0349\276\003\305\003?\265\321~\273\237K\004\274\272sO\275L_\265\276Y5\266\275\315\\A\276d+\305\275)\214\\>!X3\277G\362n\276\242\215 \276\\\333\007>\215\370\024\276\355\314\245=H\r\266\275j\321\257=4\225\026?vye<\370n%\275\034k\322\275\275q\327\275\332.U\276\355\277\037?\264\202\221>\316\006\221\276\321\201\330\275\340\017\274\276\206/ \276%$\260>\243\'\264\274l\211~M\376\022>E\270\243>\335\327\225=+\246\211<\2447\362>\244a\264\275T\345\226\276N0\353\276\322\251\'?e\243\342=\332\300\016>\267\211S\276\372ic\276P\025,>G\017(?,\344>\275=\271\022?\245\001\263\2762\225\005\276p\263\n=\355\307[=\362_\312=\220i\347\275\252\327\375\276\215$\202\275\207\240t\276iG\005\277p\264\324\275\303\213\026?\013/9=\224j\255\274\242\245\364\275\33091>j\027^\275F\007\246<[yb\276e\346\225\276k`D>\006\3636\274\373\020\321\275\372\213\204\276\324\253\203>\0223\234=\"\344F\276\004@\214\276\272\344\232>>\373\310<\3647O>\251{v\276\004\316]>M\224>\276U\025\301>\341d\304\276\010#\362=\005\003\027\276tW\013>\004\371\211>w\253\017\277Lj\360\275\002\334\346\275\321xI=\230\215\303\276\203\246G\274\361\340\023\276c\267\277=\355\n\332>\030\366\331\275\357\022\344=ye\016>\261\326\230\276\243V\261>\344\206\206\276tP$>\002\275\263\275\302\363\224\276\364\312a=\254\n\335\276\307\315\265=\205T\331>%+\236\275\031\220\276=\255q\001>\211do=l\223\351=\3774\316>S\272O\276\245\365\276\276$F\366\275\357$p>D\021\034?\27499\276\360< >Q\230\275>;\346\036>\016\314\201=ds\211>\334q\004\275\013\326\'>\220\343\340\276\t{\227\276Q\352\216\276\310R&>Y\227\215\275\250\246\355=9X\230=\230\350k\275\321\331\210>0\303\241:\305Z<\312\216L>\240\022\017\275\222\306\267\276\317@\251>l\302\211\276\020\266?\276\0049h\275\260\020j=\212\307u\276\035\356\016>AR0;Q\365\004>\301\317\006?~\t\244>\244\312)?\320\033\035>\274\301\205\276H\231\330>|\377\317\274\023\356\007>Cj\262\276\246c\336=B\256\032>\231\r\336\276&\327\311\276=\353W\275?W\335=\232\370\254\276=\261\207>\031g\233\276Q-\322<\341\274\203\276\030=\271>\223\nX>;\230\315\275(\240\207\276\225\0260>\214\004\024>\366\314\302>Q\'\243>\022\331\202\276\004:\210\276\352\313\321>\221\366R=\264\327\275>\231\301l\277 o\204\275W\302\274=\213\245\200\276q\2638\276\335\273\333>\257\331\r\276?\017@\274\214R\212\276f\212I\274g\272\317>~\337\002\277%\260\334>\332\202\263>\274\240P=\261\320\224>G\024\257\275\026\362\264\276Y\270\325\275\346\006\226\276|\266\016\276\033V\217=I\345\311=e\0360=_\324g\276W\200\360\275\177Y\366>#\352\001>]\002\310>x\261\235\276|\002\211\275yc\316=2|\013\276\330\021I\2766\221\243\276\344!0\277z\277?\276\036\244A\277\320\262 \275\241\342C\276O\230<\275a,\355=\334\305\214\276\256\nA=<_\310\276\252wk\275)D\325\276\246y<=A\027n\276 
v\007\276\227\353\204>\\Y\370\276\n\350\253\276\361\325?N^\256\276?|\225\276\014B\315>\345,\350\274\t4\337>\217G\202>\022Uj>\337\327\324\276\361\215\032\276`D\r>\223\321`\276\215\026\253>8\374\000>\233(q\276\3708\326>fex\273!\027\375=\301\274\010\276\270\205F\2753[\030\276j\254\330=\246\323\032\276\3066\216>\333\263\322\276\271\347\307\276\260\310\016\276\014\"\263\275\354p\210>\247\033\200\276\222\377v\276\177\367\247=uB\276\276\301`,=\332P}\276\273\017v\276S\322\003\277\034\323\3328kWK\274\331\263\'\276\303&\240\276\306\211\226\276C\363\252>d\2072>\274\266\021\274*\351\367\276\347\314\363=\212W \276\310F\010>?\253\'>o\372z\274re:\276\302\3677\276\3661\324=\365\236\206\275\033\243\002\276\307b\217>F\326\\>\273)\362>\2513\354=\373\024\253\274c:\241>VE >H\233i>\376\235\302<\235\257-\276\030-O\276E\242\026\276\370\202\212\276\257yV\276K\356j\276\232\324\216\276ON\022>\315\315\252>EP\035\276\032\336o\276\210I\312=\245-\351>(X\331>\330\274\245\274/\223K\001W\377=\234\311\331\275\364\272\271\2762n\226>\204\230\251>V\031\327>\241\316\202\276\213\006\354\276\010rc>\262\371y>\234\230\002?\274\306\005?\267\345\236=1<\220\276\211\370\361>\312S\324>--\310\275y\024\241=z$=>go\235\277\275I];\346U\202>\342^R>\226\310\020=\322\033\010\277\355\357\327>1\025\276\276\007\322$>\337(\247><\262\027>jE\245\2764gt\275\017\317\024?\031.\216\276fH\275\276`\363\252=P\227j\276\352\264B\276\302\274\211>A\327h\275\273|\227\275NEC\276\t\240\251>:\376w>SH\264>\353\255#\276!\224\342>P\354\265\276h\360s>n\333\320=6d\'>+\243\021=;(\251\276x%\202\276z\350A?\037\223\326;Oj\372:\031`N\275\273h\240\274\246@\364\271Ens>6\247%?\260\232\023=\276\273~\276\343\335\317\275F\r\003>8\326]>\251\250\002\276\315\001\377\276\\\371\375>\235\024N\276\0347L?\327\033\005\277u\036\\\276\002\311\314\275\\\203\355\276{Y)?\353|\273=\350\210\233\2769\222B\276\242E\262>\267\005\342\275\343\306\"\277\377\345_\276\365\277b=\235\2540\276%\371\t\277\316\326\237>\373{\330\275\267\022\276=.\215\026>\234\373\300\276\317\202\332\276\267M\324\276*\275\230=\237\215\023?\231\025\270\275\325\032\351<0\214\312\276\026-\246=\320|\317\275\262`\005>\tyB\276\205\260C\277\243\344\300\276#\013\232\276\224\307\317\274\230^$=\217H\223\274\360\357\236\275|\352k\276&\251\325\275\014(\310=\256\234&\276NU\250>\001\312E>\301m\007?\340\257\027?\330n\362=\n\216F\276k\313\331\276\365\n7\276E\261\353\275x\365\372\274Y\342o>x\"R\274\304k\004>UQ*>\256r\205\275H\252\375\275\023\232\243>\242:\311\274\375a\327>\247\250\203>?}\337\2751U\003\277\222\341\327\275y\034\027\276\246\306\377\275\256\024\347=\225z\376=\360\322%\277A\240e=_\202\020\276\362\t\203=W\035P?s\201[\275\025!\371>\224s\261<\217\224\244>\204\t{=)\265Q>\261x\231>#\242\033\276 
\241\330=\377\241\225\276\244\361#>\231-\276\275[\302\345=\256\364\245\276\246\024\324=\023\020\213\2756\201[\274\256\000%\276c\204\016\276ht`?\022.\006?v\300\212\274\375B$?P\216\204\275\005\000\225>\365\334\204>\232\344\336O\\\023>LRx>\210F\r\275\024\313\203<\300\261\211\276\251\300\357>q\031\003>\\eK\275}o[>O\362\307\276e\016\260>\251\370\206\276%\251\241>_\036_\276\202\232\267\275a,\341>\'6\221\276UA\373u\277b>\033\253\350\276\223_\223\2740\274\022?\2704!\276\326\305\350\275\264x8\276\3412\021;\370I\240\275\367{\334>\212\332:\276\212-q>\272\302Y\276\345\347\372=\026f\022>\275\336\031>\325\320\232>)\351/\276\210\222\002\277\364\257\207=\211l\200\276\3611\014>\261\'\037\275\221\317\253>\236\244\252>\030\201\023\276J\344D\277\032\004\224;O\315(\275}4\265=Z\350\223\275\366\030u>\271\017\305>\345\206\317\275+\205\253>\236\377\r>\323\321\030\277Z@\007>S\253\257=UB\275<\337M\034>w\241\021\277\305v\221\276\216%%>mwh\276\274~\312\276KX\017?\223\007\257\276m\035\326\276\350\245\030\276\351\243\242=D\332\'>\365\023T\275\212\007\221=,\213@=h\364\256\276\244\306\225\276\243\337M=h\0100=\252+q>emR\276^>\261\273T\356\210>\t\002\033>\034\032\022>Gb\251\276\252\207\222\275\276\024\345>\000F\330=\214\373\357>3\274x\276vn\231>\246\304\025> \256\213=@I\371>y\022\240=\340pD\276q\337b\276\326\277\220\276\'\300]\275\326\255\302\275\330\220@?U\013\034\275|{\226>\001\035\224>\324\362\334\276n\326`\276\204\010\300<\360\177.\276\247\302\005\272\237\362U>\206\352\230>e\216\201\276\362\347,>\\\342q>\221l\007=Iv\033\276\355\213E\276\027s\273=\023\001\241\275\002x\232>\372\367\312\275\247\013l>KPI=\3028\307>\315K\314\275\345\217\033\274\327W\263\274hP\016\276j\0359?\305\267\021>\206\310\020=\256\275\224=xK\377>O\013\031\273\020\017B\275\350\213\220\276\375*n\276\014\033B?]\n\267>\203\2163\276\016\240\346\275\303\010\357>\321\217K\275\242\216\014>1\220\247=\260\263\021\275]d\267=\360-\005?\230J\223\276Q\266\237>\213},>\t\177\030\277\311\356/\277\233\276\227>\327_\232<\204\327\002>i5c>o\250\307>T>\332=$\302\246\275\3261\031\2753\2035<\214\035\013\273\021\307\007\277z]\262\276\367\000:>\315\374\330\276\255])\274\\\226\"\276\037%#=\220\363g\275\0220\027\275p\245R\276\274\344\204\276:J\016>y\356\001?\325W\362; 
ws>l\353\222\276su\214\274b\346\324\275\230\212\216>q\215o\276x\323x=\237w\367\276\263\373\220\276\315\355h\275\343\272\350\276\026\257s>\275\3738\277\177s\363>N\207\215\276HC\234=\236\277\223\276`\021o\273\270\036\230\27693B\276\345r\003?7\253\311>w|\014>\312\227\026\276)\341f<\375\371\007=\340v\205>\"Q,?B\204%\276\320C\373\276c9\200\276\336\250\246=\207\206\177\276\210\027\356>\323\0021>\200P\022\277\306\340\030>\364Od\276\353B\273>i\310\305\276@\000+\276\323;\016\275?{H>\021\347\277<\324\270I>_\275\344>\301\262\316\276\t\263\007?\230f\032\276.\3037\275ze\313\275Ry\216\274\355F\350\276a\321\255=V\304X=\236Q\237=M\203\017>\320\323\207\276\346\234\330<\312\"\"\2776\364\204\273\317}\201\275\336\237V\276\022@#>6\020\003?\301\223\031?}\303\021=\007\341\005\276\303r\212<\013\376\'\276P5\322\274\315L\365<\005v#\276\375\331\322=\203\245\006>l\342\211\275X\365\324\275\234\307\253>\371W\032?\206b\307=l\224=\277\271q\n>%\016\307=\036\247\234\276\253\033\232>\335+\224;\301\300\021\276\024\316\n\276;\336\306\275\365&\310\274\n$\212\276\022v\351>\032\265\374\275t\235\227\276\",\025\277\010\362\243\276\314C@>\031\217\244\2764\007P\277-~\257\276\001\024V=\007>\233\274\301\264\032?t\314\236>\026?V\276\233Gg>\220lp>\220L\010=\'\226\330\276\331c\250\276ne\203\274\014p8\276\314\256\372\2769\371\033>\017\001\341_\354%\276\036\230w\276\350]\207>\303\372\017?\010\261\230\276\326]\267=Mk[=t \351>f(,>dn\256\276\367$\220>g\340\300\275\200\025\307>D\023}>?\212W>W\236c\276\"6\261\276\275N><5\030\247\275Kfv>\354\334M=\030\3164>\027\200\260\2750\236\036>\034\366\354=\357t\030\276r2\273\275\tR\376\276\204\224!>\271\026\256=\353H\r\275\203\266\330\274\2331\261>\350dW\276\303\222Q?\3032%>\353x\000\276aj\240\276\"\337\023\273*\327R>n/\311>\334\027\307>\307gV>-N\374=\300p\360\004\334\244\276\277u4>p\010\252>\263\004\335>\203\351\342\276I*\211\275>\311==Y)A\276\374\256$=\262\333+?\006_\236<\377x3?\016\237r>\200?\316\276|\222<>k\266N\276\222\264E\276\365\316z=(\363\236\276\340ZN\276j\326*>\230k\250>u\2529?\002y\202\277CA\344>O\014\273>\312\300\031?\271\364\353>\322\275}\276\265\t\030\277\346\2169=\254oM>8\245\007\276\272\202\266\276\312\021\326\275\330\036\200\276O\223B\275\3273D=\345\363\246=pu<\277\t\271>\276\\\277E\276\301R\006?\233%n\276\014\276\326\276C\343\334=7\266\327=~f\211>\342\327\244<\217%P>#\370N\276\371\215\304\276\2345\204\276\376J\315<\271\325\214\274\241\311V\276}p\201>\364r\237\275.\257\234\276b\271\024\276\024\324\n\277\331\016\367>\312\365\242\275\313B@\277\320\372\013\276[\364\200>\0131\000?I\224\023\275\357\254\255\276\306\305k\276\201\250\r>G\306\272>x\212 
\276\345F\312\276\025\230\323=\263\225\317\275L\021H>7\231\037\276\1775\240<\315B\270\274\216v\274\276i\221\205\2763\031\213>[\213N=\231\307\307\276\334\356\243\271\3272\277\275\240\313\r>\373\203\331=\304uN>\235\271/\276\212~N\276Z\2139\277\303*\212\276JM\000?\220\032\331>\364\323\330>;8\353=4\352\310\276\263\226\203\276^\200\204>\314\036\260=C\032\232>\000\253\265\274\\\005L>\240\327&\276\300\353\035?\005\233@=\236\346B>BG\037>\215}\366=\233$\253=\002\360U\277\234\014\333>\274\367\254>\256\226J>\001\030\336\276k\332b\276\212\026\314\272p\317#\276\334\204E\276z\243\263>\245\tP=I<\025>V?\352<\216\026\315\275\314\310\227>\341\335\242\2758eN\276;\232\225\276LV\326\275\257\332\024>(I+\275\225\361\351\341\272}\276\014\016\205\275:>\004\276\273\272\350>\024\215\010?oU\341\276\274\225\320\275\023-\304\276-C\243\275\215\017\214\276\324D\036\275\372\315?=\302\332\216>\177\216V>\227\031I\276\217\306->\341\214K=0\223{>Q\357\326=\272f_=\311\301;?OR\352\276\375\030\330\275\205\222\267=\210\213\007?G\230\201>Or\304\275QO]\2751\201\367\274T\370\033>7\365\372\276\207\027P\274\215\275\"\276\265\312-=\262\236\n=\350\336\375=J\001\277\275\236\001L\275\3645\222=\221\361}=yg\002\274}\n\306=F\352!\274\234C\320<5\374}>W\005V?\353\315\337\276\304|\370=}U\177\2766\010\276\276\n\255Z=\315} >[b\\\274\360\244\344=q\315q>\331D!\2770}\215\276X%2>=\253\234\274_\316\304\274\305\221==/\'3\276\347J\245=\374f\240\2753\254I>S\222\373\276\377B\351=na\270>\322\375}=\256\t\002>\362\202(>\322\205\341>:\307\355=gS\333\276O\357\222>\324\262s>q%\313=\313\221~\275\002\333\353\276\343\'\231=\306\335\024\275\221z*<\201\377\267=\021\307\002>T\344\304\274\365|\007\276\302wP\276\2363\036=\013\232$<\365\207\312;z\210\320\275\320\255\031\275\010\000\346=\300\312\306\275l\201\004>\r\346x>,\235>\275&\355\236>\035[e\277s\314@?\023\370\000\277{v\363=5\tX\276\211\031W\275\346\021T>\230?\250>D\010\004\276\006\244a=\206\332\010\277\347s:>.\316\374<\225\223\020\275g)`>\024\021m>LM\212>\202\016\326\276\313\307\343\276aH\334>\363\225\352>Q\231\231>z\214\033>\024\216W>\332,\274>\314\301(>4\347\270>\017\305\027\275\256\371\275=~\2640=\374v\017>8\305^\276\216\264\322\276\312Q\245=\366O\235>\202{\r>c\237\222\276\373\222p=O\347t\276\222\253\204\276\227\314\252\276=Q\202=\346C\024?\333\024\255\273\234\325\325\275\312N9>\234[\214>\234\243\037=\365\363\333=\022E\312\275\305H\212=\2266A>\323\'\343>\276A\256>\253\276k=N\227_>zd\007\276vdr\276\314\373\335\275\262J \277\265#\242=U\332\320\276\231Y\336\276\331\013\220\275\000\201\222>Y\370\261>\3423\r\276\302\267\223>W\224M>\022Z\242\276q\220\177>\261b\006\277\214\002%\276\256a\215\274Yo\317\275e\311M\276k\376@\2772\267\006\277\032m\307\276\273\362\272\275\256\344\260>!B\201\276-\235\202\276\353\320\324\276{\360\236>\255^t\276y*g\276\340c\232iCw\276K\003\277\276\227^F\276\030f\223>\304\210f=\034T\310\276\034\020\231\275\213 
\314\274@\370\265=2\224\025\2760\320t;+\212W>\333\263\203\276.\272\014>\251Z<\276Pi/\275J\007\327=\364(\255=\264\r\304\276{\3061\277\216|1>z<\203\276\355\330l\276|9\320=\302\240H>\306O\332>\345\325}\276\255Z\r>\227z\264=\212ee\242q\331>\344\354\272<\205\215q\275\034a\350\276\331\321\246>\000Q\r\275Bty>\266N<=[\264\337\275\200\tC\276\310\365\316>\310\347\201>\263[\370iH1\276:\\\206\276\272\274\374=\215\201\345\177\341\244>?\332\304\276\306C\271\274R\363X\276w\305\216>\364\335v;\300\222\256\276\033z\344\276Y\376C=\2579>\276\010\336\006>\020%\305><3\027\274\340\374\244>~\030$?%>&>>\306\321\276N\371\021\276Z\214\233>\301\177\366\275\361\200\"\276\305^\343>\010N\313\274\210\214\231\276\205\024>>\250\254\317\274p\021\'>\275\244\215=\260\304\004\277\355W\365\275\323U\212>h\353;\276j\275\305=2\243\373=\t\333\347\275!\2768\275\325\024\303=:\224:\275;\210\260\276\360\216\024\276o}\244\275\275}\363>\2251\030>X\275\204\276;\211\017\276\272\007\211\275f\311\313\274\201\301\220\274\246\007<\273C\001\036\275 1\276>\001:\006\277\032\270\306>\031G\026\2774aW\277\361\3654>\231\356\n>#\256c\276h\361(\2767\272\207>\211\301X=\024my\275\310\243\276\276\276\360\350\275d\244\355=\353\035r>/\376\034\276D\326 \276\006>\016\277\022)\005\277]\317j>\302<\341;bl\301\276b\237q\275\004qE\274\230K\031\276RFI\276\263\006B\274\325z\327\275#\335\210\2763\270 \276\225l|\276\';\234\275\003\030\267\275q\321\032\276\260\251\377=\276\303\203>\217\361\234=\255\351\016?5\317\331\274T/\320\275=\204\275\275\214\220N>c\211;>i\355\302=\202\232\360>L\342\310=\221\267\010\276X\247+?\031\004e\275e$\267>\031\237x?\354\323\214\274\363i\305\2761f\226\276<}\271>\330\"\356\275\204?\314=3\003\274\2767\340\365>\337\243\027?<\344\210\276\256\3468\276\212c(\277\241^\241\275\375\327\375\276p \341\274U\344\002=\020\310\374=\241h\376=bi?>\303\354\277\274\033\375\376= \224\331\276^2\321\276k\014\232>)\242\344>p_)\276e\310\202\276\251\234\000\274\242\270\235\275\340\246\216\276\252(\247\276\023F\037\277W\005\027\277jhR\274`\331\t\274\235\033%\2756\276+\277h\035\310=\\\364\203\276\226\241~=\2143\236=f\205a\276dY\377\276\360\206r=\250#\254\2766\250?\275}\222\227>\242P\203\276\235\025\221=\333\201\034\277z\255\224>\214^\007>\371\213\037\277\246\371\263>;\327\330>\361\300\261\275P\206\016<\240p\333\276\315\311\r\275X;\245<=\222\261>F\356\274>\n\337O?\004\361D>\212\352,>H\216\177=\250\255C\276\255\000\214=.B\211=\270Q\002\276Q\253[>\013\236D=^\316\006>\274wZ>\313yF>\224\334\200=8\302O>\213o\231\273\261\277\021\277\023\017\246=\256\0167\276\216a\371=\361B\374\2761\301.=\022\267o\276\034\362Q=B\264\025\276\323\r\020\277\332gL=\0326\177=2\202\222>\320;\026=&i\372={\331\351\315s\246=\020L\003\275J\334\240=\264\214\243>\0108\225>` \352=\024\364\000>@o\220>g(\310<\024\312\314\276\273I\254\2759j\210>*;q\275@3n\276\255\324\371>\037\350\214>u\313\256=N\023_\2763\202\351\275\275\251r\275g2\026?8?\317\273Z\377\316\276Z\307\266<\010\344\376\274?\267\325>\273\332f\276~Y\301>\252\373\310\276\256\202{\276\304\310-\276\334\313Y>\003\255\261\276*\330\336=\0250*>\367\360\010\276\202)[\275\273\273\353>\250\302\227\276cV\020\276\375\256N\276\253\205\205>\237\247\257>3\243\226\2767\202\204\276\002\330\343\276\241f\232>\241\227\235\2761\266\226>\353!i\275\037\346\022\2762\027g\275d\243\245\276\245L\031>=p\021\275\024\335\014>\352\255\344\275]\226\\>#k-\276\375 
\275>\362\227\\?\"T\273\276\305U\n\276\3153A>\274\255-\276D\212\233>\346\346\227\276\264[$>\335\364\366<-?\177>\266C\356\276/g\333;\306ah>W\253\037\2760U.\274\267?7=\177\204\307\276{\365r>G\216\r\276Q\301I>\312*\327>\233X\376>\213k\214\275\213\246\225\276B\327\323\276&\365\004>_mp\276\347\023\322<\273A\r=\250\2479<{\377\231>pt\241\276I\023\251>>\240\220\275\356\344\013>J+\223\275F\267\302>\260\365\177\275\317JX\276z*j=\333\206\002>\333\273\020?\013\302\n?\'\013\241=w\374\201>\312\363\301\275h}\244\275\227\270\030\276e\314M?w\304Y\276A4\021>\032\302\205>\216F\024\273\203\306\205\276{\315\217\276\256\251\237>\370\216\355=\346B\232\276&\022\330\276\225\334\203\310\025\000>[\230#\277\004\005g>\255\270Y>\340\032\237\276\247\017<>\370\313\310\276]\242\230>\322\252\244>X\234#\277\316h\214=l]\366\275x\317Y\276\363\272\205\276\004\302\027\276\341t-\277}\253\270=\363M.?\237\005\330=\200\231\224>p(\261=\254\220\025>O[*>\302\217S\276s\020\031\277\263\356<\276\347U\354=W>\307\275q\311\303=)\322\253>9.\226\276\252`\252>\245\3436=\333\242o<\016\2579\277\3746\343>e\031\036\276\305\350,\2770\322C\275\ng\243\276,\231$?\010-\016\275$\372\355=\352\275\336\274L\025\206\276\265\370\346\276\353\213\203=\033\274\347A\201\324>gYt>?~w\276\246\332F\276%\350\231>\202\336g>\271\270\371\276>\025\r\275Z\215`;\332\027m=G\034%\276\225\330\210\275\201E\022\276\347\231\003?g\362\331>>\214f>\004\242\276>IU\334\276\377\326@>\231q\363=\237\260\242>V\306\242\276\374\240\350\275P\214k\275\357\314\277\276\301ce\276\016\032\213\276H\t\210\276\265\315\276>#\212\246>\304%\311>\371m\253=J\033\354=\366x\027\274\376[d\276\225\370\251\275~\360\017\276b)\253\275z\373-\276\307\352E>\233\261\234=\267\355\255\275\217\234\005>\253\255\214>rn\361\275r\306\234>\201\'H>\362\200\t\277\353\277\244\274\233l\252<\373\372\257\275\371\216\206\276\315\316\255<\nN\324\276\227N\207=\014p$>\266\276\020\276\335\013\263<\300\237\003?\251\210N\276o\026\276\275\375vH>\260\203\233\276)\037Y>\007\233}\273V$\252\275\014\363\242\276`(,>,\014\260\275\234\177k>\177@\\\276\374\257\200\276q\232t\276G\031\210\276\013\215@\277\322\373\266=V;3=\257\2467\276\216\343\231\276\256\214\206=\252\267\277=^\223H\277\362v\226\276\026\035h>\016\206\032=4\261\211>\360LI\276\310\320)\276\014c\024\273\370\272\004\276\371\343\354\275\256\033\265\2766)\347>\204\317\036\276\264\341\222>U\226S><(\323\276v\247\377=*\2503=3\\\023\275\007\350\322\276\237s\330\2753 L\276B\264\265>3\025\026\276~\227\241>8\331\301>#+\020=\033\364\'>1\377\236\276\212+\343\276\373q\001\276.\026\316\275+2:>\000\260N\276\n\235\001?\376\350\006?\377n\364>\332\342\000>0|\023\276\340\334\201>8\251\242\276\306\345 \273\006<\201=?8\346\275~J\371<9\337\022\276~\006\222>\212\3254\275V\247\205>\237\245\236\275/\202\320<\257\206\264>P\340!\275\255\232z\277mkG\276\207\021\252\276\007\376\005>\230\'\377\275\316\006\202\276\363\237\253\276P\314\264\276l\215\227\276f\014e>I\370|\276\t />Y\273:\276\377\203\360\274\203\003\320\275\344|\n>\317\304X>\207\241\231\276\226@F\276\t=\222>\017\254\320>\032\355 \273!\024\307\274\225\177D\276u\372\222=\210i\206=\321\333b\276[\"\037\276f\006\345>\341\3256>\000\210\016=X\250\335\276h\336\020\277p\254\376>\236]\305\276\t\370O\2747\320\330>\233z\023\277\205\201/>;\027\231<\245\206s\276X)&\277Hqj>c5\357<\340\341\224\275\270\366\250\276_`\257>\3633\233\274\327M\020>\ng\272>8w\254\274\274+\207>B\250\357=c\354\365\275\034\036$\276i\256\220<\301\351\031<\204\330\205>\343+\317=$\351\240\275\\\221s>~\313\214=\024Z 
\276\364\303\314>S\252\030>G\334\202\276\372\243\374\275\223\325q\276\307\203\035\275\303U`>\336\233\326\276\315I\t>\326\342\300=\271B\"=%V\236\276\306n\203>&J\257\274\236\322+\276\353\034U\276o\334\370\276\023\t+\275\265z~>\375k_\277\261S9>\267\230\231\276\331tZ=\'\347\351\276\363\006\220>\352\376\364\276&u\332\275\246\327\370\276\313\320w\276X$\231>0\305\310\276\330\337\026\277J\265\213>F}1?s\303 \276N\037\037\2768\334x\275\315\365\030>\341\205k\272\274\365\263=b\370\033\276R\212W>D[\377>\032\0332>^\206\007>O\037\220>:\016\332>\214\242\014\275Fzo\274\324\364@\276\3522\327=\331\236\203\276\240)\023\276\026\355;>l\301\210\276Y\206\263\276{\004\314\276_\017\352=L>\342\275\213\371\221\276\337\022\216\2759P\r>h\240\247>,\022}<\312\010\265\274\275k\030=\321?\230>U\330\\\276\351\327\376=\265/\257\276M\032\203\275}\327*>\204\215t>\004\231\215>\207y\037>y\314/\276,\030\007\277\361\035\253\276\016q\002\276\0025\310\275\242\225\024>\023\363\002>\020\261{>\371\323\233>\322\375\277>G9X\275\235\225\345\276\252^D>H\316\265\276\212\235\325=\370\016\263=(\177\276=\374D\026\277\357V\272\274\263\373\304>\2207\020\277\360\016\227\016\\`?\327\312\274\275\355\376\204=\232\037m\276\243;]\276~\021*>\225~Y\276#\214\245>\036\307*\276 H\312\276\004\2237>\034\216\336>\251q\275\275\342\357\020=\200\332\031>\211?1>\010\365P=Z5\373>\0233\021>\275\251\000?\362i\"?\222\362\346\276fg\013?_1\336\276c\252p\275\330b(>[\035\213\275\277\201:>\014Ek\275_oQ\275\017\222\351=\250\315\026?\267h\204\275\271\035\372>~\303\246>t\311\277=.\363\311\312\310\034>mG\311>\304\336)\277\237\0321?,\245\177=\266*\374\273\302\t\356\347\331\376\276;\302\226\276\325:\371\275\376\222\343=\361\301p\275)\265\022\276\343\371\335>\017`\230\276-\340\031\277\021\365\322;\250\370k<|\007\365\276\315\314@\275\235M\301\276i\177\216\275C~\243>$\023\265\275\361\177\267=\232r\360>\312s\336\273b\212\241>\005\003\010?\364\355\006<\235e!>\366B\354\275\252\222\027>\024\323\203\2766\273\303>\302\206q\276S\201V\274\325o\007>\212\035\035>d\213\254\276\254\007\253>Q\177\232>\224\007\002\276\002\034\213\275#>\004\277C\265y\276\300\325\331>M<~>\007\200h>RP\031\276\264ZU\275DD\336\275\250A\"\276\260.\217\276\023H\025?F\2118>o\342G\276\354[\370\275\277\263R>P\001[=\330\3770>\251\270\304>\374\007\322\276\216\216\276\272\002\021\277K\210\215=\021s\202>\274\362\216>d\327\306\276\211\030\017>3\023\234\275=w\216\276\226P\201\276 \345\004?\211=\207\275H\"\310\276\223\316\311>[v=?Xo\333>\322y\314\276\"u\004>\351Z\020\277\211\002\267\276\014\226\314\276\251Q\000>\177g+\276\305L^\276\361\033 \275\313\370\004?\222\275o>n\243\030\276^t\034>:\000\355>P\273,\277A\252\227\276X\205\242>\354\013\214>\261\345F\276\\\201\232\276\310\n\317>\346\356\226=(G\t\277A\2449>\231\0142\274\235\217N=\370\003~\275\030\220\273>\007]%>\036\"\346\2768\302\311>\247\240\242\275\351-\226\276gv\356<&\201\312>c\333\273>-y\212\276\323\200\254>\3136>>]\273\244>\367\377B?\242\364!>\354N\304=\252\376\016\274\323\032X?L\020e>\026\312g>\264t\026>\341\231\257\276I^\226>6i\222\276\367@\001?\220\212\303>Q\320\256>~\211E=G\247\243\276-\334\025=,\351\177\023a\021\276\031\260\261>4\237G>\036A\247\2751V\027>j\371A>\206\247L>n\210\r\275\035\331&>\314|T\276\\S\356\274\245\320\212\276\355\363\316;\302dc>\362\251\023\276\311\207\227\276\311!\224>\322N\031>\326 5\276\210\270\315\274\256\004\016? 
\370P>i\262\262>\371\242\200\275\367\006\336=\000w\233>\364\021\236\275=\247\000\2765\347\007\276\2711\033\277a\225\023\277A\001\365>\355|/\276\361\016\025>D\006\203=)\336\346>\230P\300\276\r\250\300>\375\251\305>0\311A>\266:\013\276\233\241i\276\366\350\261;\231;\362=\217\211\226>\007\331\203\276n\353\020<8\260\016\277\2701&\2769\321<\276~*\200\274\013\000\364>\004\362\274>\013\222\'\273\243\234\023=\201\004\257>v\005z\276~\356\270<&M\204\275\004\374?>\277G\362\275\014/\254=a\327\013?\223\000\351>\242\364K\276\356\004\235\275\226\327\002\276Q\373\314\276\303\330\"\2775\255\325\275\212\273/>\213\021\360=w\327\224=\265V\223;A\354j>\327\216C>\333\264$>\276*~\276\312;\360\276\346\371\221>\236\233\177>Q\262(\274\250\221\'\277\371\216\026\276\030\343\311\276rz\021\277\264\215\251\274Nq\222>\1771\206>\376\006\330>\374\036\221=\345ug>\266\313g>\331\340\315\275\232s\r=B\255\267\276\235%\256>\001\n;>\375\315\007\276 l\000\275n\252\244=,\236\304\274\035C\031>h`\375>\366\272\347<\333\306\217>)\341\202\276\327\202\305=\2745\377\274\230]D>;\322\260=<$J\276\357DN\275\317D\014\276\n\204k>#v\270=\265\373\254\273\212k\215\276X\212\317\276\312M\222\276\357ph\276\376<\232\2756:\r>B\347\240>G\374\n>\332a\242>\270\3255>\315\353\266\273m\261\314\275\357\005_\276\337\254\215\276)\331\314\276\213\377\265=J\001\004=\275\372J$t8\275\234B\337=e\032,>CM1=\245S3\276\233\247{=\356\305w\275}\225i\276\233\222\306;\036\2404\276A&\363\251L#\276\026x\034>\255\307k\276|\273\303\274\350\273\377\275\216\307\276\276\2400\220\276\177\006\202\276\256\374\023>{\202\376=\365\025\350\276\2330h\275\212\270H\276\367\212\020\276\016\377\355\275\377\007\314\276c\344\300>\234\211\226\275B[>>\213\3473\277\270\361\007?\033\356\226\276\351M\234>\353\034Y\276\016\031\036>N\031\217\275/\025\323>\276\203{\276\352\001V>5^\206=\211\232\005>6\006\200>\371|\235>\223H\014\276\334\n\004?\357W\340>\227\346\370\276?\217N\276\221=~>w*\031;\210\240\233>8\212\200>L\342\004\277\026\026\'=\n\351\252>\314\377\251;\317\323\217>\303\204\264>m\326\315\275w\306\357=V\177\000\276\036t0>3\223\026\27711\260>\302\320~;tXT\276\005z\251\276|\035\023\276Q6\263\276\346]\263\276~\275\232\276%>\'\277{\240\317>\242\177\244\274u\033R\274\031\356\"?\243\327\324\276cZ\252=\230\020\004\276\223R)\276V\212v>\212\3020\275\301Y\365=L3\307\276\205\261Z\276\227\0339\276\213\363Q\276\361\232F=\026\221O\276\247-\016\274\021[\230>:4}>LHp\276\033\366\214=\327\000c>1R\221>\312md>X\350-\276\363\214\202\276\224\302\224\275\213\\\036\277\014\253\260>\366H!>\365TM>5y?\275\025\005\254=\355l,\276\241\270\314>e!\354\276\031\341@>\202D\033\277\360\372\000?\001K\001\276\357\361b=y\034\224\276\226R\277\275\020\037S\276.\230{=-\335+>\316\301x>gH\006?j\211\273\275\'-1\276\200\252\005>N\326\201=$\3337>MU!\276\335\254\201>\346\024\352>\250\371\306=0*\351=j\374U\276\227\2647>\006\227\342\2750\302t>#n\211=.+H\276_f\251>Cx\006>\006\360\241>F\303[\276\226\252\023\276\002X:\276\262\274\371\275\\\306\271>\300\271\260\276\311\003\035\276\330a\257\276\031\234\014\277%\270\230>.#\362>\213\300\014?\304^C>\333\350\t>\230\261\002\277\267\234\230>\206\324\356\275\332\200\215\274\337\312&\275_\004\245\276\344\311\010\276\017\356\t\275\373R\035\275Y\237\214=\374\307\241\276\215\274\005=\023\"+\276\323\216\260>\267\007>?h`\202\276\017I\222>\031j\245>\0240\334\276,^\336>\236\320\2549\034n\006\277\213\036\242=C,\010=\320i\312\276\2554O\276\221\205U\276\350m\013\277\215\316\215=h\215v>\201\030\272\276Z>s\274\247\031]>p\273\"=\376dw<\006u\253>\352\203\337>\254\247h>\257\261\273\276.\323\357<`^\207=\0144\252=h\265\225\276
\025Y\200\276\303x#\276\332\312\334=-\252<>\350\002p>\334\301\266\275\365\3750\276q\000+\276\014\177\270\275b,\357\256\230\331\275\365\352\330\276h*w\276Gt\343\276\245\202\331\275\347\315\370>%%o>M2\020\276\267UE\276\256\373*\277\001\377\002\276\316\335\351>\\\025\033=:u\315\276i\254D\275\'\275\250\276\337\342\227\2765\3470\275]A\035=\204\004\366=\357\200\203\276\224\021\247>l\357\255\276\016\247_>\322ck\276\001V\265\276\342\215\271=.\006\004\277\000\245\326\275\272o\034?\336\000\243>\372\335\277>\235@\250\276\002\276\225\276\367\023k\274\002x\005>&\266\233;\3770\276>\355\022\341>\035@e\276\342\310\017>\2711\370>\274\226J=8,b>\026\211\343\276\037R\270>\3224\r\277h=\n\275\265PF>i}\025\2770g\036>Fg\276>]\304\204>\323.\321\276e\314\310\2762V_>\343\002\023\277\366v\352>\205\210i\276G\361\227\276\206\310\261\275W;u<\262m\257>h\250\311\276\017\303\003\277d\034W<\004\357^>/\032\\>~\340,\276\246\026\376\274\222\002\267\274\250\3016>\365\205\315\276\213\002\265>\244\244\275=\314\263B?Z\255\224\276a\317\005=v<\367>\022a\024\276\363\334,\276?m\211\273\350|\351\273\3671\030?\205\200k>,\227\274\276\'\005\373\275\360O\306=\336N\313\276cd}>\346X\376=\014S\017\276y\345\375\275Wv\225>-\372\233\275\215\371\030\276\025#\315=\245\357\020\275i\367\031?kdh>\241 \254\276Xm\267\276\236P\n\276A\254\265\276\346]M\275\350(\276\276\354\005>\276\320\304E\276\205N\276>k\263\234\276\010$a>\337>\243>Xl9\276\001WX>\201\267\177=a\352\005\2778\027\'\276\"U;\276\203\377\014>\347\220J\274\023\033\323\275\014\033\022\276\343z\226\276\"\037\223\275\270I\210>\245\\W=e\300\211=W\221(\276 \355\257\274\002Z\021\276\264v*?D\344\263>M\230\260\274}E\357<\312\312\230\275\321\036\327>\371F\312\276\n\014E\2764s\205>*2\213\275\205!\330\272\023a\203\276.\352\n>Z\262\230\276\004;\265\276\352|\206>K\233\300>\302\215\266=\236\321\357\276\206\233\256>\207\204@=P\346\346\276\217\350\001?-\035\333\276\211\317\252\275V\267$?j;\262\275\277A@\276\346\273\261>\005\224%\276|\315\035>\342\203o\275\246\367\251\275\027M\025?$t\200\276\256\037Y=\270\307\n>\245\265\240>\336\217\246=\363s~\276\222\342\211\276\177\014B\276\037\020m\276\247\321\254\275%\034.\276\200\326\222\276+\363I\276\376)\217\276O\005\010\276Sx\273>\n\r\017\276s\231M\274\023(\251>\320\347\336>\035\360\342\273\206m\263><\312t=\245\226\324\274Ehp\276\007\227!\275s\021h>\376\325\271\276\261\026\257>\341\230\321\275t\253D\276\247\235\036>\3247\031\276B\225\265=\031\366M\276T\360\316>\223\344z=a\216\216>\021G\334>\365\306\317<\232gJ\276\313\252\211\276\323U \2743\260\307\275\312!\244\276V\375Z:o\200J>\241o\225\275O\345\024\277\032\257j>i\233w\276\023Wm=]\371\305=]\322\000\276\333\271w\276~f\330\276\001>\313=\005/\242\274yN\013\277\027\354\n\277\314\341\026=\311\245j=\225u\325\274\310\3636>\033;^\276\245N\213>\274\347\231\276\237S\031?\307\344$>\332\316\317>\345#\262\276\250\330\251>\354\362^<\273~T\237\3407>h\347\'\277\320{$??\332M>\213\315\252<\241$\035\275 
\330\017>\024`\346\275w\371\347\2750\326\205>^$?>\016z\004\275\003\367\000=\307\014x\275qv\327=\221}|>(\217m>\022\246>\2759d\327>\207Y`\276\337\271\322<\tf\272\276\365{(\276wN\363\275F\2020=vC\026?;\330\356=o\026\r\274\003\201R\276F\235\223=\314\356\027\276\365*.\2764\274A=O+i>nZ\246\275~\351\227>\236\3522=($\t?\215\002j>\t\340`>\344\362.>+\014\003>\326\356\016\276\212\370\252\275\206|\244\275\251\276\202>\035\305p\275\256\353\232>\323\335\253\277\207\006\'=\365\020(?\322\032\317\276\301\262\314\276e\315\215\276\2067\235>\337x\243\275\234\253\026\275`\203\037\277\322\220z\276\035\262\317>)\324\010\276\237\255\000\275\254-\222>\350U\273\276\244Z\342\274H-\353\275\233\\~=5\240\236\276\037\252\033>\227\004\270=z\233\244=C\014\242\341\207{>\360d\242>pa\r>\242\261\r=\037,\271>z4\272\275G\314\313\276a\333\320\276$\354)\275r\353\243=m\250\002\nu\033\277p\320\343>_\325?<;\322\"\276\273\254\016\277\357\333\220>hL\326\276L\347\306\275\325\334\022;\201\301!>ON\032\275\340\333\324\276A\305U\275Z\031Q>^\252\272>\021\234\346=\3006O?RI\364=d\257\376\275\340p0>g\025\234>_\351b\2765\337W\275\321dr\276mLc\277y\306\223>$\000\r\2771\330\201=y\332\000\277\211E\233\276.+\312>\030O\243>l\023>\276\215\301\356<\005\303\320>C\335\200\275\202\322\232>\356\036[\276\325o\334=2\261:>\205\331\272=\207\220r\276\255\215\360\275\200\020\242>\037\030\247\275\315\210\246\276\237\355u\276\200\020\323\276\317(5\276\007\207j;\252\332\316\276\225\027\270\276/\245\373\275\177J\371>\334X\216\276\253\230\204=\320\026\002>\246\013e\274`\314\031\276\274B\207>t\242\255\276\340,\202>\022\013\261=Rp\233>\374\036\n\277{\365?\274@\016\036\276;#\000\277`u/=\302\201\t>\330\200v>\246Z\225\276\263_?\276\227B\324\276\272j\374=\375%\364>G*\255\276\355q\247\276\207A\007>\021\227}>\3611\246>\274\313\303\276T\232\007<\2348\314\276\3304\366\276\305\017\341\276G<\205\276\363\333*>\320\025\201>y\310\222=(\230B\277<\353\322>\007\252\316\274Hq\001>e\316\272\274\300\244&=\256\'\346\275\347\301\225=X\341[>\367|\037\276\r\372\t\277;\366\005>V\367\036\276\340\004b>:\322j\276\0241n=T\017y\276vwI>n\253\222>\345&\204=>h\003=\234;\203\274\366\354\266>\3633\331;\300\330\200>\207\266!<7\371\226>=\271\023>\225p\033\276\363\304\372\275c\250\203\276\305\377\224\275\246n\207>\212j\006>\364\242\370=_al>A|\021?\003\270m\275,\2422>8\260|=\346p\263\276uC\213>5\257\263\276x\334\332\275z\244\366=\'\247x>\"\233\010>J\271i\276\013r\033\276\252\257\362>\033M\206\276\313\231\027?\nH\210<\374\244\302\275@\335\271\276\253\227\010>q\377\010>\306\330m\276\304\266\201\274\375^A\275\\(\207 
\001\230>@v\037\272\314\010\025\275\006i\036\275P6\264>d\201\311\276\244\277J\276k\245\271=a!y\276\263\341W>\344\360\\=Z\277\365<74\013\276!\252\323\276\255\341\236=4\264\271>B\250\255=\200\036\037\276k\274\213>[\3755\276^h(\276\301\231D\276D\334\332;j\316Y\276/q\241>\t\314\024>}\010\251>)/[\275-\270n\275n9\206\276X\363+\276\224\t\026?=R\\>\243^\222=\360w\227>\327a\254\276;\223\007\2765\032\025=:\263\223\274\"(8>Cp\272>\353\377@>@\265\273\276\374\330b>\341<\203>f\366\277=(\245\273\276m+@\276s5\r>a\\\224\276\263\340\305\276\334\362\000\275m\266\337>WKP=jW\202=\340\021\013?\356\236\350\275:\370\204\275H\302t\276\312\315\200\275^\276\'\276o\300\035\275,\177\330\275\363\'Z\276\253(\375\276f\303\220\275\300\224\003\276\204\232Z\276\225\207\303\274K\004\267\276\271\020~\276\023\206\323\275\013ly\276\315\320\261\271t\n\013\276\246Y\264=YJ\246\275}\365\317\275\342\231\030\276\000H\030>\362tn\275\315\360%?Q\372\020?\346\275\204>9I\374\276t\016\365\275\253\313\010\276t~\343\274\213\341\236\276^\3160\276x\037\203>\373\214\374>\247Q.>Q\265\206\275k\262\236\276\356t\356[LK=\273\323\266>\227W\320>w\215P\274\320KJ>\223>\007\276>\037\021\277f\027l=\"#9\275v\265\304>\227\316n=\205\226$\276+\252\021\277J\037\244>\t\261u>\212\177\300\276Kl\235>\220\200\'\276\317D\344\276\331\203\003?k\223\310\275p\3308=\010\3070=\351K\326>\316\376\010?\302c,>\306=O>Nb\204\276\366\226\366\274\006\r\325\275Z\351\344\275}\230\230\276h~j\276\374\220<\277\'\022\254>\207(\216=\303YP>\227|\220\276\367C\322>\226O\004\277\006\341\271< \301\344\275\035m\304>}\254\313>6\366\004\276Kh0\275\270\362\263\276W\330\213>\251\2765>\232\344\024\276\214\025N>\322\216\022=>\362\341\276\261k\331>\013r&>\253&C\276\227\210\035>\nj\213\276\256\311\367=Bq\237=\214b\006>\3376=>\305\000&\277bH\201\276>\305\n\276\307\254\033\276\320%\215=^\370\321\216@n\276\020U[\277\231\236F?@\327+?\221\263\010\277\n\335\236\276\274$\360\276\214\205\340\275\233\177N=\334\253e>\254b\001=\303g\361\275p\350\205\275k\341\206\275\357\357;>\247\330\212\274\262\0178>\027-\206=\365\302\331>\033\224\032>\335\213\366<\357\342H=\250\215\216>\322\347\031>\273g\363\276\225|\250\276\370\213b>\353\021\314>\376\026\031>2~y\024)\270>\\\352\022\276\223\177\020?\316A\242\275\2557\315>\332\212\273=@\2263\276\021Y\240>\244\\.=C\016%\276\\>d>\247\372l\275\322@\213=NU\337>\0014+=\377`\r\277U\344e\276Mq\236\275e\361\327\276\252\037\342>&!S>\272\234\327>9\243*\276X]\177\276\\b\020\276X@G>\243W\301\274\025)g\276\267\311\326=\024(\017\277\354L\231>\355\214j>\311j/\277\2728\021\276!2\234= ,.\276\273\016\335\276\010i\353\274\354\346f>\0260\002\275G\365\036>\324\251\317<\200\367\004\277\207\232!\276yK\225\276\016\253\355=o6!>\377\310a>\224\253\374\274Ne\324\275`\247\337=_5\272\273I\275\">\366I\212\276\300\016\013\276;\225\323<\357b\253=\364YO\276=\204\220=\204#\257>\264 K\276\257\210B\276\327\031\020?lyJ\276\177\207\006\277\207C\355\035\021\352\275\3600k\275\202O]>\203\3031>\354y\205\276\307@\304=<-\251>Jh\206\276\241\222!>\t\242\022\276\034H/\274\232\346\000? 
?\276>1\307\235>\202\020\230\275\022\000{=\274\271r=\216\214\220>f\260A=Z\331\325>\267\244\360\275\235\016N=\3572\0071?\360o\266>p\304v\276\023\'q<\323C-?\236\214}>\323\355\245\275\nGe=z\2351\277o\272\275\275m\347\357=\034$\225\276\177\245\200\276\020\376\267\2762\223&\276\035\315\330>s!?\276,\0100\2768\254\246\276\312\n\267\275\237y\'\276\224\\\203<\352\250\344\276\222\322P\275\001\313\231\276>v\222\275\265\240F?\325\250\021>\312#\305>\370\n\243\276\2305\330\275\351\001\027;~?E\276\322\247\242\276\322\366\217;\242\327\366<\257\365\346>\211k\220>\026\222~\276fg\020\277-\337c<\367,\031>\375\025\265\276\267\235\023>\301zU\276lI\326\276\204\307o\275\344=]>\333lT\276\034$\345>MR*?\243\273\300<\201\030{>\324\310`>\'a\203\276\237\014\037\276\267\310\360\274\246\367\371\276\315\014\014\276\342W\220>\'s}>r\254\225\276\372u\376=\321\372)=\315\372\022>\001\231f>\n\306\016>\251\302-\276\006\316\336<\003\204\203\276\342\360\370>\013\315\\\275\233\027\222=\367\037\304\276T\270\"=\245\020:?\204\3452>\014o\277\302>uM\r>t%\013>\351\326A?{`\202\276\032\010\213>5\231\227\27684\241>\360^\362\276e\261\374\276\325>\312>\263\324A\2762p\325\276\021d\\\276\007\364\241>\300.\250>\250+\n?\277j\302\276\352\032\214>\3470-?g\224\314>\274\302/\276\257$$>w\337\264=\310I\346\274\212\315v=C\257\312<\265\323\\\276\n2q=R0\252\276\272\321\266\276/\254\202>\212\331\222\274\326\356j>\n\301]>\035m\225>\314\366\217>h\260\277>\2629\021?\001\246\212=\3140\021\277\216\273\033=\r\346\222>E\270\232=HE\\>;\036\017\276\312\r\245\275\317\017\201\276\330iy; %\025\275)\035&>\033r\177=/\352\235\276z\200\205=\020\277\327= C\254\270\226i=\276\374\3656\275\003e\331>\020\016\310\276\210\365.\276i\323|\276\202\0309\275\221\351\273=|\217\t\277\340g\177>\311\261\312\275\220d\037\024\2531>\023\325M>r\035\0059#tE>B\303\013\277x[+\276G\236.>\374\326*\276[\254\027?\027\233\347=\013S\244\276\250t\256\275ea\204\277\206\210^?w\303\225>\n7\\\276(M\204>G\021\263\276\3603\213\275)\301\242\275W\2371\2765i\001? 
p\235\275\216]\212\276Y\331\274\276\252\365\241\276\226n\311>]\321}<\272i\212\276\003^\177\276\245\271<\274\300\023\354\274\212\333t>\225\213\026=W\035Y\276>\007\t?e\342\316>hk\255=\210\320\006>`\211\021\276r!\312\276\246\233\204>jM\234>\340\257\037\275e\026,\277\367\036*\274\246\025\232\276\343|4>l\212\252\275$\261\037\277q)l\275\351[5>\300P~>i\352\213>\3533\004\277\310\233U\276|\356\310\276\330\337\265>\313\037\303\275Y\264]>t\337\241>f\257\003?x\245\220\276v\000l\275^\340\214\276:\323R>\375\211\007\276 \347\323>;\361\241\274\t\247\265\276%\216\216>\221\352\037=j\236\247\276\177\333k\276.\263n>\360\r\373=c\010<>\363Ee>\273\316\300<\223\'\263>\247\031\006\277\256\224 ?!\244\354>\017\273\225>\340\n8=\263\361\">\217(_\276\364\227^\276\223v\240\276\367\246\341>e\005\223\276u[\203\275}\306\264>l\250\274\275B\246\\>7O\203>E9\217\276nN\361=>F\364=\371$\232\275\034y\374=\004\241\306\276\335\271Q?\021\"\317\276\235\250\033\277]\320B>\017\261\316\276\321\355\261=\026rN>\tG&\2770\201\311\275V\364\222\276\213\352\237\276OI\013\2779\322c\276\330\241\010\276\305)\257=\025\372/\275\276\t4>+\023\302\275\304\273\234< \026\220\276\340l\030>\r\223\027\275\302\232\004\277_\345\244\276\242\371\323=mL\007=L\206\236\275D\234\300\276\302\0212\276\271\3616>\303r\220=\322lU\275*8\323\274ro\244\274EF\263=\362C\3329KIY>\023{\371=\234\"O\276\322~S\275\343+l\276\206\366\210\275\263\366\315\275|q\312\275\323\331\361=\352\213\356\275\\\n?\277\304\013\001\277\255c\017\276\3059R\276\236\350\004\276\244\373\245=\215\373\031>$\001=>\211\344\356>\2713\244=\315\2523;\235+\007=n\365\002?w\311\214>\202\327\006=\013r-\276\037&\000?\3602\207=e\236\262\276\313\323\315>\202\352\326\274\356\320p>\t\257\274>\274y\257>6\355\223\276\203Lh>\027\340\260\273\006\032\353>\000\342\257\276rPx\276\026\nn\276+\035`\276\310-f> \210\375<\006h|\276\376\227\311\275*\354m>\r=\014\277\246\t\024?C@;\274~\345a\276\306\001\303\274\247U\214\275\036\272\301\276E\272\001?\262M\n\276$\250\375;\334f\247=\005\002\256=O\216\244=\276N\006>/\365\314\275\'b\211\274\334\356m\276\003h\026?@cb\276\315<\214\274.\213M=\031\206S=\310a\324\275[\324R\275\206> ?\267\223\'\275]b\274\275*_\300=r\356:\275!\317\025<\037\n\320\276\311\265\273\275#R\344>D\245\023>\312\002\034>\326\377G\276\204\240B\274\302\323~>\265\n\354\276\374\300->\325\n\371>Zs\350\2763\276u\276\033\357\233=\\.\246\276\031\366\001?/\336\232=3\027\251\275\375\254\272>\252\\!\274\326b\212\276^\336\n> \306\242>Cq\247\276\242&\245>\032\032\274>{A1\276g\373\310=\277h\004\276G\312\241\276\003P\216\275\037LG\277\325\371\231>BzW>O\000\200>\337\245\230>\242=\372\276\024B\025=\217\215~=\213J\217\276.f\330\276\3308\211>\n\324\201==\2149\276\345\264\215>\202\252\315>[\203n\276\366:\341=\033\030\212\276\362\212\235\275\232tk=\232\321\207\2765o\332=\250\353\304;=~\005?#_\264=\317\242\200\276&\365\261\2766?\210\275\334\273\215\276\224x\211\277}d\030>(Z/>\354\341\275\276\014\345v=\227\336\236>k\026v\275\223\032&=8:\306<<5\347=H\004\341=\323\256\001?\364X\354\276\300\014g>\273%z\276\264\213\206\276\013z\213\276\237n\205\274\006O\301=t\031c<6\377X=\ng 
>k\343\003?\317\243\276\273#N\265d\373u\276V+\274<\310\342\354\275\306\3318?\024|\214>\t\362\346=\300L\033\275\305\226o\276\254\013\363\275\352Y\347\274q+\344=A=\243>Y\230\262\276_Hv\276\264\233\373=>\306Q\276f\026\220=\221\213\327\276\034\343H>\017m\336\274\311u\340\276\374\276\210\276bQc>\001^\216>pb\257>m\277\356\276\375\226\310\274r4n\276}\037K\276\260\010\223>6\000x=l4\257>\355\265#\277:\313\221>\004N\216\275\232\313\206>\007\220b\276\320@M\276\035\365J>\ri\310>\t\005\256>\365\357\254>\302\332_\275\031U\001>\271\251\215=8\017\013\276\353\243F\276\257w\315>\313\007\027\276\235\254\200=\006#\340<\376[\251=v\243\203>\253\3247\276n\216\000?\250%\233>f\037[>6\321\233\276u\305\270>\240#w=\035\317J\2763\203|\276\230(\261>\267\333\240>\273\327\327/;\037\235\302=\2248=\274\207ri\276\227\313\243\276W\340==\265\347\252;\352\371\325>\217\241\370\275\226\013\356<.4\224>\344\331\020\277\031\251\202\276\263\304\252=\\G\310>dQ\220\276SR\203>\354\233\245>\217\341\260=\032\210\205\275\020\020\337\276\214\004\345\2750\346\372\275\371a\017?\351\311#\276v\227\235\276\246\352\005?\022IH>KQ\372\276\0140\254=\342\376\200\2764\360\361\276^\361.\276X\234.?e5\216\275\214\361\360\275l\342\371\274}\300\024>om\003?\331\313\302>A\247\027\276\030\016\212\275\304X\216>\217\271\200\274\345J\'\276\2574\243=\t\0145>\237\245E>\206\244E\276&\021\323=<\257\r?\026y\354=^\340c\275c}\237>\361\371\373=Y\277j>\357a->\213\347\202>m\215\204\276.\363A>\305(3=\220\222k>\361$\263\274\307/\t>\301\252<\274\347\360\030\276}\322\242=o\251\334\2754c\034\276\261\347\005\275\275\346\026>\231\254\367\276\013\001\257\276$l\241I\223\307>]+\374=\275\336\367>\215\227\262\276c\007\212\275\n\320I\276gK\022\276k\340\266\276x\322\027>+\233p\276\224\2749>\332X\315\2764&6=\326~\230>\001)m>K\211\037=\330\233>;>\324M=\010\260\025>\220\273@>-\261\037>\333\250\262:V\337q\275\036\304J?\264\2555\276\264\257_\276&\235\350<\004f\252>|\320\202\276fF\243\275\003\307\214>\374\264\306>\317\223\177\276\223?\002=\230UN_\326<\\\311\220\275\254p\016\276\014tR\276G\346\377\276Y\020w\276\035e\273\276;\026\024>\334=\364>\357\266\241\275\260\275\201>\326R\223=\313\353\240>\313\270\002\277\024\237\n\276\255\345\233>\3429\365=lF\215\276\361\017\224\272\347i\327>+x\200\276\003[L\276=A0>/4{\27602\225>\035\244\010>[Vp\275\204\226?>-\211N\276\363\033\272>)\300\221<4\334\237\276,\377\346>\366y\234>\306S\024\276F\230\237>\026\037%\276}\232\367<\345\005\027\276.\266\346= &\034>\262\306$\276\217\006#\276\215\0204>\227\324i>\007\245\255\275\331\026\333\275b\212\030=$u\037=\237\353H\276S[\312\275\315d2\275\026\347X\276%yl>\340\266\214=P4\226>\005\326\214\276\275n.\275\252\271@\277\301\005b\276\246]\025>\334D\337\275XS\233\276\002E\263\275\000\357\217\275;7\245>\215i\214=\313\016\321>~\325\276\275H \357\274\006\272\002\277X\205\314>\257\340\004?\351\366H\276\344}$>\020\253C\276\332)v>\026u\317\2767:\370=A\350\310\276\343\344\204\276\327j\017\276\2136\225\330\257\002=\317\307\347>\220\241W\276+4\000>v\005\034>\275\'\307\2762\241r>`\246\230>\320\010W\276^\251->1\006*?\342\245\207>\263\354\202=\332\"\010?\310\346\021\276\241\250\007<\271m\247\276\225&\366\275!\311\227\276\205\356\300\275,\331\304=\2722\225=f\311\373\276\223\007\341>u 
[serialized tensor data omitted: octal-escaped binary float weights from a test model file deleted in this diff; the byte content is not human-readable and is elided here]
X\002=\006\201\220=\355\312~\276x\331|>\022\237,>\251\374\310=yTU\276\376\240\370\276w\007\243<\305\351\302\276\371`\223\274\216\353\232=\375*\264\276\202\016\273\275\003\203\210>\\\271\334\275\375~\017\275\225R\270=\222G\361=;6\235>\243;6>\300`E\276[\337L\276\231\240\014?\365\0041>6E#>]Q\005\277i`\366\276\361\210\342>\306\277g=|\252\013\277\001\335R>\313\312\317\276\251\212h\276\215E/=r+\371\2758\000V>\366\234\371\276{Q\256>kH\010\276^\004(>-L\016?\227\236\200>\"\213J\275hdD>~\222E<\'\014\334\275\006_G\275`\230\333=v0\204=\037i\177>\317\303\364\276\004\211\254>\024\330\332\275\254>\006<\357f\212=\300\014\330>L\022\255\272nS?\276\'\337\t\276\235\305Q=xjf;\257d\243\276\024{\216\276\263\354~>\370\341\213=\312_\002?R:_\277\247 \362\276\372\016\250>\336z\250=u\254\305=\320\023\203>\244D1\275\321~\026>Zvu\276\213\245\317>i\275\032\276\200v\307\2759S\206>zu#?\234R\331\276\202\343g\276\272e\301=\315=\274\276\037\353\003>\003h\202>\370j\037><}\363:s\001\243\276O\035p\274w\033\226>\357h\025>8\000\305>\375\242\016\276\007\312\201>\027\262\265=\244ut>\367\177#=I\2542\276\221[\334\275 a\024=rN\177\274\341\024\372\273B\3601=\300W\217\276C,\316=\21125>\316\232\220>_\222y\276|I\361\275\\\306\273\275\033\371\216<\304x)>\336\211\323=\005[ \277\304x\306\274\360\n\374;\014\247\227\276\002P\023>\241+6=m\266\213=v\035\202=TX\021>\204`\245>\372Ve\275\266\301+=\307\235k\275\317\347\256\276(\001\214>\"\256\037\277\363\250\033\276\002\001\222=\000b\262\275FUv>P\344K>kP*\277\233b\336\275\370\326e<\006\225\003\276\256\206\003\275\355\361\002>6\203.\277\256\013\037>\021\302\207<\344\332k\276<\240\225=\'\266==w\256\013\276\366(\221\276\021gE>\325D\307>3x\"\277}%\013\276\210\222\014\276\005\265\303>p\342F>\031o\276\275\005*$>4;\320\272\014T\212\2763V\036>\332\236\220=\356\3639\276\002\367\340\276\330\351\206>I\3707\276\014\310\016\276b\327\002\276t\303\227=\355.\213\276\021\212C\275!\212\202>U\375\013=\226\013\303>\206\036\267>b\362\201\033\252I=\246\007\001\277\316\333\244>vr\201\276\371\022\n>q\"*\277\020\342W>\014\232\214=A\360\016>2\337,>SQS=Z\022\177>\357\013\214\275l42?:(\320\275t\250\267\276\362j\022> 
XF\276\223v*\276\227\030\327=\246}Q>9}e\274!\303\224\276\370\'\307>|\375C\274xH\357\275P1\213;\200\005\247\276K\364u=1\264\220?l\014\336=-\n\007?\3255\006?\013F1>\354\326\306>\216\314\244>$\213\327>\253{\346=R\277A\276\276#f=D\245A\276\340\304E>Z\2331?\032m\304>\261\214\353\276r\363R\275\376\240\232\276u0\200\276\246yJ>\214\335S\275/5\021\27686\007\276=-\342>\362r-\277\010\374I\276\276\003q>\014/\017>]\257\225\276\240\261\340\276\017$R>9\350\315\276\206C\354>\274\017\335\2764DO\276E\340l\2755\250d\276\343\361\252>\201\227\032?\030\364F\276\000\004\267\274\037D\240>!\345\264\273\266#\312\271\022\264B=\321sI?\230NP\276K\343\374=]\326\257=`RA\276\260\030\027\277L\352\234\276\261<\220=\345\201\222=\025\2660=m\365\273>\030\317\253>\332\344+>mz\223\276}TW\276\272\336*\277\217\002L?F\tE>\256\027\213\275\200\226\324\275\333\243\231\275\3065F>\212\323\255=\242q\036\277gL\303\276\335\005\255\276m}\355\275%G}>\341w\000?\274\007\230\276\213l\256>J\256z\276o\006M>\347\223\216\276\204\257\223\276\303\262+<\306\204K\277}\364+?\351\254K>s\311\020?\243\321]>\312:N\276\023\277\326>\213\315\016>h\001\\>\371\017\211\276\374w=>\207\032?\275\222e\203\276\003j\313\276a[\347\272\304\255\242\2765\216\265\275\226N\346\276v6\230>\231NY=~z\000\276I\331r=\203\263\206\274N\350;=\252\032\317\275\354\260\343\276\250\273#>J\235Z9\356D~>\210\357\213>#\031\226\276\202G\203>\276r5>\263\255\245<\224\205\221=\007\t\016>\333\227\203\276\277o\225\276\245,F\277\215~\356>\252\245\347>\373!\024>\226\356\301\2753\373\374>2\004K\2766\342\341\276\264\3149> \263,>\360n\212\276x\342\322\276\3447y\275\200\016\322\274\021\230\323\275\031\360\n\277L.B>\223\030\247=\271\375h\276R\215\365=\002\246(\276L\014\r\276\":\312\275\373\336d>M\265\374\276\351Y\002\276v\036\207\276\264g\236=\017\022N\275\324\272\021\275\262)\n\276\037\n\026>\342o\373\275Y\367\027\275D\270\371\274]1C>\251K\312\276`\215\370\2732\000\'>\272\004+\276\024\322\336>\214?!?\226W\220\276t\346\034\275y\361?\275\246\220\324=\265\302\303=\306\\\023>u\0275\276+\262>=[\313\213>\353U6\276Z\335)=\006\350\374=\305\310K=P>\201\276\257\340\277\275\344\264\246\275\213>\001\277\212\203\307>\361-\344>\355\325B\277\204ry>l\212\220>\221x_=y~\223\274\203~\223\276\235\243T\275\224\245\317=Am\227\276V\257\313>\326\312|>\325le=/r\355\2764\231j=\206A\243=\253*\214>\037\313\n\276\203r\032>\\\006z>-\355\217\276\274\276\305=\010|\255\276]$\026\276\264W\037?\323X\315\274zb*?\021\207\206\276\0373\207>\253r\021?\372\326=?\010\2121\276a\233\002?\'O\270>\223\021/>y\204\250\276\004\241\304>\236,3=\235_\211>\371\2248>\330o\332\275\236l\372>\242\307\306>B\3060>V\003\001>-\303\005>vsp=\214T\301\275\220R6\276\312y=\276\210\000b>m\275\026?u\312\036\277\334\340\016>\002\r\350\276\3147\006?J0\001\277\003\307\320>\201u\305\276\263\277{\275\335\r\330\273o\264\336\275\257\3018\274>\020g>K\344*\276\034-\337<@\364e>,^\002\276S]\347\276\335\211\010\276\214SW>\232w\245>\300i\230\231\345\227\276\316\013\003\276\005\350l>\273\237_=\214\357R\276\013E$\276\337\302l\276;\222\001?\"\020o>FM\331\275\302\253\201>\237;\033\275\206\363\346\275\312/\313\276\240\030\252>\021\343\007;\371\371\241=\300\027O\276\t.\255\274)\357\t?\275d\305\274\227\'n\276\246\225\020>\345Hx\275K\202\272\276\346\001m\274\224\025\020>\333\016\277\275\271\255\334=\204\322\331\275\3028\021f\312\365\276\221\373\001>\201)\024=\317\330\260\273\021\245\002\277C\205\202?0\345V>f\362P>8\371\224>w~c>?*]\276T\345\240>6t\310\275\330\271\010<\237\224\336>\030\237\310\276\016,$\275\023Y 
>\210*h>E:\275\275j\364z\276\335\305\213\276m-\344<\257\241\242=HD\320>\236\216\212=\267\261|\276\347\n*?\361\373\242\276\317\301\210\276\002\t\302\276\312\363\340=\264wV\276\n!N>\007)\263>\274\n\350\275\246\312p\2748\017\235\273\002\367\306\276\252\210\026>\215F@>\261\020/?\031D#>\314v\353>\022!v\276\340\317\267>\236\036\"?\227\2525\276KJf\276\322-\244>c-6\271#\022\035\277\305\020K?\031\267\372\276\343\225\247>B\311i>\244\204\206\276\350\310\316\275\305\250\016\206D\031>\234\340\335=\205p\013\277|\341\306>_\312\004\275\320\304b\276\272\013t\274`\234\222\276\231\222G>\'%\177\275\022\370\260=\271D\017>K\023\234=)\361\016>\352}\224\276{\252\204>\'\004\226\276\216\332\017\275\032\027\274>\347\"\324>\302\350\262=X\3659<\027\266\254>\362;\225>>,\304>XG1>`j\315=\202\340\341\276\223\010\213\276\266\t\006\2767\207\202>\024\027\327\276\216\243\\\275\313\016#?3F\022;(\321%>\t\315S>\220\020\255><\373\277<\211\324\n\277\025\252x=\250\252\264>\243X]\276@d=\2756Y\343\275\010\230\361\275\232D\315>!@\231\276}[\275\276\'\270K>18f>\273y?\276\177\266:>6\263>\2766\035\020\277*\230\243\276\257\025\210>\377\n\360<3f\034\276\264|\277>}\333\334=\037\256>>\321kQ\274\037\006\022=>Z\352\275b{\000\277\355\"\235\276(O7\276\266U\201\276\177\260\026?df,\276\263\344\246=\226\307\363=\005\000/>\363\342\252=B\021\200\276\260\032e\275D\332\024\276X9}\276\017 \034\275\252+\313\276j\223\274>\377\\\210\275\356p\016\277t\271\001>\224C\214<\3375A\276\007SC>\033\311\300\275\032a\314\276j\377\010\277\205\255\270\275\262\250\321\276U\344\360\276\356\204)\274\316\264\315\276\236\330\225>\003\023\300\276\212Z\216>\230\210\321\276\037W\013?E\235\345>\"&\234\274\310\314V\276b.h\275\207z\255\276\004\324\324>\177*\235>\005\026\357>n\241c>\344\023\037>\2755i\275\021\0372\276\003\r\327\276\354\006\023\277\315G\014>\236HP\275\233\267\312\276\035R\271>0\352\026\277z\2363\276\030\202\313\274\334\000\346>\351\260\243>z\261[>\036\312J<%\237\007=\350\361\'\275T%h>crJ\276?\260\217<\205\360\033>C\240\014\276o*\261\276\256\272.>\321[\363\276k\025$\277\234\234\364\276\004Z\220\275\311\224\272\276\257j1?(\330\255>\233\r\034\275Xcs\2731\210[>\3345\255\276\031YO=\306gF\274^\376*>3\340+\276\303\232\000\277\266\266\030>\221U\212\276\236`\204=\335\035Y>37\034\277>\010\251=\017\275\217\276\206\343\235<\343u\256>\255\013\177\275\0346q=\001T\007>\024\254,\276\256%\202\276&M\351\275\306\353\221\276\213\032\275\276\340N\267>;\214\212>\200\010\346\275{F\021>\320\r\271>\332\243Y\274j\t\363>\306\324\271>\0360\252\275\314w/>fIW>O\202U\276*\377\223=\r\231\271\276\023\212\272T\006\323=\270^O\276c\235\343\276\273A1\276,\013\t>\200\207\035?\237\366H\276\310\302\205;\323T]\274<\313\031=l\255\253\275\377x\364>\355\342\231\276\224\225\277>\\@r=\345\351\215\276\2374e><\230A\276Q\033\247=\361j\035\275\321\203\224\276C\343\240\275\025\362\373\275\370\315J\275\263GO>\256\007^\276\203\310`\276\035\034\211>c44\275z\317\203<\206\347\017?\303\303\372=\350\374\031\275\264\370}>j\273\203\276p\227\265=\313\335\367=\207(\300=\211\215\260\276\210\367\274\276\215\310\347=\350\272\372>\313\010L>\225\335 \275\\o\350=c\251H\276W\307D\276F\331\323=\305\354T>\341\025\004>\334\234n\276\360^3>\234\235\267\276^.4>\1775\376=\233\317D\276Q?\016\2779A\362\276\363\274s\276\3755\315\272\303\204\241\276v\016\261\276V\215z>\2407\241>.\036r>$\010\376=\023\033\327\275\021Q\265>\340\317\206\276\367m\206\276\207~\337\275\275\026\021=\2470->\273\226\237=\261\026\332:K\341m>Lf\331\276\263\t&?\247D\210<\267\217\212>.\254\214>C\306j=\376><>\247C\355\276 
\377\352\276E\253\211\275K\320\370\274\331\260z>\016tK\274,h\003>\3373V=\371\217\270>\260}9<\230v\353\275\273\211\264\275\202=\222\275PH\277>\346vp\275\347\365\237\276c\245\352\276\227\255r>)\234\237<9a(\277\360\r\243>zc\263=9\263e\276\354RW=\006t\216\271\363\024\253>\033\026(>\362\252\345>w=\014\2778\177\010\276\312\006\366=b;\351>\373O\260\276\016\326\360\274\372\360\300D>\227\311\364=\300\245\027>\'\213\231=!P\223\276\376a\213\276x\235h>1Z\246>\363\010\327>l5\262\275\200F7=\220n\023>\242w\024\277\363c4>\352\003\230>`\003c\276\034h\273\276\316F\261>\347b\275=.^M=z\327\221\276_C\t\277\024\020\302<\373z\226=\2031&>\020y]\275{i0>\213/\334>\254#\222\275\'\277\204\276\317b\t=\001\034\243=-,\313>\2357\307>\267\204\232\2766%3\277\000\270\223\276\364P\210\275\211\200\275\275N\027\023\275\355nK>\266\200\302>\256\233\202\275\344\330\241\276y\240\304\276\366\2638\276\365\303z>?\362\214\276i\317\313\275\222\325\276\274\212\223\007=)\213\001\276RlK\276\267\243b\275T\375\226>\232\202\020>1|\177\2767\0000\274\272-V\276l\271\335>\262\305\276=\024\004|>\345\314\370\275\340o\247>t\031\342=\222t_\276my\215<\252\n\310=\275\220z=\376\373z<\317\363=>\326n\273\275\304<\372>\332\213)=\224LX?sp\337\276)cV\275(\220O>c\030X\276\306\217t>f\276\300>.>:<\213\326D>wd\314\275\261\374\036>\257^\361\275L\010\233>\302\317/>\373\363\324\275\2231\245>\n\010\214\276ek\226>\305\377f>\014\254B=\337K\226\276\322\325\010>B4\342=3}\247\276\324c\355=\t\0200>\233R\341>}\376\013\276kf\007=j\n\222\275\213\312\237>\264l\252\276*U\014\276Y\"\006\277\000A\001\277\267&\206\274U\335\256\275\253\371\037>\371L\254\2768\002\031?V9\266=*t\254\276\227I8\277\240\313\274\276a\204\022>mE\030>\372\3356>\315\326\233\276\331\rb>\034\263\027=\023\305\037\277\035)\314\276\250|\246\276=\023\025\277U\006\322<\310\316\212>K\255~\276\364\"\222>[^\263\2762\310\266\276\343\355\215\275\227\245\236\275\266\334\245\2758\274\031?\357\344=>n\223\224\275\303\230v>\277\026\212\276\351\340\373\276:iF>\203\240\036\277n\323\213=:yi\276\270\241\033\276\275\n\247\276y\224\003>\327A\212>\312\343\317\274\273t\330\275~j\002\276\256\264\274\276\360\313&>p\356\026\277\341}\'<\273\325\037>%\332\263\275w(\226>\336\014q>\233M<=&\244?\277\376\327\322>\214\357.\276\206\372\201;+|\345=n\010\360>\203\346\244\276\235\0249<\000p\276=zx\273>.\227K>\353AT\276Br\274>\265\003@>\267\204\320=\307\356\343\276\330\345!>SS^\275\304d(>\344\313<=x\035\263>\303\242\016\276H\276\016\276m;#\277y\3644\276D$0>\203\030\343>\265\r\037>^\217\366\275\263T\275\276\361p\026\274\340tz>\237\3211?G\227i>\034gR\276\225\374\310\276\204l\342<\2653\324\275_\260\361<\343\3271\276\252\'\273\276~7\232\275\"\317\233=\355#\300=\221\332\033\276\267)\314\276oAEo\314S>F\310\255\2752P\221\275\362>B\277\313d\025=E[\034>\311\337\301\276\002\353b;X\344\027\277\212\271&>)\314\347\275-\306\266\276nB\"\276\273_\275\274\002\250\204=\010U\372\276\320!\304\276g#\200\276!\340a\2746\036\272\310\315\261>o\250[<\354\330\033\275G\n#>A\255(>\317NI>\265s\002\277\370\215o\276\231\343\242\275\"\251\366\275>\306\017\277b\230\022\276\3472\256>\266E\322=\220Kw>:\026\010\275\250\235\226\276\277\353\020\275`\"\322\276\370\0315\276\351\323,\275y\224^\275\351~\033\276\267\200\263=\253\230\356\276M\270\243\276\246~.=\302X\210=\301\024\271\275\320q\337\275\3420\277=\312\244\241>\023Sr>.\277\225=ub\245>/\033b\276\027$\315>u\375\220\275\321\236\263\275\354\325\210\276sa\000\275\201\221O\275\312nV>\032\252\224\276\360\217\227>e*#?@)\032>\026\250b>3\025\234>.\242\213\276\177\302\312=\300\013?\275\323\000\005>\276\247\033\276v=3>\274&\325\276@<\263\275
\035\t\251>\037\212\325>\"\211\005=\031vC=\374\240\243\276A\036\276\276oGr>X{K>\335\035}>/\351\217<\007Z\037\276\025S!?\220\351f>\374R\207>\374\324\014\277O\233\021\276\340K\346>\nC\r\277\026\204\365\276D\'\027\275\016\'\021=,`\227>\256\tv\277P\036\252>\215\023\362\273\345\321\274\276\241\275\336=\336\367\236\276\277\013\027\277\332\370\344\274Y\036\303\275\257*\360=\243\235\223>Kx\315\275m|h\276^\206\324\276\022\354\320\275\351\243\205\276\313D2?\032?\272>\263\302\327\275\026\324\312\275X\304\332>\030>r>\037\260\345=\266\257\335<\243\354\332=\231\365\033\276\030\245s\276\336\254R>\363\336\253>;>=>\335\213a\276~\312\005>\325\343\007\277\252\346\361=\362%.>\266\010\371=f\361\222=kE#?\256m[\275\334\357\345\273\335\243\022\277%A\177\276\033:\214>\311T[\275\305\370\013\2745\202\225>\034\006\030?\264\033\346=\303{\273>\331\032\224>\364t\256\276\207\262\365=\305\222\017>m\027\327\275\016\270\225?\272\277\273\276\002C\204>.\006\030\276\354\347\246>%%\245\276\337d\271\276j]a>[\233\037\277\016_z>\225\376\251>\203\200|\273(&\255\275V\027\211\274\260zM>\207r\365>\251\301\266>k\227\224\276\3775Q\276\3310\206=s\220h<\341\034\223\2764\347\204>B\371/>\331\337C=9\\\010?\036\272\321<\366\203`\276E\323\020\275<\371\267>!\321\323\276Z=\213=B\246]>\271G\325\276\204\223\207>\200\261\005\275\364\321S\276\273\345\234=\021\230:\277y\327K\276\307\023*>\354s\323\275\037\r^=\002T;\276\006\350\245\275\037M\016\275/\364q\276\034\316[>\330 \336;R\207\306\276RW\316\275\235F\355>\364\003\203>\266^w\276\023\352\353\276\324\377\347=\360i\235\276^\373G\275\301\227V\276\264Cl\276\210\335&\276ql\021?<7\033\275+\356\272\276\376\270\226>+\347d\275\365D\026\274E\266Y>*\314\311\272\273`=\275\235_\206<\321?\220>14D\274TD/\276R\255G\275X\222\331\275\306\000\262>\341\036\031\277\303\304\032?\262Y\022>\377\3709\275\331(e\276Q\310+=\214Wn>Z\331u\275u\204\240=v\035\025?W.\001\277\3110\022\277_g4\275\207e\276\2765\305k>\345\251s>`t\313\276\273\375\354\275/\302_\276\206\017N>L\035\247\276[\017\005\276\026\305\315\276+z\360\2740\327\341\276\245*\033>\311\271K\274%?\222>\335\276\177\276\004\022\211>\337\266R>\305N\327\276\233\025j\276\327.\224>\320\030\236\276\351\002\004?\223\027#\276\tN\004\277`\336c=m\r\207>X[\261\272yt\210\275\241>1\276\373\032\305={sS\276M\270\274\276\007F\320>\373\031e;}\030\275=\327+\220<\225^_=\036\237\356>@\2224\277$T\010\277\341\\\242=\234\342+\277J\371\357=\\k\203\275\035u\316\272\317\357\311>\263\341\236\276\344\247\013\2760b\262\275\271\341\217\276\236\021!\275lQy>\336\226J>a\267\226\275\2334\310\275\342\260\261=\251\214\004=mE\214>\020g\244>O|\201\276\371i\356\275\246\315\262\276\330<\003>\363\322\022\275\375m\274\276K+`\275\0027\304=\324\324\310=\366\2423>\020\314\235\276\367d\240>\272\003\201>\256a9>\rB3>{]\316>OS\032\276Y\r\030>\344x\202\276,\212q\275\214\3246=\2710>\274\017\n(?\251\243\006>\376d\225\276qcZ\276\257w\021\276|\341\211>\036|\377>\375\021\327\275\334\357\247>\362\005\256\276\242\261H>\231\226\324=\362\261G?\321v\024?\315\323\225\2765\023\245>m\302,\276`\2733>\270\355\340\276u\241?\276\264\356\365\275\001\376\250>G\0373\276\253\027a>\362\234f\277y}\014=o}\026;\t\252\024=\270\3156>\354\231L>\360\360\255\275\271*\341\276\200\000\211>\007\204\354\2751\375\347\275,r\355\275\261\270\223>\331\201\002>\312\004\212>m\346\247\276@\347i>41\336\276\264|\210\276\302\332\360\276U\302\364\276\275\230l\276ZB\004\275\270\016\321\2758R\014\277E\342A\276\366\027\020\277\265N\257\204\343\006\277\000\t`>\035\202\365>\236\361\206\275\311\326\320>\017\217\235\276\240C\320\276}f\376\275\251\335m>\335|\034\277;\346\367
\276\253\316\333\2762\322\215=\235\362t>K\237\006?\251#\345>?\301\341\276\270\255C=\377\027c\276\375\255\373>\372By>\233\\\021>0#L\273`I\224<\270D\354=\024/\340<\250\245\332\275\337_\252>Q\344\200\275\254s\244\276\214\351\017?\303J\256\275\237[\032?\263M+<\327xR?\374\204\262\275K\330>?\365C\022=\374\365\271\275\365U\347\276\304\216\264>\237\\\341\275\037\233\255\275\237\211\272=\336\200\351\276\332\205\033\276\331\376\344\276\344\365\010\276\202AG\276+\224\324\275I\017\215\276I\220.>\373\276\214\275\325c\"\273,\000\036\276#z\347\333\330\257\276\262\377\354\276C\272\035\276?\361L>\336\360\225=\345\032\357\276|%\\={\346\375=\300\2212>F\3170\277g\244\354\276i\210\277>\357\331\001?\026F\372\274\334\305\016>&N\004>G\231\246\276\315\023\336\275\354\017/=\"7#>!\026\236\2765\324`=p\023\222>\2363\r\276#j\346>rvX\276~\337\314>\003\210#\276:\306\357\276\266\"@?e\013\204>P\230\004?\314.\341\276\264\334H>\362\230E\277u\'\225=\324\330\003>\350\013!>f\016\374>]\217.?*\013?\276inz=\322](<\025\355\331>D\360\270=k\244k=L\250\264\275T~\327>\363?\\=76\263\275\t\225\"\276\344\272\347\275W\276\261\276\210\324x>\033\351J\275>\333\203\276g\014\273\276\340\305\222\274j\207\274\276A\301\000\277\346\205\213>\234\002\003>\350q\240=\355\220\364>]\275!\277]z\260\2744\372C\276p\305a>\031\356\225\276\300\030\201>\276\271\320>\266Z\356\275\022\033\267>:y\376>\003\356\355>\3574\376=y+\026\276\334Q\213>\013\361\231>\327\316{\273-\300\362=\307\272\246=H\005_\276\023\351\274>\315j\271\275ox\354>\262\347\247\274\236+\007?\037v\212\276\320D\226\276\364\277\006\277\021w->\017b\037\276\270\024\343\276\t\022,>\200\'\271>|\222\235\274s\353\013?\246;/>AB)>\261VL>\003@\373\275B\305,\276M\232\203=\'\030R=x\316\226>\026b\030>[\315\"=k\332\350\2768m\373\276\220\340\336\2752,\207=\314z\342=z1\264\276\026\334_\274\267;\376\274UGR>\357\366\353\275\254\024\233\276T\342\243>t\206\265\275Y\334:>#\347\177\276h#\002?\361\374\227\276Z-\032?\261@\255\274)\263\225\275\250\254t>d\271\344\275\370\234\216>\031\220\235>T\242k\276\342C\211\276\023\306\247\276]W\224>\265(*\276\004lW\276 \316\317>\221\026\254\275u\374\035>\306s\006\275*\323\306\276\222UU>\302\232\352\276\304Nz\2444\030?\032u\220>\204\276d>\316$\371=o\020\253\275\317\304\325\275P\301\210\276\275\332\320\275\035\271\245\276\357A\230<\005\362\001\2770t\001>\\\345~\275\017S\241\276\214FJ\276\375\355\373=Z\345\340\275[g\253>\314\363\344=0C\321\275\362\226\026>2d\017?\346\257\202>]H\253=x\343\031\277\3474\201\275\320Jd\275G\263 \276(\263,>v\230\221>\207>\243=\352\357\302\275\376_\321=Ur\314>\322\214\226>\362B\324>\254\372g\254\202\006\277<\021\003\276x\212\032\274\373\261\254\276F5X>\363\007\264\276\332j\371=\377\307q=]\267\013?j\\\357>\330\210\225\276\204\306\215\2768>\241>\331\275\030\276D\363\010?\203\245\302\275g\272X\276\025\323>=9\356\253>|5\320<\310\262R\276!G{>q\326\255>\335:6\276\374\240\307\276_U\006?\177\371\243\276\253\370d>\377\002\024\277cSD>2/\023<\024\333D\277H^\261\274\224V\335=\243\323\'\276r\247\232\276\305\013\322\276GqO=\244\265\240\275y\007P>\375\344p\276\253%\211\276m\034\"\276r\303m\276\354\032\246<\362\204H>\213\233\325\276\237\340\310\2759&\220=\235L\030\274\376\221\267>k\006\033\277s\024\232:\204:\275\245bc>\252\217\312=e\305h>\014\207\007\275\214^\354\274DnQ>\004\375\376=\020\246\003=\311\3431\276\371\363\226>\212c\373=\022\345>>a\2444\276O\220\327\275\271\335\257\276\224K\351\275\252\347\004\276\372\256\t\276k8\234>\033T\013?Q\203\022?\234\307w\276N\013\343\274\347C \276]\353\244>\343\302;?g-\222\276y#\002\274\006\361X\275 
\276\300f\000>(;\025\276K\277\201\276\2458\304>\347\262F\276\211\031n\276\004\206\233\275\"y0>*\311\326>!\263\365<\ry\321\275\337\006$\276\004\245\323;\334]\304==\266\324>W\342\374<\206\235F>&1\t>^v\373\276\266\030\310\276\322\227\003\276\310#l>\254\245\036\276AI\325=\\\035\334=\305\224\332=\306\206\274\276\235\237\320\276@\270\320\276\243\260!>\177L\224>\177\225\224=\031\301\207>\207\215\'\276H\202\244\2742\013\200>\263\'\273=\322C\236>\003\247\366\275\325\005\255t\231\275-\276\021\260\r\277\273\311m\275\002\312\273=\215\207\347\276\231\223\200>0\222\350=\304\006O>D\300\244>\234B@\276\260\215\361\276\302N\307> m\021\276su\302>A/\324>\237\316n\275#\324b\276\305\335\r\276y72=7\034\261\274\372H\260=5\321\002\276\207e\247\276\256\034\377\275j\026\004\276M^\207\276\240\342\016\277\3218\220=:\014\312>NO\261>\217\263E\275\337\024\016\276\010\224\r\276\255A\t?\343\332\250\276Zx\204\274\376\004\301\275\261)V<\226D\252\275\275X\211\275\325\221\004\277\031\375Q=\037\026\235=D\216\261\275\262:R\276\242Y\026\276L\2564\276v\030U>h\301\371\276\3522\265\276o\020\273\275\300\331\244<\'\245\337=\001\312\201=\020\235\004>HP\300\276Fs\260\275O-,=\333\267Y>\210Uz=\244\353e\275\010\326\305\273\344\210s>1\0367\276\304\353\237\275i\200\231\276\326\302\022\275\222\030\024>\353\327\235>\214\256\216\276\306A\320=/\244\344\275\013rH\276A\236\203\276\275\'\225\2765\263\246\275\334\351\n\275\027*!\276\002KR>\3412\270<\350\340\326>f\327\211>nK\032>r\335\003>\313*:=\345\277\311>h\343g>\306\030\000?q\276M=&\356\274>+\316?\277>\216\247\275\230\215\213>\357\032\367\276B?\200>\210\245E={T\207\273\327R\002?\250\250\262>3\274\033?\214\267\n>\215$\270><\0176>\252\377w=\nZ\203>\276\362\326\275\014\350?\276\337\351\027>=l\353\276(\362\221>)\007.\277#>\352>\221\326\233=g\277-\277)\2673>\014\007\267<5\336\364>\257hx>l\204\223=z\377\233>\236\035\206\276\276\361\257=\316\013\002\275,%u>l%\334>\026\260\'\276\237\323\255=W\233\203>\016\224\217\275\337)\035>&T\227>\330\232>\277\316\233$>\000\306\327>\016M\254\276\254\000<\276\336\313\023\276\024J\354\275t\320\262\274oc\276=\332;\370<\275\037\267=\222\312x>\006~L\276\\\365\333\275\375:+>|\026\034\276it\233<]\310\255\276\350v\311=\374l\204\276\345\242\302\276\232\230\223>\265\3228\276\221\213\313\276~Te>M\221\261>$\274/\275\236\305\336>x!\252\276\325]\243>\265\271\017\276\036\037A>:\345\237\276\323\317\226\276\235\370n<\376^\334>\024\205\006>\267lW>Qy\265\276\366J<\276\226\n\252=TB\375\275&\333\260>\254\006\266=C[\250\275\370[\314\276\203\370\221\276\315q\212>\020v\306\275\376\210\373\276\233r\034?\272\006\221\276\363Sy\276\t\324\264>\313,\020\276S\341\337=\223M\263\276d\326S>\263o\241>\024\2421>o6|\2756q\247\276c\351V>\3403\">\263\310f>:\264\210\276\001\256\252=\013\365\000?r/\t\277\306\300\024>\006\362\250\276\213\246\227>\352-\353=\322h,\276Ag\201\276(\272\322\274y\t\244=Z\206\262\275\263\205\n\276\246\244a\276_\301\231>\\\007\305\275\264\r\215\276\371\013\260\276f^x\276\325\267+\276\216\025\277\276\325\374\263>\3616 
>w_\224\275p,\305=\016j\315=\022\266\340>f\272\322\276I\313\341>\017wH<=\366\235\275`\260\036\276\354\262G\276\014\213\001\276\210\322\250>\331B\332\2768\n\267\275\363\272H\276\025e\255=\361)\277\276\005I\333\275\200;\023\276S,w=\315\322;>2\277\233>qM\203;A\036V\275\037E\325\276\353\335)>H4\"=\356b\241>\271#0=L_\226>5|\220\276\273\001\215\275\354\226V=\335\233\200\276H\321G<\016\005\234\275\200\n\320\275I\262\t\276\306\311~>\326\314H>+\n\035=Uv\265>\'\211\253\276u\2053>\200E\226>\303{\n\276,\223\035>]\221C=4\277\342<&\023x>\036\021{\276z\010T\277\223\316\017<\275\332n\275\343R\326\276\014\250\n>a5\252>\022\234V>Mv\020\277\217\226,\275\"\260\377>|\356\301\276\225\232\034\277;.6\277o\263-\276?\330\210=\352\017\235\274\243\320\230\275\'|\014\273\211a\004?\"\006\016??\013\267=\006\260\275=\024\030\"\277{RR\2759\242I\276l\354W<)\201\266>H\"C=\365\'j?\241T\205>\311\360\303=\225\343\347<\254v\256>w\013\000\277\301\341\230<\007\344\375\275\226I\370:GJ`\276z\344l<3\023\326>\313\364\253=\366\203C>\r\014\347=\275u\023\276\372!\366\276W,\314\276s\032\247=fcX>\231W\204=\265\227\257\276\021\272q>\300\031\036?+B\022?m\255Z\275D\314\274=y%\233\275\251\374K?\265y\316\274@\320\231\276\211\242X\276\306\347\007\2766l\344\276\320C\242>m\017\325>\320\2529\276w\223\271\2769\350A?@\347\">\351\337/\276\021p\323\275\341t\227\275z\004 =\327\211\276\275\372(\301\276\017\244<\276\224\020\027=B3\254\274\364\275\255>/3)?\243s\320\276\323\250\341\274\267\033\007>wb\230\275\347w\330\275\246V\000?\327\221\322\274w\017/?\303\3140?\334\352\374\276*\360*<\336\257\354\275pL$>mW\245\275\273j\001>{\250\226>\274`5=\227X\357\275\216\356\014?o\231\220=\337\244]\276\373\300\020?{\342\223>\205`\341\2767:\326\276.*\t\276\366w\365>\211\3363?\313\376\240>\373\332\216\275\322\304;\276\021\244\343>\004H\023>\024+_\276n\263\033\277,\217X\276\235 \227\276\351\345&?\257\316?>\224AO\277\r\351U>\234\347\210\276\270\026\323>.\226\343\274\362\246s\276\352r\362=\210\244\322<\353\205\362\276\0355f\276\2073V\276\023\321\227=\032\346s>\2320T\2765\017v\275\331-\204\2765hz\276\003{\301\275G\215\370=\326\203i\276b\346f>\036\376a\276\206jE\276\3208n\276|B\350=\205^\307\275\300\001\252\275\024\362\003\275Q\275\200=\273\373\267\275So\316\275KB\227>\220\235\006\277l\r\216\2766\230\333>\277\232\356=w5\354\365\034\\\276|\276i>\2356=\276XH\004=\007:1?\336S\225>\206\355\224>\261\337\220=\0020\227\276\350\322\013>HNu\276\274\007\037\275\3105S=K\306\332\2766OD\275\324\225\240\276\270\244\216>\242\357n>g<<\275\024\177\241\276\3212\252\276(\355*>\373\275\354=H\346\005\2762\002=?3 \241\274\200\256\327\276K*\213=\315\237\331>\353\214r>\307~\317>^\026r=\375\315\267\275\323\341==\225\215\230\276\270\221\266\275Y8\037\276\272\207\261\276[\036#\276[\\\323;\216\377\337=\"x\277\276\220\370P\276r\252o\276\342V\277>\357\2349?\367\365O=P{\223\275I\3112>\017\352\260=\307\314\026>\231\225\016\277|\320\337;\336\312\252\275\267.\315\274_3S\276\342lP\275\365-\334\276\037\027#>\305Y\377\275\360\nX>w\276\245=k`\307\275\223M\233=D\226\233\275\231\223\232>0\256J\275CaS\276N\231\277=\351#\332\275\210\215\023=\214\376v\276\206\351f\275\350u\217>\202\337\206\274\352,\213;1\357\'=\245g\370=\365\202\305\276\271\344\367\2754\037$>\201#\312<\314w\245\276\342\375\013>\342O\274=\236\203\333=%\244K=\232\264\n\276\203\241)\276&E\244>\311)3?\306r\270>\225\264,>\t\305\267\276\033\242\363\274z\315P\276\257\300~\275\004-\253\275\214\267&\275\013\000$\276\322\033\215\276\022E\244>.\226\021\274\346 
\332\276\004=\236\276\227R\034>\253\376\352\274\2513V\276n\327w?\327\300\267\274\247\343\223\274\216\345B>\260\247\214>\250\314\033\276\235\301\244>*\353\007\277\201\324k\275\250\000\034\276*[\352=\0052}>\267{\342>\204.\345=\247\275\307\276j\321\223=\253\355\276\275\177\360\001>\373\335\222\275\032f\315\276Y\2304>x6L>\325#\010\276\240\343\265>\r\331\025>\206/\341\276\371\323\304=\276\322A\276\n\255>\275\004\333\270\276b\037\353\275\2120\203>#@\366=@|\343=\002W\352=\360\351\013>\254\357O\275E!\365\275\276\032\037\277\245\322\013?\026\034\205\275.\007\020\276\023\321?>\220t\230<\372\227\357>h\327\224\276=\023D\276@\265\256>o\001l>GY\276\276\362\364\251>\2042\247>r,*>\'\031a>\005#\022?\322\310\307\276\271\313\273\276\003\302\'\275\006\302^\273T\311\212\276\013\271\211\274 C\264>\216\337\020\276\'\233!\275*\371\244>\000\221C?\002\254\360\276\361\256\007\276\2448\">?\t\307>\002\303\254\275=\0348\276\232u#?*\340X\273\007\354\332\275M\210\307\275\376\313{>S\ty>\301\006Q\276\355#\241>\330\017\025?Kd\353\276\311\366\030>\324K\206>\2013/>z\022(\276\203Yr\275\031(\306=d,:>\014\357\026>z\3078\276\016\337O>X\252;\276\221\347\360\275\361\304H\276\211O\306\276`Z\t\276&\263t\276\265J\301>\315\263\'?\'\014G\2765\256\230\272\001\030=\276\262\027R>p;\254>\032\022e\276A\222[\276;>\237\276xt\021>L\275[>\217\356\201=\"0\246=\352J\325=\025\301\372\275\262\224\'>\350\251\362\276Y\240\300\275\361\025\224\276\n_\265\276\203\267\016\276\244\033k>\376\260\027?\021\307\007=\203\200\232>\216>C>\307\356(\275\030g\205\2763|\272<\324\272J\276?\321\210=Z[\210\276\234\310?\275\273\ty\203\3445\276\265\003\201\276d\255\361\276G\370\031\272P\344s\276\233F69<\374B>U\333\353\275\251\205\n>\320\244\231>\357\333\354\275i\264\271\275\322\263\200>\306\016\237\276\212\024\343\275\366\233\326/\363\302\276=\003\227>\310\356\372\276\304\261\212=%\374\350\276m\335f=E\350\r?\357\237%?\214O@\276\354\216!>v\2533\276x\021\343\276\235\207}\275\316bI>\375\250\270\276#\'\034>\376\273\262\272e\224\342\276\0207\263=\210\023\304\275\215\240\354\271\350\346\227>\333e6\276\303\363*>a\314\320>>A\260\276\310\320\367\275\374@\306>\344\2702>KV\340<]n\345=@g\366\275\207\031\030\276\353;\317\276\322\022{\276\305\3661\276}\030\210>~A\256\276\220\234\250\276\n\316u>~\333_\275\371M\242>\232\252\034?\256\334\271\276_}q\275p\244\201=2M\250>\253\024x\276\021\346\301\276_~\223;\212v\023\276j\037\351=t<\022>\273x8\276:\232\002\276\310\023\023\276\335\010\373\276\204V\343\276[\331\032\276\202l\001\276\342p>>8\201R\275\334w\203\276\247nL>n\310\206>\346\237\323>\006\364\302\276\221J 
>\256\020\013\276~\264\336\276\206d\320\275\036\020/>(\272\024>\354\215@>\245ir\276\277\317\271\275\223x\340\276#@\014\275\273R\203\276\247\226\250>\234\270\212\275\323\2117>\271{\234>yh\252>>l\030>\200\251\207=\346E\030\276\022\r\327\275\2363\316\275Q^\026\276N\306\025>U\260\204=\032\255\301>V\334\010\277\261\366\360=\363\021\202\276?\013\246\275v!\223\276\r;\242\276\201$\022\276ic\233\276\217\236\273>\335\037\237<\315\373\253=\013L/\276a\233:\276C\363\036\274Fa\232>b\250\242==\004\362\275k\311\375>)wY>\327\2639=\333\251(=\217\2429\276>\344\273\275\206;\305\274\3537\t\277\244AE=\270\340\275=\314\342\232=o\352v=\274I%\276\246\215\227>\2561F\275\010\2140\276]\2169\276\362Z\262\274\253\372\\=\230-\217\276\321\200\320\2740\214Z=\255\362n>[>\315=J\301\226\276\372\'\006\276e\006\217\276\030\244\003\276\313\364\001\277O\237\231=K6\032>\363a\206\276=s\307\276j\247\220>\236\244\237\275\230h\354=7\361K>qmG\276ANh=\243\322\016\276\341\377\332\276\324\036)?\243s\241=Z\253\260>\247\213]>\345ka\276\022\224x\276\247g\267=\010\350t\275\026W\216>\321\2142\275/\t\214\276\302\374\334\275N\013I\272\261\353\257=\204\022x=\312#\r\276\243qu\276!\024\337>\227\267\237>I\260\232>6\222\316\276\222`+\276\260Y\314>\302\020+\273\370\\S=\266\340\235\275pW\332\275\330\212~\274;\350\353=\212;_\276u-F=#\230\002>L 8>wkH>M\203*\277\367\275\030?\231r\366\275y\313\000\276\017OL\276TM\222\275\351\033\300>\373\321+>\366\2069\275?\315\211<\332\247\274>5(#\275i\317\026>e}1>\032\210\351\276=C\022?\236i\324>\225\020|=\2202\016>]\005\214>K\375\243\275\272\233\354\275xf\006>\235\241\222\276{\0138\275\310\246\224>\317\313\031<\317\330\335>\"\271\325\276,\313\301>\276\204\261>9\377*>\306\032m\276n[ >yI\225>\200Wd>\313n\210>J\004W?H\031\037>\345\246\377=^.s=4\237m=q\241c\276\021\224\200\276\177\346{\274\2614\337=\030\272\253\276\374gK>\\N\275\275\364u\224\276\251S\020\275\377\356c\275\326\334\006\275\\\013\001\276\221\202\036>S}\247>W\314r\274~0\333>\276H\025?\204K\213>e\267\265=\253\223\211\275\377$t\276\0377\257\274T\367\236\276\010-\232\273\360\0018\276\252k\243\275\205:!?\344K\003\275#\242\230=`^\256\276\026\005\204=0\231\214>Q\242\353\276W\013\032\276\307W\252>;\022\273\276\t\013\332\276b9J\277\210\251\203\276\331\204\337\273\272cy=\252\032\030\275\335\257\245\276\\\251`\276\211\036\317=\343eu\276\233\273\033\276\226\274\266\275\321\311\222\276\363m\377\274\035\010G>\266|\237>\023\262\273\274}h\005\276\233\351\220\273\307\277\025\276\336l\375\275r\304\342\276\016M\246=\210\307\363\376\216\245\276}{\320=\337\241^>k\020\212>\034\n\235<\261`\250\276R\325\223\276\364;c\275e\266\256<\335p\255\275\374w\024>\316\362\005>%\310\327>\305d;\276fE\351\275\341\354\213>Mr=?\212\333\220\275}fq>yZ\254\276!\021\025\275\325(7\276\264o!?\360\nC\275\365\332@\276\334\222#\276a\037!\276#\247\254\276Oe.>H\352w>MU\205\274\376\216\265>\232\001\372>\267K\031>\377\266\203\276\246\275\310=@\000%<\004\244\024\275\025\236\270\276\221\0036=!Q\363=\264\010\237\273]\354\377>\325\261\275\275W\301\022\277\022\220\256=\335\016\315\276\353\311W=\267e\373>\"\342C=}w\265\2765\231\234\276\007IK\276\033\213(>\003\212\263>\020\223\312=\322\030.\275g\341\032>H\035o\276\271P\376\276\014\350\001>\202\211\203\276\231\275=\276v\004*?\353\203m>S\277\346\275\020\2773=\024\017\356\276\223xb>\000\346\232\275\353j\016\275\030?F\305\231>\353\233\246>\371_\221>\214 
\r>=\210\036=}\345\304>\025\366\302\273q\220$\276\224\226\267\276O\373\342\275\334\251\022>\3720&\276\346\1777\276\017\326\373\276\3754\305>}\323\013>v\317\023\275{\314\250>\002\313\243>\006y\014\347\270\000\277\250jP>Y^\325\275\030\337\267=\202\361\217\276\016$\002\276\266?\332=\271\t\276\275\333\353\307>Y\260\025>\305KQ=\220\2664\276\243B\033\276@0\377\274HV\234\275Z\016\301>\363\005\277\275\252+\201\276\355y\351\274+f\260<(h\346>\tf\025\276#\314\340=\222\243\365\275\361i\035\276P\343\214>\251\266\307\2756\031\265\275\211@Y>dk\350\276\200\236\373>\315p0>~*j\276x`\004\272M\266\004\275,\355\237\275\347so=@\326\300=?\337\206\275H\237A>\353\202\254>\344!&>\213\260\270\275O\375b\275\032\324I\276D[\270\276\336\343r<\244\013\222\276\004\247\324<\335}F\276L\344u\276\033\3731>2\345V>F\362~\276\240\361\233\276\215rH\276\315\000g>\267\323\035?\336\004\211\273a\341\346<\222O \277Q\240\250\276\005=\346\275\346\357\321=\2101\242=\201\347\315\274\257\304\265>\372$$>WYA=\363\354}\275\230\325\003\276<\005\372\276\347P\327\275p\245q>\250P\227=\265\207\013\274~\270\004>\342\233\026>[\214\221\276X\376\264>\336\200\026\277\232G\311>\270\014>>)K\302=\310E\235=_\214\274\275\360]G=#\036\236>j\323h>\207\364W\274\316b\313<\022)u\276\004q\r\276Pb\373\333?\236=>\327\350=\361j\350>!g)\2773\364\224=\330\237\177\276\305\336\223>|4\275\275\025\330\311\276\314S\366>\266b\240\276P]\300\274\351\212q\276\343\252\017?R\304\340=>-x\274+\340(>\265)\242>\222\030\204=q3;?\232\313\262\276\014\222@\275F3\336>A.\005\277\"so=\017\371\014\276\345\221V>.\217\244>\315\336\372\276\227\324\353\275\377:\006\277\377\034\003>f`\024\277\2073z=\017]\237\276\017\307\231\275\'\311(\277\243\214\022\276w\257\267>\340\241#>p5\261\276W\t\265>\237\203P>\231O\277\276a\342\037?O\305G=c\373e>2#\233\276\275u\013=\235\375\375=\257{\010>\003q\022>\241\265\210\2759\030`\276u\366&\275\252 \342\274\355\246\013>Q\253\343>E\324\320\276Ko\004\276\027\354\271\2764H\221>\005\321\253>\314^\362\276\376\005\340\276P\223\302\275zX\374=\211\354\333\276\202u\310=\310\237\371\275Qe\354>\202\2212\276_\371\353\274\001\220\367>\025\266\337>)\037Z\275\300\233\277>=G\250>\275\263\001?\330<\271>\022q{\276\242\330\016<#YV<\035\212\252>\276\003e\277\255\3202>&\270\200\275\312\341\210>\355\024G>_[\363\275{\rS>\326W\277>\352\277\367\275\225I\217\274\326\266\304\276\022\223\230\276U\226\226>\246(\361\275\026\004\312>\335\340T\2768\355\337=\263\307^\276\207j`\276v\2043\27624\300\275\371\024{>\244\0058?\265p^\276@\237\330>\t\371\032>\241@u\275\206\024\365\276|f\034>\272\'X>\305p~\275D\376\216=\016\355\241=\373\225@\276\351\231V\276eG\275=\023\302z\276j\2214>*\274h\275GZ\233>$\330\233>a\314\222>\312\256\323=\355s\024?\351\273\037=#\010V=\007K\355=\030\026\017?\271{E\276\3772E>S\'\n?\240JR\275|js\276\263\263M>\346\227\224\2755\342\376\276W\031$>\263\206\231\276\307\313*\277\254U 
>F\270\224\276\257P\221\275\2441l\275\322\242\243\276Z\250\262>!v\277=\310\253^>\357\"\232>/C\252\276\361!\210=\310/U?\253\327\257\276\242/.?\253\336\373\2740\300\\\276$\213\320\276\257p\227=\363\344\326>R\033r\276a\203\002\275e\206,>\312\241\366=\321\263Z\275\366\216\335=<\236\033?ebO>\337\\\276\275\311\355\233=V\200\321\274\004\256\n\276\334\324\206;L\266\267\275D\364K>\304\026\374=8r\236>\373\330\301\276\303\306\360>(hp>\374\313\253\273\250\320\031>x\002\265\274%\3145>\274\345h>v\207C>e\372b>)b\347\274\330S\254\275$\261\233=\307\350&\372\276\034v\026\276\007::>\025\243u>}\002\320>R\242\224\276\003\337s>t\336\211\276\027\200\021?\256l\254\276\000\356\230\274\210\026\302<\242y:>*\330\273\276\232DA>J#\343\276\032\257-\276\275\342~\276w&\316>(5\204=\371\216\346=\360I\265\2762Q\366\275\310\362\367=\304\202\020\276\273\321\277\273\026\013K\276b\321S\276\241\272\n>\342D\362\275\250\246\341\274\326\n}>\034\000\244>\242\330\251>\216\256\022>S\213\177\276H\343\350\276>\364\031?K?\250=t\305$=\020&1\276\177\351|>\305\336\361>\013$\265\276Y\211\360>\221Zj=\337\226\345\276\365?\205>\276j\266>\366\263\303\2769Ff\276\312\\\033\276\314f\227>\325\\\\\276\272\000\304\275\036C&\277\310\311\274<\221dV\27574\021\274/<\216\275@\226\302\276\344E\305\276\013\031\007?\275\233\277>+\204\032\275\223@F=<\375\311=\007\323\360=\330V+>O;\314\276\002\'G\276\257\001\333\276\373\'\227\275\304\032\210>\231\264\001>\266\014\224\275T\n\030>\237\210\316>\227\216\322)q\322\274\363\t\347>L\3008\276\233\211I\276t.\346\275\351s\317=?z\323=\3112\207>Zh\237\274\247\350\250;8\352\217=.\325\206>\260\365\277\272\036\236\234>\245\242(\276\367\263\006\276\255p+\276\272\305Z=y1\024\276\265\261\313=s\377\206>,\017\365\275\'5\275\275\315\354\265\276GH\255\275\222\"\344=u\005\343>\366\262\303\276\272\274G\275\037\334\334>=\302\215\276\020\n\376<\016p!\275\272\261\266\275\022->\276n\252B\275\256\315\255\276\337\303\217\275\031\023\010=\327;\236>U\246\315=CUE=\335\222\231>\334\261\025\275J6\370>\232t6>\214\020J>m\240\273\276J\355k\275xJ\323>\255h\266\276\365sh>\317T\001\277y\026/=\201F\023\277\324SX\276\336\367\201>^\366\364\276Q\374\243>[\336~>\341!L\276\362\223~\275\352\027\316<\003\302\371>\212\017v>MR\235\275\305\370\304=q\340F>\014+H?\264\262\273=q\270\360\275I\362\317\276\341tn\276IF\272>o\332\227>\231\257\253\275\006\022\326>\177q\374\275\226\256g\276Hb\230>~\366L\276\252{>\276\343Q\"\277\315\250\370<\265\304K=\267\2641>\037\210\035>\17715>\304$\256>\361N\001\276\305\324\301>4\307:>\235\223I>gig>\246Q\365=\332\307q\2761V\027\273\002D\341\275m\305\250>\020\034\006\276\022\021\243\276\344\357\213\276\331\tm\276\026\204\341\275l\315\331\2739\307\201\276>\024\337>\355\215\210\274#\226\215\276R<\211>\366\321\244\276$\211;>\306\025.\276o\316Y=\006\230\220\276\204\275B\276\231!\240\276\266I\301>0\3627E\245(\276\2702O\275\325\362\202\276\246\257\272=\371\025\335>k\256\220=LM\340=\300|f\275\3060\305>f\373\000?1\213\002\2762\314\324\021?\250\205\207>hz\200\276\322\013\200>\277s\037?\025\352\340:V\243\300=*\246g\276|\232\335\275\225\374K\276\004\323\002?\017\247\214=\311*\213>\244ZM=1w\210\350_\253>z\343\321=27\032>\253\271\000\274\037\212\026?\223\315\024\276\377 
\022=qWi\277u\273\253=\207w\334\276\332\310*=\331\344\265>;B\267\276\301\225\014?\237\304\r>+|\017>\257D*\277\233\256p>\202\245\205>)\300\002\277X>\'\277:\247\362\276\003\320\230>8*[=\305c\325\275\274\001\246\275\217\322\010\276!6\344\275^\250\242\275\350RD\276\254}\204\276\340\020\037\276K\324\024>\271\034\300\275\377hX>\344rm>\230\201w\275\377\227u\276\372}\023\276\277\234\000\277K\232\"\274J\010\233=p(/=@l`<\007q\n=\204\253\263>\342B\274\275u\361\354\276\241\272C\276\301_\200>O\367\310\276\272\244w>\343\377\330>-\342\000\2765\351\032>\206\006\352>)P\006<\354\311\020>\377p\340\275-\247\224>\271\232\014=Xl\017?\340\006\337=\253@\236>\240\014\210\276q\363r\276W\017j\276\255*\330\275\352\213\031>\222\362\273=\364\362\216\275\332\323\034>z\\>>1\310\263>1w ?S\273\334\275?f\257\276\006\371\310\274?E\r\277\266%4\276\342\370/\276\340\350W\275\305\032\215>*\354m\275K\320\'\276Rb#=i?\020?\314w:?\226\202\023\274\252K\251\275\313JV\275\200\030^>\360\247!\275t\2458\276\344\256\300\276gb\003?\363tF>&:\250\276\323)\213\276\355\306\353=\353\341m\275\241\373\270>\240\037\3219g\303>\007\274\204\275/0\312\275d\304\203\276\210\031\002?4\232\361=GJ\004>gY\200\275\025\354\323=\263.\000?\266\235}\276U\020\311=.\351\341\276\222\007\251=\267\234\223>\325{\211>\372\316\036\276\336\241\375\276\371\2374\277\305T\025\274(\203#>B\341\306\276\203F\301>\025\311\237\274Y9\314\275\216D\013\277j\037\266>\035^1>\367\250/=rv\353\2760\037K\276\353o\037\276\200Pr>\020\336@>\007p,\276l\320\013?\354\336\231>7\214\301>\346\036\307=3\305)\275)\213Q\275\252\322\372=\216\247n>\301\364\357\275\224\214\213<0n\245\275\237\032\351\275\017\2616\276\371\"\314>\032\031/>\026\214\313>\021\332\035\277\242\337\332>\363\034\365\276\370\302\326>T\024\207\276\344\016\223\276VY\207\276\277M5\276\376N\206>7\334:\276U\327t>qe\010\277\334\246\006\276P\365\223>\007\327\275\276{n\364\275\027\374\315>?\020\002>\200(\312\276\205N\373<\311\025\004?L3\001\277\013\016Y\276\263zh=\027\261A\277\310\001e\276\363\2401=\272\230O\275L(\230>9n\254\275w\317\363\276\010\230\340\275](\000?_\310.>y!\324=\206\320\364=\223>\377\275\312=S\276\316K\224>\234\364\">\0043E\276\310\307\325\275\3444z\275U\330i>\361\316\215\276\316\024=\277\002F\270\276\354\341\226=\017\305e\276\322\216\341>\226\206B>\307\023g\276\337\302^;KM\025\275-\261\026\277\020\337R>\301\203\017\277\243m3\276\016\377\246\275\034*G>G\233\000\276f\304\\\276\214\250\376\275\273\033/>\365E\275>\211W\205\276J\254\236>\362\216\254\276\016L\234\276\241ug\276\371\271;>cr\000\277\262\203d=\332\242.\2762\264\304>\375\327|>D\254\007\277\371\365\312>\335H\266\276\304s\201\275\352n\010>\3763\251=\210#\256>=D\210\276Z\355\366\276h\031\247\276\364|f\275\377m\305\275d\320\211>\306S\212>\364\352\214>\333\002\341\276\231\267\314=\274C\260<\027\266\237>}\375\255\276\036\305\242\275\355q\330\275nn\327>\214\273\207>m\225\035>\244\277i;a\241N\274j\004\315>&FW\275*\021+\276!O\\>f\244\274=\357\275\203\276\010w/\276\032\206\222>\211\375\034\277m\300\261\276\3307\257\275\250\317\374;M\374\303>\225I\354<\220\254\344\275g\224\312\275r=E>\314Z#\277Z\3151\276\277q\372>\246:b\275v\272\234>\324?\000?8\371K\276\014\036T>}\335<\276\262\243\261<\225t\230\275>6y\276\030\2365>K\277\215\276o\3104\275a\000\320>\223I\201>$R\003\274\304\352\">\366C\002\277]B\333=m\224\203>\356\014\014?\317/\327>\243\242\234=\305\002\002\277\013Q\016\277<,\210\276\246\374\331=\025(\333\275!\210\221\276\034h\007\277\027\306\227>eU\264\275\"\006\222\273z?.?#q\306=J\212\304>R\251\217=6\261I>\207\2653?./O\276>\357-\275\336FR\275\350\327,\276$HT\274\006\316\017\276B\
244\216\275\256g\351\276\377\301\352\276\227\240\177\276\241\234\232>#u\256>\377\006\240\274\034\350\257\275\352\321:>\037\200\023\277\305\273\223>\027~\340\276x\347\212\275Gn\313\274\372\313\022\274\226\010\212\275\"\024`>Ii\206>\006\262\002?Q\035\272\276\020e]\276H\026\316\276\004\227\332\275\275\227\361>\005\370\253>\240M\020\2766\035\014\276\310hC\275\301\355\317>}\313\347>,o\223\276\025\363\343;\252t]\274&f\352=\215\207\177>\257f\364<\235\032\024?\014\341\205;\n\006M\275\031\215\t?\003\027\215\276+4\032\276m\251\010\275\277\0031?\257\031\375\274+\366\205=\312\205\\>o\273\034>Dx\375\275\305c\230=\331\237\003\277q(g>n\302\300\276\203\200\354>Q\211\303>5\255d\025f\204\275zl\277\276\r\013\020\276\3603\252>\346g%>0\316\357=\036\310\344\275\252|\327=\360N\224>\2245\030\277!\343a=K\325\203=\320\363\030\276@\3744\276\315\232\215=\257\337\300=\306h\323=J\255\037?\225s\364>\340}\202\276\323z1>\242\370\305>@\t\000?\315\254\271=\315i\335\274$Mw\275q9\203>\234\330 \276#\241\201=\343d\037?yX\353\275\232E\340\276\020\255\222=S\210u>\027Sn>3\264l>s\352r\275SEy>v]5\275\270\"3?s\036\371\275R\266\207=U\267\345<\016\272\242\276\227[\215\276x\277O=\351\203\236>_b^<\321\004,?\204\323\264>\205\223\263=N\350\253\276\336YH\276\3370z\276[\036\335\276\206P\000\277\321E6\276\261\017\234>\273\200\306=\021\231\033>\tj\377\274\022\305\023\275\003\207&?\264\037 >\030\010S\276]1l\276\300\207\252=\367ds>$!\210>\256\341\337\276Rc\311>p\265\253=y\330\257=aS\203>Yf\365\275\001\332\224=\236\2154<5\332l<>E\320\275\372\266\360\276`\034\276\274Z\354\300\275G\024\326>8\314\216>\322D\010\277\307o5\275!|\034>\006[\216\276\360n\214\2751\000]\276\367\375\306=\002}\223\276-\001\311>\271\332\276>\266\321\207\276)\356\305\276\365\\\252;\201\2407\276wn\005=0w\312\275\257\207\006\275l\270i=\230\241\032\275\263\353&<\021\036\370>\204\236\234?Sn\307>\326\205\223\275vC\216=\211\003c\276\257\327C\275z\217\263\275\343\265K\275\330\033\014=\030\355S\276\320\212\263:\032\355\356\275\016\020\004\277\336\232\302>6\227\362=\316X\210>\275\005\">\251k\375\275\236^\251\276\245YF=>9@=i\010\312\247U$?\277\257\353\275\226\216\245=\004R\276\276\203\270\216<\322\3671>\274\"\225=~\006\331\274\3549#=\007[\366\275\265\205\354\274\302\0065<\372\215\270\276Y\262\021>]\370\253>\3743\253=D{#>\312f\237\276\277\010\000?|\020\'\276\032,m>\314e\231>0\334\355=\036\006o>\327hj\275\253\037\222\277\004\035\242\276\226|m>\206\322\035\276\317\204\211=\225\247E>\013\363\332<\351}\202>i\306\207>\024\"\347\275\260\255\237=N<\305>\331\311\202>7\242P>\334Wx>\321\032\215=yP#\2766T\010\277\256Z\226=\371+/\276\350\202A>\363\264\262\276\t\310a\274\203\'\371\276\3254:\276\005\032\010>\260\260\237\276\302#\253\276a%\211\275Q\301\255\276\354\005G?cH\373>\220!\024>\311\275\250\275\362w\254\276\343v\021\276\355\034\202\276\246P!\274n7\212=\022\204\022>4\261o>\214\343\016>j\342%\277\240G-<\206xG>\247\005\257\275\344%0?\005\025\272\275[#\304\274v\311\032=\236\362\026\276\035\301\225\276F\366\377\275\226*\220\276\024\356J>\364\315\264:+j\005\276\245Q\334=\245$\031?\210E.\274\020\315\365\276\025S\367\276\246\010\025>\326`\'?}h\201>\235\214c\276\036\342\026\275\261\202W>\016\305!\274\r\247w\275\263v\377\276#GY=\346\274\037>\344\357\"\277\364o\301\2760\0349>\236R\323=\241L\273>E\227\235\276\332\356\256\275\325\357a\275\0130\275\276k\024K\275\311\216I>\'\344\304=\272\031\300\275A\2071\276s\355\216>c\211\">l\002D\276\325\346\262>\032G(>\303\333u>\225\255{=\362\367\347\276\321\322\256\275\247|\323\276\234\311\246\275\364\003\030>z\262\202\276:P\356\273z\326T=T\253\253>?\374\304=K\201\202>\215
\035\000\276\343:\310\276\3360W=\304\3316=.5m\275g\357\202\276`\341\371>\223;0\276\3469\331>\323\365W>\023H\226\276\324o_\274\326w\355>\200)\017\277B\334=<\337\331\201=\037eJ?\200\305A\276\361t\211\276\033\364\273>\322\000\215=,\323v\276\373.\321\273\000\"\242>\215\305\270\276\365Xf=$6(\275\177\303\277>r\263\366\274\024v\346\2767\201.\276\253|\223>\261/\372\275\225\352\306\276\002\233$?6X\220>Q\245\224\275\005m\353\034\350g>\025\313\324\276\266\264<\276\317\201\277\2759\006\231>,\014\020\276nJ\303\275\311\032,\276\203\255\305\275\206\031\243>\307\206\320=/o2\276\203\277\365\276\3231J>\3410\001>\206\003\337\276\375V\212\276\262\327\005\276\226\264}\276b\250\336=\035\3479>8\237P>M0^=~\016\233\276\030|\013<\253u\007>\257\371\300\276\223g\025=\353X4\276Qw*>\372\025\244=C\224\004>\rx\024>+\262\347\275M\370\351\274\254\375\332=\272\003\262=\010W\223\275\020\214\316>3\rA\277\027a\230=i\265=>\314\205~\276`\331P\275\037\226\206>\316\020E\275\255\034\215\276IQn\276\313b\241\276(`x\276\367\357|\275\0036\261>\361\322\027Q\315\037>p\255\254>`\340N>\227\333\033>>e\337\276\323\034\317=)\326\222\276\r\347K\277\024v\205\276A\243\242>\235\300\210>C\2744>]\006/\276g\245\232\276l\000\030\276z\"\330\276 k\207\276\202\320\315>\305J\205\276yG\035\2758\301\371=\242\327\224<\313G\254\275 \034\374\275\361\005\326\275\3349\372<\300\333\234\276h\276\353=l\263@\277\232\214\245\276F\247\001\276\226*\215\276P\362\356>\211y\366\276\177j\332\276B\364\234>\270\224>\276m\246\330>u\340\307>\2573\310\276\224k\203\2765/\316<\305@,\276b\375\274=~\341\001\275\301i\017=\000\214\340\274\234)\217\276cI\203\276p|D>\347<\036\274\352\223\266=\2766\022?\221\223\212>P\375\230=:w\207\275\307\237M>\237\306\203>\225\337\320\275z\223\302\275\360\016\032\277\325\251\332\276+\343\277\275\010q\337\276\322E\224\276\267\010E>jN\225=(\177\254>\203mY\276\212z\035>\264U\250\275H\376\023?\334\375\203>\272\014\306>\017\242\013>\320A\000\276\315M\277>.\260\270>l\260\200=\263\220r>av\221>\211\377[\276\0107\242=\267\255\320\276\353\262!>\362\016/\274-\207\r?\233V\235=d\032\222\274\312\211T\2752M\016>w*%?@\272\177>d\216K>\231\270\033?\316\025B>\230\033z\276\227)\241=\265Au>I\263$>\336\326\340\276)X\307\275p\206\001\277\326\213\372\274T\265|>A\367\250>Y\207\001>\234$\375=xM\221\275\233\205\265>\021\006Q>\322\202\337>/bj\276!m\030=y>\233>\340\356\016\275\230V\221>58K\2769\217\201=\316\223\364>T&\251\276\352\350E\277\377\361\025\277\240\277\211\275}\013o\276I\242\313>\267\351t\2738\275\240\275\277j\326\276x\337\202\275\203\317\330\275NA\244>\367$2>\232wf\276\216\0232\2766\206U>\300\265\376=\355N\237;#3G\276\326\277n\275\330\0317>9\334\010?\004\005F\277\003\344\360>!\244\036\275\211\241\311\275M\201\322\275\024\221m>\365\341\244\275:\362-\276\342\347\216\276\233y\235\276\360\212\243\275\217\376\240>\"\323\206;&\2116\277\337\366\027\274\265v\022\274vW\330=\2741\370\300\323\014\275\304\013\355=\234\307\351\276\212Q\"\277B\331F>\324X]\275dJ\037>\001\307-=p\346\206\276\002hM\275Nvj>\276\202\014\277\033=s\276\270q\232>\277\205\344>\256\260K\276\007\354\025=\022p\013?\223\305\211=\333\276Y=\207\353\r>\tHs>)\030\007\277$C\r\276\214L\275\276\223\317->y~\000>\005U\003\277W\223y\276\006\022\305\276\275\3159;\342\231n>\266\347\026\275\254T\263>\037\344\"\275:\242\345=\033\r\333>\341\006\301>\207\177X>4\301\313\275\262a\310=r]t\275\366\353\016=\275I^>\305X\264=x_\017>\376\373\217>\274q!\276\357\351G\276?\346\204\276Vy?\276\017\354\177>J\325#\276\357\001\016>\353K\022\276\337\0358>\255\334w\276\253\017\372=\303\262w\275=\r\342>\022X\333>\336\'.>\353t\200q\240,\276?}\303\2
76\267\250s>O\265\332=q+\272;T&/>Mb\234>\363p\367=a9\261\275\367~\232>RM\237=\356_\010>\377\317==?>u>\350\326\203\276<\347\t\277zy)\275\231\354r\276~\301k\276\357K\010\277\322h\360>\213\365\034>\203\345\007=]so>V\364V\273\347]\260\274S<\305\276\2623\313\275q4\247\276&\331T=\351\202\276\275\332\257\275\276\331W\'>\005\030K\276\326\230I\276\303\322\347\275_\010\255>dc\313>1*\236=\335\353\253\276\024(q\276\226.\205>S\271\002\275.\250\256\276\304\270\273\276\013\201M>\032\354\\>[vW>\275\2501\277_5\376>\337\276w\276a\256H>\034z\302\274n\347\221\275\312\243\224Fj%\275\202\366\006\277\252\377\354=\353\3703>U\364\277;0\202\006=\275)\212>(\243\324=W\377h\276\361\253&?\304\375\007>X\220e\275\'\276\264>\246\311\365:\223\025\355\276s@5>\023\324@?\366\316\004\275\367\243=\276o\364W\276y\352\230>\341\n%\277\321\266\036=\352\252\356<\317\243\332=\272>\236\275JD\313\275\343\305k\275\364\310n>\335\3745\276/\376\300>\361\237\330>5\252\031\277!\277\253>e\321\243\276 S.>3\244*?\262c&\276\021\025\005?\271\370,>H\246,\276&?k\276\1777\203\276\034\267\264\274\177X\250>l\235\235\276nJ\033\2768}\325\276IF\352>\371\312\263>\234Z==\013@O>\030\346\271> \204]\276O\210\034?\016;6?\225`\317>\346\332\201\276\2371|\276\2675!\274g\370\000\277\256`\365>\332c\226\276B\377\345>8\2349\276\370s\274\274\000r\036\030S\273\275\220M\366\275\207\326{>\264p\227>\004\t\353\276s\277\001=]\220\252<[X\343;\346\0141\275\215\273\301\276J^\251>\000k\305>\236\030\236\275\375\315R>b\023\205>\304\025\027>\003\003A\275y\213I\276m\277;>q\213 >\213G\254>\306\004\273\276\030\243\263\275\2060p\276\177@\304>Fl\026=\303\035\225\275\216\024\010\276\200\005\032\276\374\204\303=\201U\005?\242\201\025\277\277B\327>6}8\275:\356\351\276F\317\225>\337\214\314=\211\275\007w\213\214<\335Md>\020\343\023<\320Vg>\363\327\244\276t\365\243\276\343\373\001\341\275Jt\260\276\006N\301<\017\304\214\276\315\221\264:x0|\276\307\333\274\276Z*2\275g\226\004=\201$\203=,\367\010\276r\177\212\277L\032\254\276@\337\004=e2\377\274\251g\246=\352\200)\275\205}D>\004\274\211=\343\244\007?\270_0\276=\266g\276?1\026>\207\341@\276\334(\253>\001\263\222=\334\206\303=\372Y\247\276\020\253k>\377\347{>R\n\206>\364\272\203\276Y/\343>32X\276\344!\010?:Hz>\320\362F>U\372\240\273Ab\005>\312\360\314\276\274\365\245\275\013\"\014=\234e\3037\336\202\350(Z=\257\366\241>\271\342S\276\243\321\020\276\362)X>\027A$?q\237\002=\234\304V\276\344\220\035?\014\353\216\276\2103\371\276y\350\315\275\002\371\242\276\274kl=\017\003y>%\034z>\363\231Q\275*\277\353\275\234\221\215>*j\243>w\364e\276\230\203H\276\234+9>w\211\276>!\313\307=|\311\213>\325\001\221>\2341\375\276r\213\210\275\320\003\210=\216\336\250\275\025\267\\\276\247\326\364=\332\335\315\276M\271n>\027\326\026>\004\233\212>\336\372L\276t\0276\275*\331\n>{\010\321>D\001\250\274L{\212\276R%\231\276R\361?>\031\234P\276\330,}>Sz,\277\221\024\370=\372\246\322=\271+\032\275\256\206m\276\266\021\326\275\263\321\324\275\022\247\021>\036\020\261=\035\232\341>\332!}>hWM\276\333\305\\\276e\212\010<\260\243\023>5ap>\376\234\004\275\2010\034?\230\316\336>\203.\037\276jQ\264>\177\316\345\273\034\365\267>\343N\214>\203\027\200\276>\236\257=\240\225+\273mT\330>\351\342\r=%\356\256<\331\376(>\255\"\030\277F#\246=5tT\276\303\021\366=\255\222t\276,T\005\277\360\002\226\275\362\'\002?\t\005!\276\367\\\335=\255\306\233\275O\002\353=r\300\017=h\252\345\274\360\210\232<\233\232\331=\354}\254>\014\332>?\302\315\237\275O\227\322\276\376!B\2763\006\260\276O\362\304\275w\225\016>W\232\230>\326F\274>k^!>\034\025q\276\265\235\036\276l\024\227>q\3722\276f\266\205>/2V>\331\3749
\276\004m\036\276^\251\232\276\034b\363\276\272\224\274\276\273\275\256>\345\301\366=.)\270\276d\316\333\276{\2246\277\360\245\'?\222\275\220>\212~\305>\320\205\245,\275\233|C\276\306_r\276\242&%\276\372\276M\277\021\365\215\273l\205\272>\032\000\253\276\217\277\003>c\315\302\276\216\352/?B\264&?\370\n\313\274v\254\023\276sm\303\276F\360\206=\333\240\335\275\352W\213>\324\004\035>\334|e=\260\211\207=\325\315\201\276\245\177\r>\313\350\304\275E\344\255\276\265\213\207=\200\334\2247\374\201\341>\270\311\265\276VO8>\252\017\225=f\333 >\004\032\266<\276Y\343\275\270\356N\276\220m\035\277\270&\253\276\005\005\227\275\r\ni\276\036yP\275\342\026\021>\211s\307>\324{Z>\265\300\202\2748z\022>bf\261\276\032AX\275E\241,>\342\243\217\276\245~\226>i\211]\276*\000\341\276T\014\202>\213\240\225\276O\340\342>3\266\022>K\236\234\276\305cq>\032\211\314\275Jg\306>\230\207\212\275\236\016\n\275p\020\324\275\244\311#=p\201\216\275\222\233\247\276\260\004\357\275\373\232\277=\336\226\352\276\010\n\315:3\276\271\272\0331\364>_V`\276\273a>>\300o\201\275\321\362\001>\212}\205\276\220\325\202\276$\r\027=3\206\206>\031\242_\276e\217\362\276\361\262\333\273\273yZ\275\246\273\035?\210V\266>\272|H\276\022\003\305\276l\314\210=$\336\306\275=\017d>\262\321\226>A\247\355\276\016u\313=\326ao\275\300\001\177>\321\215\372>@L\325=\336\226\346>\356\260\232>B*a\273#\254B>\036\036\212>\206\032\033>\302\177g\276\366J\220\275\372\325c\276\324\251}=\254qy>\t\035\200=\016(M>t\201\210>\033$W\276\327\344\370;~`\000\2774d\205\275\321\237\343>~\r;>|\216\353\276}Q\315\275\014\230\023\276\237\035\242>\376\231\033>C\247\001>n0C\277.\326.=\220\225\304\275\327b\275\276\224\206\213>\273S\325>}\355\254\276\036v\300\275\2470\247>}\277\013\275\326\337u>\031\355\305\274\216\225c>\375H\204\275*\277\217>\340_\"\276\277\331\333\276\332\"X>+\215\320>p\326\314>\3756\253\276x\267\030\277\341U\256\276\031>\030>k\227\321\276\265\242\027=`\337\356\275\215\300\305\276\033\346\202=yl\203\276\347\273\307=\216\362O>\304\241\030\276r\224\251>\345t\302\275m\033b\276\352\301\232>\r\341<;\255W\234\276\332&\324\275\364\247\\\274\004C\300\275\371fv\276\372.P\276\234\007\357=\373)I\275*4N\276K\243\246\276\237\363b=fN\037>\235\2041\276B\024\317=Y!\252\275\225+\034=\264\006\245=\342\214\213\276@]\263\276y\362\270\276J\353z>\276\006\223>B\226&\276WO\354\275\217`\252\275}\004\036\2774y8\276\257?\313\275\337\204&?~\223\377=!\203Z\277(\022\340\274\337\241\215\2765\035\005\277|u\024>\264\370\224\276\277\314==u\251\332\276\313\366\344\325\224\274W\237\007>\226\323\207>5qP\275\022I\177\275\304^\375\274\366\203\321>\214s\026?\377\257\331=F\200O\275\327%\266\275\327R\004\276\277g\230\2762S%\276\000MN\276`\003\233\276\353RZ>\264\207\036\277Y+\240>2\323\205\276v\367\211>/d\275;~\206\266>\212>\374\276\271\232\032\277`n\004\277k&\003>\255/\265;\223{\367\276\244`\013\277v\346\n>\217k\304>\350\232=\275D\222T\275IJ\026\277\302\355\001\277S\275\261\274}V*\276f(\014\275\026{\227>\021k5\277\233g\271\276\215!(?K1\031>\021\351\347>\357\263s>\252b\332>yb\007\276Y\351\202\2754\251\373>\235\357u\276T 
\016=\362\t\003\277|\270\275=\300\002\267>\005\225\377\276/\024\347=f\265K\276\260_\227>\340\337\263\275B*\357\275\007\270\030\277u\213\200\276K(;>\3520S>\026S\014>]\034\310=\236\347\373=\270\374\341=\277$\272>K\212l>\367\263\264\275\032\242\216\273\376>m\276(\330\347>GM\317\276\257\216l\276\311\224\261\276\363\357\200\276\315.\036\275$\305\000?\227ju\276\332\232\004\276\272y+\276\n\201H\276l\003\013?\304Eh\275U\327\t?\264.\257\276*\'C\276\264\340=?1\033\233\276s\216|>w\356\007\277\203r2>n\026\310\276\313\242/\276\352\002=\275\225\332z\274\"-\202\276a\261\240<\330\362\240>}\354\327=\333&\023>\325\313\364\276\321*\004z\002>O\254\246>\267\016I=\021\360\r=\240D\210\275\\`\324>[\212/\276\247\203\376\275_\"\241=\264\230\023?\366\222\317>8-\323\276\350\224\376=\345\010@\276\246\017\341>\371\217\262\276\\\332C\276\356f\315\276\213\374\252\276;\2435>E:\214;\377\341\205\276=\257\306\276]$\371>\351\363\337\275\335\356_?\021\211\005=H\203R>\304Z\226\276\251N\026?\235\334\363\273g\t\005>\231\235\026?J\223\342\276R\365\314=e-\372=x\344j\276\002g\350>l9\321\275zB\020\277,SC\277\300@\253>\251x\030\274\266}\301>\300\312r>\304\013\251\275lJ\202\276.\323\r=\311x\224\276u\272I=\254\320\342\275m\222\311=:\233\272\275?\2570=A\005:\276\313Q\247=&xJ?(<\"<>\261\351>\243`\025\275\272\"\224\276n\0078=\013\206\302>&l\305\275)\364\371>\030\017\361\276\231z\377=%\026I\276\264\3651>\350F\316=W\234\321>\357V\377>\005\005\200=\373\250\347;\302\024\332=\323\215(>`\271d\276\303\250&>Uo\362\275\270m\t\276\372\213\374\276\305cl>7/:\274\t\273\371>=`\204>ze6\277\215\003S>;\266O>\210Z\210\276\263\360\273\275\345Ao\275\005\016\330\275\345\344/\276\017\346\335\275\345\2562\277\213\316\264=1\335\010>\247\247\326>\016_\211\274\321\360\254=\342\342\311=\3442\240\276P\177]\276\356\004Q>z\334\211>=\227\251>*\'\372>\n\243\207\274\3636\206\273Kx\270=\203\276\307>\026o\022\275D\213\272\276\360\363!\276\307\016\370>\217/\036?C\227Z\275\\V\337\275bi\277\276\374\233E?\230\276\270=\255\206\320=7b\301>\377t\242>/\277\330\276C\036\210>b/*=^\236\273>u9\005>\326\361\003?_\352H>\373\372\277\276U\325\311>\232\325\270\276\026\2411\276v\354r\276\036=\212>5\331\033>\275\374\217\276m5\257\276\216\2247>Q\362(>\262cW>\374F\031;\241\177Q>\300he\276\314\262\306\275\257\276\205>!\367\360\275\360\233\277\276\202\035\223=V\014\212=\362\344\364\274C\337Q\276\332r\245\275\265\344\003=\025\346\234>\206s\t>\036\032\032\276\213\370\264>G\020\350>n\353y\274:I\321;\242\245:>\366\002\022\277\263\374y*\372(?\315\230\231\275\333z\207>\031\217/>fm\346>XIE\276\344\360\034?\'j\347\276<\306\332\275-\242\340\275\023\275[>\034\344\227\276\315\304\322=\301\'\302\276\234!\250\276\252\216r\255\351*\2749\201\033\276\3064O?_[?\276iF\245\276\334}S;a\203\342>i\316\023>\002\203\005?\357p\214\276\205\335\303\275\370?g\276\333\367\214\276\237\243\303>\345Q\241=\252i\001\277\237\237\366=T\345\177>\266\214\242=^M\n\277\373g;\277\245\342\345<\312\213\007=\306\306\">)*=\277B\303\234\275i\320->y\346z\275\367k\025=\314\250\350\2761\002\257\276\245\273\320\274|\007\246\274\320X\204\276s\230\n=0\210+\275\314R\007>\241i\210>\362z\330>Dj\031\276.\261\227\275$\362\264=\221\203\023\276Jk\323>B\3442>\034\242H\276\030\276v=\213\372\260<\2427\002>~\022\277>\022\330\237\276\025\277V>\264\317\347\275bZ\265=\326l\270>w\241\341>\216\346\221>\032=\213\276\366\237\021\276\222F\277>\277\270\005\274\354\016\373=@\230\354\276\376\351\315\275+$L\274O\267\232\276\035\036\027\274e\224\247\275>\023\263>~\354\235>\013\361R>\230h-\276\035N\242=\035j\321;G\264\253\275\245\031\257>\233\253}\276lg>>3\326\372\276\343\
204k=L\363;\276gO\242\276\003\324\275>\203\246\363>)X\036\277\230\354\343;\327\312@=\235\341\010\276\373n\325>\235\307\322\275p#\357\276\375%a\275\277\026\215=\007\036\316\276\331\373\252\275\360`\240\276\242\324\233\276u\251\333>M\331\321\276\253M\000\275x:\021?n6\037>\262\253\240\2762\275\343\274\224O+\277\253\211\341\275*\370\207\276#\320\321=\351\373\246\276\034\2518\276e\351\266\275\331\233\243<\247\t\234>~\254\334>\331`\262\275\361\215\031\275nB\372>b\007\225\276P\230\370\006\260\251\276{\272\262>\270\035{\276\255\321e\276\220\326\370>\310T{\274\315J\027?\303-\203\274\202V;\276c\365y=8K\237\274\020Z\24391N%=r\204i>\371J\261\275d\242E=\"2\221>\336\335\260=\234\330\265=fH\370>\273Z\371=\327Mq\275Fz\026>\007\372\233>$\207>=ks\324\2754\330\361\275\030m\t\277\025sV\276\263Q\243\276\255\206)>Y\247\373=>J\342=\342\342\'>\2167g\275\261\023|>\001\327a\275\255\007\027\276\2058\236\276s\262`?\237\211j>\201\260G>d\203\311\2750b\212\272\303j \277L\302\214>\300\221=\276Y\3564\276\305\010\223\276\247\027\'\276$\032\213\276E\223\024\276\237\313J\276" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/candidate/kernel/read" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/candidate/kernel" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/candidate/bias" - op: "Const" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - dim { - size: 96 - } - } - tensor_content: "\261\272\026>\211\326\370=\342\317\022\276\352\304\261\276t\002.>\\\nt\030\273E=\3218w\276\341\271q\275\217\341\n=\274.\235=\262\321\326\276\213\237\304\275,\020\014\276\024\273[>\267\321%\276u\317\217>i\000\252>X\2736>\364\351\001>Y\231E>\301\023\256=x4\250\276\030\321\305\274\0017\326\275\264y\215\276\211\207\271>w\333w>Z\224q>^\260\006>jX0\2769\256\177>T%!\277\223I\207>\302\000/?V\360z\276X\020\036\276}\342\334\276\\O*=\300\177\244>\374\245\237\276~\010\035>\276\221\253\275z\316\215\276bhN\276\274\201\324\276\030wf>W\324\271=\r\242\230=\365ao=q\215\221?$\344K=\235\022\240<\n\357\314>\313\240Z>2\274\026\275\372Vc>\374\320\354=\320\323T\277w\253c\276#o\252\275\337e\347>\321N\332=\264\3253>\334\302\242>Ai\036\275!/\006?\304)\006\276n6\325=\250\265Z>2\341\020>\244\203\247\276\302\212\203\274\026\263\345>\030k%=6\262\277>\257_\227>F\233\3007\370\200<3\211+>\244qD\274\231w\240=\211q6\274\200\027\275\274\261X\314\275\361\345#>\366\006c\2759\264\237>\027\t\027\276\202\316m=" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/candidate/bias/read" - op: "Identity" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/candidate/bias" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/concat/axis" - op: "Const" - input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/concat" - op: "ConcatV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayReadV3" - input: 
"rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity_3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/concat/axis" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/MatMul/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/gates/kernel/read" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/MatMul" - op: "MatMul" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/concat" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/MatMul/Enter" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "transpose_a" - value { - b: false - } - } - attr { - key: "transpose_b" - value { - b: false - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/BiasAdd/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/gates/bias/read" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/BiasAdd" - op: "BiasAdd" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/MatMul" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/BiasAdd/Enter" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "data_format" - value { - s: "NHWC" - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/Sigmoid" - op: "Sigmoid" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/BiasAdd" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/split/split_dim" - op: "Const" - input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/split" - op: "Split" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/split/split_dim" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/Sigmoid" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "num_split" - value { - i: 2 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/mul" - op: "Mul" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/split" 
- input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity_3" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/concat_1/axis" - op: "Const" - input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/concat_1" - op: "ConcatV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayReadV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/mul" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/concat_1/axis" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/MatMul_1/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/candidate/kernel/read" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/MatMul_1" - op: "MatMul" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/concat_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/MatMul_1/Enter" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "transpose_a" - value { - b: false - } - } - attr { - key: "transpose_b" - value { - b: false - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/BiasAdd_1/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/gru_cell/candidate/bias/read" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/BiasAdd_1" - op: "BiasAdd" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/MatMul_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/BiasAdd_1/Enter" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "data_format" - value { - s: "NHWC" - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/Tanh" - op: "Tanh" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/BiasAdd_1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/mul_1" - op: "Mul" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/split:1" - input: 
"rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity_3" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/sub/x" - op: "Const" - input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - } - float_val: 1.0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/sub" - op: "Sub" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/sub/x" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/split:1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/mul_2" - op: "Mul" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/sub" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/Tanh" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/add" - op: "Add" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/mul_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/mul_2" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayWrite/TensorArrayWriteV3/Enter" - op: "Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray" - attr { - key: "T" - value { - type: DT_RESOURCE - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/add" - } - } - } - attr { - key: "frame_name" - value { - s: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/while_context" - } - } - attr { - key: "is_constant" - value { - b: true - } - } - attr { - key: "parallel_iterations" - value { - i: 32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayWrite/TensorArrayWriteV3" - op: "TensorArrayWriteV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayWrite/TensorArrayWriteV3/Enter" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/add" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity_2" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/add" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/add_1/y" - op: "Const" - input: "^rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/add_1" - op: "Add" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Identity_1" - input: 
"rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/add_1/y" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/NextIteration" - op: "NextIteration" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/add" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/NextIteration_1" - op: "NextIteration" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/add_1" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/NextIteration_2" - op: "NextIteration" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/TensorArrayWrite/TensorArrayWriteV3" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/NextIteration_3" - op: "NextIteration" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/gru_cell/add" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Exit_2" - op: "Exit" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Switch_2" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayStack/TensorArraySizeV3" - op: "TensorArraySizeV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Exit_2" - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayStack/range/start" - op: "Const" - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray" - } - } - } - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayStack/range/delta" - op: "Const" - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray" - } - } - } - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayStack/range" - op: "Range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayStack/range/start" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayStack/TensorArraySizeV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayStack/range/delta" - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayStack/TensorArrayGatherV3" - op: "TensorArrayGatherV3" - input: 
"rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayStack/range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/while/Exit_2" - attr { - key: "_class" - value { - list { - s: "loc:@rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArray" - } - } - } - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "element_shape" - value { - shape { - dim { - size: -1 - } - dim { - size: 96 - } - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Rank_1" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 3 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range_1/start" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 2 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range_1/delta" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range_1" - op: "Range" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range_1/start" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/Rank_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range_1/delta" - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat_2/values_0" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 2 - } - } - tensor_content: "\001\000\000\000\000\000\000\000" - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat_2/axis" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 0 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat_2" - op: "ConcatV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat_2/values_0" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/range_1" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat_2/axis" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_INT32 - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/transpose_1" - op: "Transpose" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/TensorArrayStack/TensorArrayGatherV3" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/concat_2" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "Tperm" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/ReverseV2/axis" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim 
{ - size: 1 - } - } - int_val: 1 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/ReverseV2" - op: "ReverseV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/bw/bw/transpose_1" - input: "rnn/stack_bidirectional_rnn/cell_0/ReverseV2/axis" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/concat/axis" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 2 - } - } - } -} -node { - name: "rnn/stack_bidirectional_rnn/cell_0/concat" - op: "ConcatV2" - input: "rnn/stack_bidirectional_rnn/cell_0/bidirectional_rnn/fw/fw/transpose_1" - input: "rnn/stack_bidirectional_rnn/cell_0/ReverseV2" - input: "rnn/stack_bidirectional_rnn/cell_0/concat/axis" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } -} \ No newline at end of file diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/fake.pb b/tools/mo/unit_tests/moc_tf_fe/test_models/fake.pb deleted file mode 100644 index ae05864994afaf..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/fake.pb +++ /dev/null @@ -1,2 +0,0 @@ -dcfsdcdsdcs -cscscsc \ No newline at end of file diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/future_op.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/future_op.pbtxt deleted file mode 100644 index d5fdcce4a90dd0..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/future_op.pbtxt +++ /dev/null @@ -1,58 +0,0 @@ -node { - name: "in1" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "in2" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "future_op" - op: "FutureOp" - input: "in1" - input: "in2" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_add_with_undefined_constant.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_add_with_undefined_constant.pbtxt deleted file mode 100644 index 37dd135ccfd6ea..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_add_with_undefined_constant.pbtxt +++ /dev/null @@ -1,58 +0,0 @@ -node { - name: "x" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "Const" - op: "Const" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - dim { - size: 3 - } - } - } - } - } -} -node { - name: "add" - op: "AddV2" - input: "x" - input: "Const" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_add_with_undefined_constant.py b/tools/mo/unit_tests/moc_tf_fe/test_models/model_add_with_undefined_constant.py deleted file mode 100644 index 68d041f3a32619..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_add_with_undefined_constant.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# 
SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -# Create the graph and model -with tf.Session() as sess: - x = tf.placeholder(tf.float32, [2, 3], 'x') - const = tf.constant(value=[], dtype=tf.float32, shape=[3], name='Const') - tf.add(x, const, name="add") - tf.global_variables_initializer() - tf.io.write_graph(sess.graph, './', 'model_add_with_undefined_constant.pbtxt', as_text=True) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool.pbtxt deleted file mode 100644 index 3078a20607e251..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool.pbtxt +++ /dev/null @@ -1,52 +0,0 @@ -node { - name: "in1" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_BOOL - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "in2" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_BOOL - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "LogicalAnd" - op: "LogicalAnd" - input: "in1" - input: "in2" -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool.py b/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool.py deleted file mode 100644 index 8bf6c0af751744..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -with tf.Session() as sess: - x = tf.placeholder(tf.bool, [2, 3], 'in1') - y = tf.placeholder(tf.bool, [2, 3], 'in2') - tf.math.logical_and(x, y) - - tf.global_variables_initializer() - tf_net = sess.graph_def - -tf.io.write_graph(tf_net, './', 'model_bool.pbtxt', True) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool2.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool2.pbtxt deleted file mode 100644 index b0070f8f9bf768..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool2.pbtxt +++ /dev/null @@ -1,70 +0,0 @@ -node { - name: "in1" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 3 - } - } - } - } -} -node { - name: "in2" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 3 - } - } - } - } -} -node { - name: "cond" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_BOOL - } - } - attr { - key: "shape" - value { - shape { - } - } - } -} -node { - name: "Select" - op: "Select" - input: "cond" - input: "in1" - input: "in2" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool2.py b/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool2.py deleted file mode 100644 index 08aa4ca20c5b90..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_bool2.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -with tf.Session() as sess: - x = tf.placeholder(tf.float32, [3], 'in1') - y = tf.placeholder(tf.float32, [3], 'in2') - cond = tf.placeholder(tf.bool, [], 'cond') - tf.where(cond, x, y) - - tf.global_variables_initializer() - tf_net = 
sess.graph_def - -tf.io.write_graph(tf_net, './', 'model_bool2.pbtxt', True) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_fp32.frozen b/tools/mo/unit_tests/moc_tf_fe/test_models/model_fp32.frozen deleted file mode 100644 index 3343e4106f837c..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_fp32.frozen +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:8a33c91148b5e72ca03608c7d2ee18229ee4b610344dadd6896efeb6ac7b93e0 -size 141 diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_fp32.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_fp32.pbtxt deleted file mode 100644 index 57d5a6008dc469..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_fp32.pbtxt +++ /dev/null @@ -1,58 +0,0 @@ -node { - name: "in1" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 2 - } - } - } - } -} -node { - name: "in2" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 2 - } - } - } - } -} -node { - name: "add" - op: "AddV2" - input: "in1" - input: "in2" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_fp32.py b/tools/mo/unit_tests/moc_tf_fe/test_models/model_fp32.py deleted file mode 100644 index 11f6fe9b7c2b5c..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_fp32.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -with tf.Session() as sess: - x = tf.placeholder(tf.float32, [2, 2], 'in1') - y = tf.placeholder(tf.float32, [2, 2], 'in2') - tf.add(x, y, name="add") - - tf.global_variables_initializer() - tf_net = sess.graph_def - -tf.io.write_graph(tf_net, './', 'model_fp32.pbtxt', True) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_int32.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_int32.pbtxt deleted file mode 100644 index acfcb5f54417f9..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_int32.pbtxt +++ /dev/null @@ -1,58 +0,0 @@ -node { - name: "in1" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "in2" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "add" - op: "Mul" - input: "in1" - input: "in2" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_int32.py b/tools/mo/unit_tests/moc_tf_fe/test_models/model_int32.py deleted file mode 100644 index e26dd475677576..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_int32.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -with tf.Session() as sess: - x = tf.placeholder(tf.int32, [2, 3], 'in1') - y = tf.placeholder(tf.int32, [2, 3], 'in2') - tf.multiply(x, y, name="add") - - tf.global_variables_initializer() - tf_net = sess.graph_def - -tf.io.write_graph(tf_net, './', 
'model_int32.pbtxt', True) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_mul_with_undefined_constant.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_mul_with_undefined_constant.pbtxt deleted file mode 100644 index e4a6470f3d2ab6..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_mul_with_undefined_constant.pbtxt +++ /dev/null @@ -1,52 +0,0 @@ -node { - name: "x" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - } - } - } -} -node { - name: "Const" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - } - } - } -} -node { - name: "mul" - op: "Mul" - input: "x" - input: "Const" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_mul_with_undefined_constant.py b/tools/mo/unit_tests/moc_tf_fe/test_models/model_mul_with_undefined_constant.py deleted file mode 100644 index 5d11e167633675..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_mul_with_undefined_constant.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -# Create the graph and model -with tf.Session() as sess: - x = tf.placeholder(tf.int32, [2], 'x') - const = tf.constant(value=[], dtype=tf.int32, shape=[], name='Const') - tf.multiply(x, const, name="mul") - tf.global_variables_initializer() - tf.io.write_graph(sess.graph, './', 'model_mul_with_undefined_constant.pbtxt', as_text=True) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_oneshot_iterator.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_oneshot_iterator.pbtxt deleted file mode 100644 index 68278cb12f06f0..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_oneshot_iterator.pbtxt +++ /dev/null @@ -1,129 +0,0 @@ -node { - name: "OneShotIterator" - op: "OneShotIterator" - attr { - key: "container" - value { - s: "" - } - } - attr { - key: "dataset_factory" - value { - func { - name: "_make_dataset_Ap6cSkjEDjc" - } - } - } - attr { - key: "output_shapes" - value { - list { - shape { - dim { - size: -1 - } - dim { - size: 224 - } - dim { - size: 224 - } - dim { - size: 3 - } - } - } - } - } - attr { - key: "output_types" - value { - list { - type: DT_FLOAT - } - } - } - attr { - key: "shared_name" - value { - s: "" - } - } -} -node { - name: "IteratorGetNext" - op: "IteratorGetNext" - input: "OneShotIterator" - attr { - key: "output_shapes" - value { - list { - shape { - dim { - size: -1 - } - dim { - size: 224 - } - dim { - size: 224 - } - dim { - size: 3 - } - } - } - } - } - attr { - key: "output_types" - value { - list { - type: DT_FLOAT - } - } - } -} -node { - name: "Const_2" - op: "Const" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_FLOAT - tensor_shape { - dim { - size: 1 - } - dim { - size: 1 - } - dim { - size: 3 - } - } - tensor_content: "\232Y\367B\\\217\350B\\\017\317B" - } - } - } -} -node { - name: "sub" - op: "Sub" - input: "IteratorGetNext" - input: "Const_2" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_switch_merge.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_switch_merge.pbtxt deleted file mode 100644 index 
c5603bb633fd3e..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_switch_merge.pbtxt +++ /dev/null @@ -1,134 +0,0 @@ -node { - name: "x" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "y" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "is_training" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_BOOL - } - } - attr { - key: "shape" - value { - shape { - } - } - } -} -node { - name: "Switch" - op: "Switch" - input: "x" - input: "is_training" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "Relu" - op: "Relu" - input: "Switch" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "Sigmoid" - op: "Sigmoid" - input: "Switch:1" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "Merge" - op: "Merge" - input: "Relu" - input: "Sigmoid" - attr { - key: "N" - value { - i: 2 - } - } - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "AddV2" - op: "AddV2" - input: "Merge" - input: "y" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "init" - op: "NoOp" -} -versions { - producer: 808 -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_switch_merge.py b/tools/mo/unit_tests/moc_tf_fe/test_models/model_switch_merge.py deleted file mode 100644 index 5a261534ce8ddb..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_switch_merge.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -with tf.Session() as sess: - x = tf.placeholder(tf.float32, [2, 3], 'x') - y = tf.placeholder(tf.float32, [2, 3], 'y') - is_training = tf.placeholder(tf.bool, [], 'is_training') - switch = tf.raw_ops.Switch(data=x, pred=is_training) - relu = tf.raw_ops.Relu(features=switch[0]) - sigmoid = tf.raw_ops.Sigmoid(x=switch[1]) - merge = tf.raw_ops.Merge(inputs=[relu, sigmoid]) - tf.raw_ops.AddV2(x=merge[0], y=y) - - tf.global_variables_initializer() - tf_net = sess.graph_def - -tf.io.write_graph(tf_net, './', 'model_switch_merge.pbtxt', True) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_three_inputs.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_three_inputs.pbtxt deleted file mode 100644 index 86399c2ffbe3bd..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_three_inputs.pbtxt +++ /dev/null @@ -1,84 +0,0 @@ -node { - name: "x" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 3 - } - } - } - } -} -node { - name: "y" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 3 - } - } - } - } -} -node { - name: "z" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 3 - } - } - } - } -} -node { - name: "add" - op: "AddV2" - input: "x" - input: "y" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "multiply" - op: "Mul" - input: "add" - input: "z" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} diff --git 
a/tools/mo/unit_tests/moc_tf_fe/test_models/model_three_inputs.py b/tools/mo/unit_tests/moc_tf_fe/test_models/model_three_inputs.py deleted file mode 100644 index 615d5c0374b223..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_three_inputs.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -with tf.Session() as sess: - x = tf.placeholder(tf.float32, [3], 'x') - y = tf.placeholder(tf.float32, [3], 'y') - z = tf.placeholder(tf.float32, [3], 'z') - add = tf.add(x, y, name="add") - tf.multiply(add, z, name="multiply") - - tf.global_variables_initializer() - tf_net = sess.graph_def - -tf.io.write_graph(tf_net, './', 'model_three_inputs.pbtxt', True) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_convolution_dynamic_rank.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_convolution_dynamic_rank.pbtxt deleted file mode 100644 index 366e3bf6ef7298..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_convolution_dynamic_rank.pbtxt +++ /dev/null @@ -1,124 +0,0 @@ -node { - name: "x" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - unknown_rank: true - } - } - } -} -node { - name: "kernel" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - dim { - size: 2 - } - dim { - size: 3 - } - dim { - size: 1 - } - } - } - } -} -node { - name: "Conv2D" - op: "Conv2D" - input: "x" - input: "kernel" - attr { - key: "T" - value { - type: DT_FLOAT - } - } - attr { - key: "data_format" - value { - s: "NHWC" - } - } - attr { - key: "dilations" - value { - list { - i: 1 - i: 1 - i: 1 - i: 1 - } - } - } - attr { - key: "explicit_paddings" - value { - list { - } - } - } - attr { - key: "padding" - value { - s: "SAME" - } - } - attr { - key: "strides" - value { - list { - i: 1 - i: 1 - i: 1 - i: 1 - } - } - } - attr { - key: "use_cudnn_on_gpu" - value { - b: true - } - } -} -node { - name: "Relu" - op: "Relu" - input: "Conv2D" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} -node { - name: "init" - op: "NoOp" -} -versions { - producer: 808 -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_convolution_dynamic_rank.py b/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_convolution_dynamic_rank.py deleted file mode 100644 index a527c391db4ad2..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_convolution_dynamic_rank.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -with tf.Session() as sess: - x = tf.placeholder(tf.float32, None, 'x') - filter = tf.placeholder(tf.float32, [2, 2, 3, 1], 'kernel') - - conv2d = tf.raw_ops.Conv2D(input=x, filter=filter, strides=[1, 1, 1, 1], padding='SAME', - dilations=None) - relu = tf.raw_ops.Relu(features=conv2d) - - tf.global_variables_initializer() - tf_net = sess.graph_def - -tf.io.write_graph(tf_net, './', 'model_with_convolution_dynamic_rank.pbtxt', True) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_if.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_if.pbtxt deleted file mode 100644 index 735ef4a32c89f0..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_if.pbtxt +++ 
/dev/null @@ -1,320 +0,0 @@ -node { - name: "x" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 2 - } - } - } - } -} -node { - name: "y" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 1 - } - } - } - } -} -node { - name: "Const" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - } - int_val: 10 - } - } - } -} -node { - name: "Greater" - op: "Greater" - input: "x" - input: "Const" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} -node { - name: "Const_1" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 1 - } - } - int_val: 0 - } - } - } -} -node { - name: "All" - op: "All" - input: "Greater" - input: "Const_1" - attr { - key: "Tidx" - value { - type: DT_INT32 - } - } - attr { - key: "keep_dims" - value { - b: false - } - } -} -node { - name: "If" - op: "If" - input: "All" - input: "x" - input: "y" - attr { - key: "Tcond" - value { - type: DT_BOOL - } - } - attr { - key: "Tin" - value { - list { - type: DT_INT32 - type: DT_INT32 - } - } - } - attr { - key: "Tout" - value { - list { - type: DT_INT32 - } - } - } - attr { - key: "else_branch" - value { - func { - name: "else_branch_func_Fw4jHLGozIk" - } - } - } - attr { - key: "output_shapes" - value { - list { - } - } - } - attr { - key: "then_branch" - value { - func { - name: "then_branch_func_mdn8Hcdd6RQ" - } - } - } -} -node { - name: "init" - op: "NoOp" -} -library { - function { - signature { - name: "then_branch_func_mdn8Hcdd6RQ" - input_arg { - name: "x" - type: DT_INT32 - } - input_arg { - name: "y" - type: DT_INT32 - } - output_arg { - name: "add" - type: DT_INT32 - } - } - node_def { - name: "add_0" - op: "AddV2" - input: "x" - input: "y" - attr { - key: "T" - value { - type: DT_INT32 - } - } - experimental_debug_info { - original_node_names: "add" - } - } - ret { - key: "add" - value: "add_0:z:0" - } - attr { - key: "_disable_call_shape_inference" - value { - b: true - } - } - arg_attr { - value { - attr { - key: "_output_shapes" - value { - list { - shape { - unknown_rank: true - } - } - } - } - } - } - arg_attr { - key: 1 - value { - attr { - key: "_output_shapes" - value { - list { - shape { - unknown_rank: true - } - } - } - } - } - } - } - function { - signature { - name: "else_branch_func_Fw4jHLGozIk" - input_arg { - name: "x" - type: DT_INT32 - } - input_arg { - name: "y" - type: DT_INT32 - } - output_arg { - name: "sub" - type: DT_INT32 - } - } - node_def { - name: "sub_0" - op: "Sub" - input: "x" - input: "y" - attr { - key: "T" - value { - type: DT_INT32 - } - } - experimental_debug_info { - original_node_names: "sub" - } - } - ret { - key: "sub" - value: "sub_0:z:0" - } - attr { - key: "_disable_call_shape_inference" - value { - b: true - } - } - arg_attr { - value { - attr { - key: "_output_shapes" - value { - list { - shape { - unknown_rank: true - } - } - } - } - } - } - arg_attr { - key: 1 - value { - attr { - key: "_output_shapes" - value { - list { - shape { - unknown_rank: true - } - } - } - } - } - } - } -} -versions { - producer: 808 - min_consumer: 12 -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_if.py b/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_if.py deleted file mode 
100644 index 677193650fbf84..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_if.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf -from tensorflow.python.framework import function - -tf.reset_default_graph() - - -@function.Defun(tf.int32, tf.int32) -def then_branch_func(x, y): - return x + y - - -@function.Defun(tf.int32, tf.int32) -def else_branch_func(x, y): - return x - y - - -with tf.Session() as sess: - x = tf.placeholder(tf.int32, [2], 'x') - y = tf.placeholder(tf.int32, [1], 'y') - const_cond = tf.constant(10, dtype=tf.int32) - greater = tf.raw_ops.Greater(x=x, y=const_cond) - axis = tf.constant([0], dtype=tf.int32) - cond = tf.raw_ops.All(input=greater, axis=axis) - if_op = tf.raw_ops.If(cond=cond, input=[x, y], Tout=[tf.int32], then_branch=then_branch_func, - else_branch=else_branch_func) - tf.global_variables_initializer() - tf_net = sess.graph_def - -tf.io.write_graph(tf_net, './', 'model_with_if.pbtxt', as_text=True) diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_variable_v1.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_variable_v1.pbtxt deleted file mode 100644 index 4bd69c1afbeed7..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/model_with_variable_v1.pbtxt +++ /dev/null @@ -1,53 +0,0 @@ -node { - name: "input1" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_INT64 - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 1 - } - dim { - size: 1 - } - } - } - } -} -node { - name: "global_step" - op: "Variable" - device: "/device:CPU:0" - attr { - key: "dtype" - value { - type: DT_INT64 - } - } - attr { - key: "shape" - value { - shape { - } - } - } -} -node { - name: "add" - op: "Add" - input: "input1" - input: "global_step" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/mul_with_unknown_rank_y.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/mul_with_unknown_rank_y.pbtxt deleted file mode 100644 index 42061cf6fd9cb9..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/mul_with_unknown_rank_y.pbtxt +++ /dev/null @@ -1,50 +0,0 @@ -node { - name: "x" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - dim { - size: 3 - } - } - } - } -} -node { - name: "y" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_FLOAT - } - } - attr { - key: "shape" - value { - shape { - unknown_rank: true - } - } - } -} -node { - name: "Mul" - op: "Mul" - input: "x" - input: "y" - attr { - key: "T" - value { - type: DT_FLOAT - } - } -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/mul_with_unknown_rank_y.py b/tools/mo/unit_tests/moc_tf_fe/test_models/mul_with_unknown_rank_y.py deleted file mode 100644 index 7183075205c245..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/mul_with_unknown_rank_y.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -with tf.Session() as sess: - x = tf.placeholder(tf.float32, [3], 'x') - keep_prob = tf.placeholder(tf.float32, None, 'y') - tf.multiply(x, keep_prob) - - tf.global_variables_initializer() - tf_net = sess.graph_def - -tf.io.write_graph(tf_net, './', 'mul_with_unknown_rank_y.pbtxt', True) diff --git 
a/tools/mo/unit_tests/moc_tf_fe/test_models/placeholder_with_default.pbtxt b/tools/mo/unit_tests/moc_tf_fe/test_models/placeholder_with_default.pbtxt deleted file mode 100644 index c8d0deb4d5dfa0..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/placeholder_with_default.pbtxt +++ /dev/null @@ -1,86 +0,0 @@ -node { - name: "x" - op: "Placeholder" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "shape" - value { - shape { - dim { - size: -1 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "Const" - op: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "value" - value { - tensor { - dtype: DT_INT32 - tensor_shape { - dim { - size: 2 - } - dim { - size: 3 - } - } - tensor_content: "\001\000\000\000\002\000\000\000\003\000\000\000\004\000\000\000\005\000\000\000\006\000\000\000" - } - } - } -} -node { - name: "y" - op: "PlaceholderWithDefault" - input: "Const" - attr { - key: "dtype" - value { - type: DT_INT32 - } - } - attr { - key: "shape" - value { - shape { - dim { - size: -1 - } - dim { - size: 3 - } - } - } - } -} -node { - name: "Add" - op: "AddV2" - input: "x" - input: "y" - attr { - key: "T" - value { - type: DT_INT32 - } - } -} diff --git a/tools/mo/unit_tests/moc_tf_fe/test_models/placeholder_with_default.py b/tools/mo/unit_tests/moc_tf_fe/test_models/placeholder_with_default.py deleted file mode 100644 index b559443f9922f9..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/test_models/placeholder_with_default.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import tensorflow.compat.v1 as tf - -tf.reset_default_graph() -with tf.Session() as sess: - x = tf.placeholder(tf.int32, [None, 3], 'x') - y = tf.placeholder_with_default(tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int32), - [None, 3], 'y') - tf.add(x, y) - - tf.global_variables_initializer() - tf_net = sess.graph_def - -tf.io.write_graph(tf_net, './', 'placeholder_with_default.pbtxt', True) diff --git a/tools/mo/unit_tests/moc_tf_fe/utils.py b/tools/mo/unit_tests/moc_tf_fe/utils.py deleted file mode 100644 index f866cf278dbf0b..00000000000000 --- a/tools/mo/unit_tests/moc_tf_fe/utils.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import os - -import numpy as np - -from openvino.runtime import Core -from openvino.tools.mo.convert import convert_model - - -def basic_check(input_model, argv_input, input_data, expected_dtype, expected_value, freeze_placeholder_with_value=None, - input_shape=None, only_conversion=False, input_model_is_text=True, use_new_frontend=True, - use_legacy_frontend=False, extensions=None, input_checkpoint=None): - path = os.path.dirname(__file__) - input_model = os.path.join(path, "test_models", input_model) - - ov_model = convert_model(input_model, input=argv_input, - freeze_placeholder_with_value=freeze_placeholder_with_value, - input_shape=input_shape, input_model_is_text=input_model_is_text, - use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend, - framework="tf", extensions=extensions, input_checkpoint=input_checkpoint) - - if only_conversion: - return ov_model - - ie = Core() - exec_net = ie.compile_model(ov_model, "CPU") - req = exec_net.create_infer_request() - results = req.infer(input_data) - values = list(results.values())[0] - if expected_dtype is not None: - assert values.dtype == expected_dtype - assert np.allclose(values, - expected_value), 
"Expected and actual values are different." \ - " Expected value: {}, actual value: {}".format(expected_value, values) - - return ov_model diff --git a/tools/mo/unit_tests/mock_mo_frontend/.clang-format b/tools/mo/unit_tests/mock_mo_frontend/.clang-format deleted file mode 100644 index 109a57d53263fe..00000000000000 --- a/tools/mo/unit_tests/mock_mo_frontend/.clang-format +++ /dev/null @@ -1,54 +0,0 @@ -BasedOnStyle: LLVM -IndentWidth: 4 -UseTab: Never -Language: Cpp -Standard: Cpp11 - -AccessModifierOffset: -4 - -AlignConsecutiveDeclarations: false -AlignConsecutiveAssignments: false -AlignTrailingComments: true - -AllowShortBlocksOnASingleLine: true -AllowShortCaseLabelsOnASingleLine: true -AllowShortFunctionsOnASingleLine: Inline - -AlwaysBreakBeforeMultilineStrings: true -AlwaysBreakTemplateDeclarations: true - -BinPackArguments: false -BinPackParameters: false - -BreakBeforeBraces: Allman -BreakConstructorInitializersBeforeComma: true - -ColumnLimit: 100 - -IndentCaseLabels: false -IndentWrappedFunctionNames: true - -KeepEmptyLinesAtTheStartOfBlocks: false -NamespaceIndentation: All - -PointerAlignment: Left -SpaceAfterCStyleCast: false -SpaceBeforeAssignmentOperators: true -SpaceBeforeParens: ControlStatements -SpaceInEmptyParentheses: false -SpacesInAngles: false -SpacesInCStyleCastParentheses: false -SpacesInParentheses: false -SpacesInSquareBrackets: false - -SortIncludes: false -ReflowComments: true - -IncludeCategories: - - Regex: '^".*' - Priority: 3 - - Regex: '^<.*' - Priority: 2 -SortIncludes: true - -FixNamespaceComments: true diff --git a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/CMakeLists.txt b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/CMakeLists.txt deleted file mode 100644 index 3c859f85f4ec48..00000000000000 --- a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/CMakeLists.txt +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -set(TARGET_FE_NAME "openvino_mock_mo_frontend") - -file(GLOB_RECURSE LIBRARY_SRC ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp) -file(GLOB_RECURSE LIBRARY_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/*.hpp) - -source_group("src" FILES ${LIBRARY_SRC}) -source_group("include" FILES ${LIBRARY_HEADERS}) - -# Create shared library -add_library(${TARGET_FE_NAME} SHARED ${LIBRARY_SRC} ${LIBRARY_HEADERS}) - -target_include_directories(${TARGET_FE_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) - -target_link_libraries(${TARGET_FE_NAME} PUBLIC openvino::runtime) - -ov_add_clang_format_target(${TARGET_FE_NAME}_clang FOR_TARGETS ${TARGET_FE_NAME}) - -if(BUILD_SHARED_LIBS) - install(TARGETS ${TARGET_FE_NAME} - RUNTIME DESTINATION ${OV_CPACK_RUNTIMEDIR} COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${OV_CPACK_LIBRARYDIR} COMPONENT tests EXCLUDE_FROM_ALL) -else() - install(TARGETS ${TARGET_FE_NAME} - RUNTIME DESTINATION ${OV_CPACK_PYTHONDIR}/openvino COMPONENT tests EXCLUDE_FROM_ALL - LIBRARY DESTINATION ${OV_CPACK_PYTHONDIR}/openvino COMPONENT tests EXCLUDE_FROM_ALL) -endif() diff --git a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/mock_mo_frontend.cpp b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/mock_mo_frontend.cpp deleted file mode 100644 index c82194ec48eff8..00000000000000 --- a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/mock_mo_frontend.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "mock_mo_frontend.hpp" -#include "openvino/frontend/manager.hpp" - 
-using namespace ov::frontend; - -FeStat FrontEndMockPy::m_stat = {}; -ModelStat InputModelMockPy::m_stat = {}; -PlaceStat PlaceMockPy::m_stat = {}; - -std::string MockSetup::m_equal_data_node1 = {}; -std::string MockSetup::m_equal_data_node2 = {}; -int MockSetup::m_max_input_port_index = 0; -int MockSetup::m_max_output_port_index = 0; - -ov::PartialShape InputModelMockPy::m_returnShape = {}; - -extern "C" MOCK_API FrontEndVersion get_api_version() -{ - return OV_FRONTEND_API_VERSION; -} - -extern "C" MOCK_API void* get_front_end_data() -{ - FrontEndPluginInfo* res = new FrontEndPluginInfo(); - res->m_name = "openvino_mock_mo_frontend"; - res->m_creator = []() { return std::make_shared(); }; - - return res; -} diff --git a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/mock_mo_frontend.hpp b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/mock_mo_frontend.hpp deleted file mode 100644 index 4967a53d2148fb..00000000000000 --- a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_frontend/mock_mo_frontend.hpp +++ /dev/null @@ -1,457 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#pragma once - -#include "openvino/core/visibility.hpp" -#include "openvino/frontend/manager.hpp" -#include "openvino/frontend/visibility.hpp" - -// Defined if we are building the plugin DLL (instead of using it) -#ifdef openvino_mock_mo_frontend_EXPORTS -#define MOCK_API OPENVINO_CORE_EXPORTS -#else -#define MOCK_API OPENVINO_CORE_IMPORTS -#endif // openvino_mock_mo_frontend_EXPORTS - -// OK to have 'using' in mock header - -using namespace ov::frontend; - -//////////////////////////////// -/// \brief This structure holds number static setup values -/// It will be used by Python unit tests to setup particular mock behavior -struct MOCK_API MockSetup -{ - static std::string m_equal_data_node1; - static std::string m_equal_data_node2; - static int m_max_input_port_index; - static int m_max_output_port_index; - - static void clear_setup() - { - m_equal_data_node1 = {}; - m_equal_data_node2 = {}; - m_max_input_port_index = 0; - m_max_output_port_index = 0; - } - - static void set_equal_data(const std::string& node1, const std::string& node2) - { - m_equal_data_node1 = node1; - m_equal_data_node2 = node2; - } - - static void set_max_port_counts(int max_input, int max_output) - { - m_max_input_port_index = max_input; - m_max_output_port_index = max_output; - } -}; - -/// \brief This structure holds number of calls of particular methods of Place objects -/// It will be used by Python unit tests to verify that appropriate API -/// was called with correct arguments during test execution -struct MOCK_API PlaceStat -{ - int m_get_names = 0; - int m_get_input_port = 0; - int m_get_output_port = 0; - int m_is_input = 0; - int m_is_output = 0; - int m_is_equal = 0; - int m_is_equal_data = 0; - - // Arguments tracking - std::string m_lastArgString; - int m_lastArgInt = -1; - Place::Ptr m_lastArgPlace = nullptr; - - // Getters - int get_names() const { return m_get_names; } - int get_input_port() const { return m_get_input_port; } - int get_output_port() const { return m_get_output_port; } - int is_input() const { return m_is_input; } - int is_output() const { return m_is_output; } - int is_equal() const { return m_is_equal; } - int is_equal_data() const { return m_is_equal_data; } - - // Arguments getters - std::string get_lastArgString() const { return m_lastArgString; } - int get_lastArgInt() const { return m_lastArgInt; } - Place::Ptr get_lastArgPlace() const { return 
m_lastArgPlace; } -}; - -/// \brief Mock implementation of Place -/// Every call increments appropriate counters in statistic and stores argument values to statistics -/// as well -class MOCK_API PlaceMockPy : public Place -{ - static PlaceStat m_stat; - std::string m_name; - bool m_is_op = false; - int m_portIndex = -1; - -public: - explicit PlaceMockPy(std::string name = {}, bool is_op = false, int portIndex = -1) - : m_name(std::move(name)) - , m_is_op(is_op) - , m_portIndex(portIndex) - { - } - - std::vector get_names() const override - { - m_stat.m_get_names++; - return {m_name}; - } - - Place::Ptr get_input_port() const override - { - m_stat.m_get_input_port++; - m_stat.m_lastArgInt = -1; - return std::make_shared(); - } - - Place::Ptr get_input_port(int inputPortIndex) const override - { - m_stat.m_get_input_port++; - m_stat.m_lastArgInt = inputPortIndex; - if (inputPortIndex < MockSetup::m_max_input_port_index) - { - return std::make_shared(m_name, false, inputPortIndex); - } - return nullptr; - } - - Place::Ptr get_input_port(const std::string& inputName) const override - { - m_stat.m_get_input_port++; - m_stat.m_lastArgInt = -1; - m_stat.m_lastArgString = inputName; - return std::make_shared(); - } - - Place::Ptr get_input_port(const std::string& inputName, int inputPortIndex) const override - { - m_stat.m_get_input_port++; - m_stat.m_lastArgInt = inputPortIndex; - m_stat.m_lastArgString = inputName; - return std::make_shared(); - } - - Place::Ptr get_output_port() const override - { - m_stat.m_get_output_port++; - m_stat.m_lastArgInt = -1; - return std::make_shared(); - } - - Place::Ptr get_output_port(int outputPortIndex) const override - { - m_stat.m_get_output_port++; - m_stat.m_lastArgInt = outputPortIndex; - if (outputPortIndex < MockSetup::m_max_output_port_index) - { - return std::make_shared(m_name, false, outputPortIndex); - } - return nullptr; - } - - Place::Ptr get_output_port(const std::string& outputName) const override - { - m_stat.m_get_output_port++; - m_stat.m_lastArgInt = -1; - m_stat.m_lastArgString = outputName; - return std::make_shared(outputName); - } - - Place::Ptr get_output_port(const std::string& outputName, int outputPortIndex) const override - { - m_stat.m_get_output_port++; - m_stat.m_lastArgInt = outputPortIndex; - m_stat.m_lastArgString = outputName; - return std::make_shared(); - } - - bool is_input() const override - { - m_stat.m_is_input++; - return m_name.find("input") != std::string::npos; - } - - bool is_output() const override - { - m_stat.m_is_output++; - return m_name.find("output") != std::string::npos; - } - - bool is_equal(const Ptr& another) const override - { - m_stat.m_is_equal++; - m_stat.m_lastArgPlace = another; - std::shared_ptr mock = std::dynamic_pointer_cast(another); - return m_name == mock->m_name && m_is_op == mock->m_is_op && - m_portIndex == mock->m_portIndex; - } - - bool is_equal_data(const Ptr& another) const override - { - if (m_is_op) - throw std::runtime_error("Not implemented"); - m_stat.m_is_equal_data++; - m_stat.m_lastArgPlace = another; - std::shared_ptr mock = std::dynamic_pointer_cast(another); - if (!MockSetup::m_equal_data_node1.empty() && !MockSetup::m_equal_data_node2.empty()) - { - if ((mock->m_name.find(MockSetup::m_equal_data_node1) != std::string::npos || - mock->m_name.find(MockSetup::m_equal_data_node2) != std::string::npos) && - (m_name.find(MockSetup::m_equal_data_node1) != std::string::npos || - m_name.find(MockSetup::m_equal_data_node2) != std::string::npos)) - { - return true; - } - } - return 
!mock->m_is_op && m_name == mock->m_name; - } - - //---------------Stat-------------------- - static PlaceStat get_stat() { return m_stat; } - static void clear_stat() { m_stat = {}; } -}; - -//////////////////////////////// - -/// \brief This structure holds number of calls of particular methods of InputModel objects -/// It will be used by Python unit tests to verify that appropriate API -/// was called with correct arguments during test execution -struct MOCK_API ModelStat -{ - int m_get_inputs = 0; - int m_get_outputs = 0; - int m_get_place_by_tensor_name = 0; - int m_get_place_by_operation_name = 0; - int m_set_partial_shape = 0; - int m_get_partial_shape = 0; - int m_set_element_type = 0; - - int m_extract_subgraph = 0; - int m_override_all_inputs = 0; - int m_override_all_outputs = 0; - - // Arguments tracking - std::string m_lastArgString; - int m_lastArgInt = -1; - Place::Ptr m_lastArgPlace = nullptr; - std::vector m_lastArgInputPlaces; - std::vector m_lastArgOutputPlaces; - ov::element::Type m_lastArgElementType; - ov::PartialShape m_lastArgPartialShape; - - // Getters - int get_inputs() const { return m_get_inputs; } - int get_outputs() const { return m_get_outputs; } - int extract_subgraph() const { return m_extract_subgraph; } - int override_all_inputs() const { return m_override_all_inputs; } - int override_all_outputs() const { return m_override_all_outputs; } - int get_place_by_operation_name() const { return m_get_place_by_operation_name; } - int get_place_by_tensor_name() const { return m_get_place_by_tensor_name; } - int set_partial_shape() const { return m_set_partial_shape; } - int get_partial_shape() const { return m_get_partial_shape; } - int set_element_type() const { return m_set_element_type; } - - // Arguments getters - std::string get_lastArgString() const { return m_lastArgString; } - int get_lastArgInt() const { return m_lastArgInt; } - Place::Ptr get_lastArgPlace() const { return m_lastArgPlace; } - std::vector get_lastArgInputPlaces() const { return m_lastArgInputPlaces; } - std::vector get_lastArgOutputPlaces() const { return m_lastArgOutputPlaces; } - ov::element::Type get_lastArgElementType() const { return m_lastArgElementType; } - ov::PartialShape get_lastArgPartialShape() const { return m_lastArgPartialShape; } -}; - -/// \brief Mock implementation of InputModel -/// Every call increments appropriate counters in statistic and stores argument values to statistics -/// as well -class MOCK_API InputModelMockPy : public InputModel -{ - static ModelStat m_stat; - static ov::PartialShape m_returnShape; - - std::set m_operations = { - "8", "9", "8:9", "operation", "operation:0", "0:operation", "tensorAndOp", "conv2d"}; - std::set m_tensors = {"8:9", - "tensor", - "tensor:0", - "0:tensor", - "tensorAndOp:0", - "conv2d:0", - "0:conv2d", - "mock_input1", - "mock_input2", - "newInput1", - "newIn1", - "newIn2", - "mock_output1", - "mock_output2", - "new_output2", - "newOut1", - "newOut2"}; - -public: - std::vector get_inputs() const override - { - m_stat.m_get_inputs++; - return {std::make_shared("mock_input1"), - std::make_shared("mock_input2")}; - } - - std::vector get_outputs() const override - { - m_stat.m_get_outputs++; - return {std::make_shared("mock_output1"), - std::make_shared("mock_output2")}; - } - - Place::Ptr get_place_by_operation_name(const std::string& opName) const override - { - m_stat.m_get_place_by_operation_name++; - m_stat.m_lastArgString = opName; - if (m_operations.count(opName)) - { - return std::make_shared(opName, true); - } - return 
nullptr; - } - - Place::Ptr get_place_by_tensor_name(const std::string& tensorName) const override - { - m_stat.m_get_place_by_tensor_name++; - m_stat.m_lastArgString = tensorName; - if (m_tensors.count(tensorName)) - { - return std::make_shared<PlaceMockPy>(tensorName); - } - return nullptr; - } -
- void override_all_outputs(const std::vector<Place::Ptr>& outputs) override - { - m_stat.m_override_all_outputs++; - m_stat.m_lastArgOutputPlaces = outputs; - } - - void override_all_inputs(const std::vector<Place::Ptr>& inputs) override - { - m_stat.m_override_all_inputs++; - m_stat.m_lastArgInputPlaces = inputs; - } - - void extract_subgraph(const std::vector<Place::Ptr>& inputs, - const std::vector<Place::Ptr>& outputs) override - { - m_stat.m_extract_subgraph++; - m_stat.m_lastArgInputPlaces = inputs; - m_stat.m_lastArgOutputPlaces = outputs; - } -
- // Setting tensor properties - void set_partial_shape(const Place::Ptr& place, const ov::PartialShape& shape) override - { - m_stat.m_set_partial_shape++; - m_stat.m_lastArgPlace = place; - m_stat.m_lastArgPartialShape = shape; - } - - ov::PartialShape get_partial_shape(const Place::Ptr& place) const override - { - m_stat.m_get_partial_shape++; - m_stat.m_lastArgPlace = place; - return m_returnShape; - } - - void set_element_type(const Place::Ptr& place, const ov::element::Type& type) override - { - m_stat.m_set_element_type++; - m_stat.m_lastArgPlace = place; - m_stat.m_lastArgElementType = type; - } - - static void mock_return_partial_shape(const ov::PartialShape& shape) { m_returnShape = shape; } -
- //---------------Stat-------------------- - static ModelStat get_stat() { return m_stat; } - static void clear_stat() { m_stat = {}; } -}; - -///////////////////////////////////////////////////////// -
-/// \brief This structure holds number of calls of particular methods of FrontEnd objects -/// It will be used by Python unit tests to verify that appropriate API -/// was called with correct arguments during test execution -struct MOCK_API FeStat -{ - std::vector<std::string> m_load_paths; - int m_convert_model = 0; - int m_supported = 0; - int m_get_name = 0; - // Getters - std::vector<std::string> load_paths() const { return m_load_paths; } - int convert_model() const { return m_convert_model; } - int supported() const { return m_supported; } - int get_name() const { return m_get_name; } -}; -
-/// \brief Mock implementation of FrontEnd -/// Every call increments appropriate counters in statistic and stores argument values to statistics -/// as well -class MOCK_API FrontEndMockPy : public FrontEnd -{ - static FeStat m_stat; - -public: - FrontEndMockPy() = default; - - std::shared_ptr<ov::Model> convert(const InputModel::Ptr& model) const override - { - std::cout << "MVN: convert called\n"; - m_stat.m_convert_model++; - return std::make_shared<ov::Model>(ov::NodeVector{}, ov::ParameterVector{}); - } - - static FeStat get_stat() { return m_stat; } - - static void clear_stat() { m_stat = {}; } -
-private: - InputModel::Ptr load_impl(const std::vector<ov::Any>& params) const override - { - if (!params.empty() && params[0].is<std::string>()) - { - auto path = params[0].as<std::string>(); - m_stat.m_load_paths.push_back(path); - } - return std::make_shared<InputModelMockPy>(); - } - - bool supported_impl(const std::vector<ov::Any>& params) const override - { - m_stat.m_supported++; - if (!params.empty() && params[0].is<std::string>()) - { - auto path = params[0].as<std::string>(); - if (path.find(".test_mo_mock_mdl") != std::string::npos) - { - return true; - } - } - return false; - } - - std::string get_name() const override - { - m_stat.m_get_name++; - return "openvino_mock_mo_frontend"; - } -};
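The mock classes above only count calls and capture arguments; a brief sketch of how the removed Python unit tests drove them through the frontend manager and then read the counters via the pybind module defined next (an illustration under the assumption that the openvino_mock_mo_frontend library is on OpenVINO's frontend search path and that the mock_mo_python_api module has been built and is importable):

    from openvino.frontend import FrontEndManager
    import mock_mo_python_api as mock_api

    fem = FrontEndManager()
    fe = fem.load_by_framework('openvino_mock_mo_frontend')  # resolves to FrontEndMockPy
    model = fe.load('dummy.test_mo_mock_mdl')                # path is recorded in FeStat.load_paths
    model.get_place_by_tensor_name('mock_input1')            # increments the ModelStat counter

    stat = mock_api.get_model_statistic()
    assert stat.get_place_by_tensor_name == 1
    assert stat.lastArgString == 'mock_input1'
    mock_api.clear_model_statistic()
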
diff --git a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt deleted file mode 100644 index 1c5210cda2875a..00000000000000 --- a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_python_api/CMakeLists.txt +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# -
-set(CMAKE_LIBRARY_OUTPUT_DIRECTORY_OLD ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}) -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_OLD ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}) -set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY_OLD ${CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY}) -set(CMAKE_PDB_OUTPUT_DIRECTORY_OLD ${CMAKE_PDB_OUTPUT_DIRECTORY}) -
-if(OV_GENERATOR_MULTI_CONFIG) - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/$<CONFIG>/python/openvino) -else() - set(PYTHON_BRIDGE_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/python/openvino) -endif() -
-set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) -set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) -set(CMAKE_PDB_OUTPUT_DIRECTORY ${PYTHON_BRIDGE_OUTPUT_DIRECTORY}) -
-set(TARGET_FE_NAME "openvino_mock_mo_frontend") -set(PYBIND_FE_NAME "mock_mo_python_api") - -set(PYBIND_FE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/mock_mo_python_api.cpp) - -source_group("src" FILES ${PYBIND_FE_SRC}) -
-if(CMAKE_CROSSCOMPILING) - set(pybind11_min_version 2.12.0) -else() - set(pybind11_min_version 2.12.0) -endif() -# search for FindPython3.cmake instead of legacy modules -set(PYBIND11_FINDPYTHON ON) - -ov_detect_python_module_extension() - -find_package(pybind11 ${pybind11_min_version} QUIET) -
-if(NOT pybind11_FOUND) - add_subdirectory(${OpenVINO_SOURCE_DIR}/src/bindings/python/thirdparty/pybind11 - ${CMAKE_CURRENT_BINARY_DIR}/pybind11_build - EXCLUDE_FROM_ALL) -endif() -
-ov_find_python3(REQUIRED) -pybind11_add_module(${PYBIND_FE_NAME} MODULE NO_EXTRAS ${PYBIND_FE_SRC}) - -target_link_libraries(${PYBIND_FE_NAME} PRIVATE openvino::runtime) -target_link_libraries(${PYBIND_FE_NAME} PRIVATE ${TARGET_FE_NAME}) - -add_dependencies(${PYBIND_FE_NAME} ${TARGET_FE_NAME}) - -ov_add_clang_format_target(${PYBIND_FE_NAME}_clang FOR_TARGETS ${PYBIND_FE_NAME}) -
-set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY_OLD}) -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY_OLD}) -set(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY ${CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY_OLD}) -set(CMAKE_PDB_OUTPUT_DIRECTORY ${CMAKE_PDB_OUTPUT_DIRECTORY_OLD}) -
-install(TARGETS ${PYBIND_FE_NAME} - DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT tests EXCLUDE_FROM_ALL) diff --git a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_python_api/mock_mo_python_api.cpp b/tools/mo/unit_tests/mock_mo_frontend/mock_mo_python_api/mock_mo_python_api.cpp deleted file mode 100644 index a46ac6b3e15657..00000000000000 --- a/tools/mo/unit_tests/mock_mo_frontend/mock_mo_python_api/mock_mo_python_api.cpp +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (C) 2018-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// -
-#include <pybind11/pybind11.h> -#include <pybind11/stl.h> - -#include "../mock_mo_frontend/mock_mo_frontend.hpp" - -namespace py = pybind11; -using namespace ov::frontend; -
-static void register_mock_frontend_stat(py::module m) -{ - m.def("get_frontend_statistic", &FrontEndMockPy::get_stat); - m.def("clear_frontend_statistic", &FrontEndMockPy::clear_stat); - - py::class_<FeStat> feStat(m, "FeStat", py::dynamic_attr()); -
feStat.def_property_readonly("load_paths", &FeStat::load_paths); - feStat.def_property_readonly("convert_model", &FeStat::convert_model); - feStat.def_property_readonly("supported", &FeStat::supported); - feStat.def_property_readonly("get_name", &FeStat::get_name); -} - -static void register_mock_setup(py::module m) -{ - m.def("clear_setup", &MockSetup::clear_setup); - m.def("set_equal_data", &MockSetup::set_equal_data); - m.def("set_max_port_counts", &MockSetup::set_max_port_counts); -} - -static void register_mock_model_stat(py::module m) -{ - m.def("get_model_statistic", &InputModelMockPy::get_stat); - m.def("clear_model_statistic", &InputModelMockPy::clear_stat); - m.def("mock_return_partial_shape", &InputModelMockPy::mock_return_partial_shape); - - py::class_ mdlStat(m, "ModelStat", py::dynamic_attr()); - mdlStat.def_property_readonly("get_inputs", &ModelStat::get_inputs); - mdlStat.def_property_readonly("get_outputs", &ModelStat::get_outputs); - mdlStat.def_property_readonly("get_place_by_operation_name", - &ModelStat::get_place_by_operation_name); - mdlStat.def_property_readonly("get_place_by_tensor_name", &ModelStat::get_place_by_tensor_name); - - mdlStat.def_property_readonly("set_partial_shape", &ModelStat::set_partial_shape); - mdlStat.def_property_readonly("get_partial_shape", &ModelStat::get_partial_shape); - mdlStat.def_property_readonly("set_element_type", &ModelStat::set_element_type); - mdlStat.def_property_readonly("extract_subgraph", &ModelStat::extract_subgraph); - mdlStat.def_property_readonly("override_all_inputs", &ModelStat::override_all_inputs); - mdlStat.def_property_readonly("override_all_outputs", &ModelStat::override_all_outputs); - - // Arguments tracking - mdlStat.def_property_readonly("lastArgString", &ModelStat::get_lastArgString); - mdlStat.def_property_readonly("lastArgInt", &ModelStat::get_lastArgInt); - mdlStat.def_property_readonly("lastArgPlace", &ModelStat::get_lastArgPlace); - mdlStat.def_property_readonly("lastArgInputPlaces", &ModelStat::get_lastArgInputPlaces); - mdlStat.def_property_readonly("lastArgOutputPlaces", &ModelStat::get_lastArgOutputPlaces); - mdlStat.def_property_readonly("lastArgElementType", &ModelStat::get_lastArgElementType); - mdlStat.def_property_readonly("lastArgPartialShape", &ModelStat::get_lastArgPartialShape); -} - -static void register_mock_place_stat(py::module m) -{ - m.def("get_place_statistic", &PlaceMockPy::get_stat); - m.def("clear_place_statistic", &PlaceMockPy::clear_stat); - - py::class_ placeStat(m, "PlaceStat", py::dynamic_attr()); - - placeStat.def_property_readonly("lastArgString", &PlaceStat::get_lastArgString); - placeStat.def_property_readonly("lastArgInt", &PlaceStat::get_lastArgInt); - placeStat.def_property_readonly("lastArgPlace", &PlaceStat::get_lastArgPlace); - - placeStat.def_property_readonly("get_names", &PlaceStat::get_names); - placeStat.def_property_readonly("get_input_port", &PlaceStat::get_input_port); - placeStat.def_property_readonly("get_output_port", &PlaceStat::get_output_port); - placeStat.def_property_readonly("is_input", &PlaceStat::is_input); - placeStat.def_property_readonly("is_output", &PlaceStat::is_output); - placeStat.def_property_readonly("is_equal", &PlaceStat::is_equal); - placeStat.def_property_readonly("is_equal_data", &PlaceStat::is_equal_data); -} - -PYBIND11_MODULE(mock_mo_python_api, m) -{ - m.doc() = "Mock frontend call counters for testing Pyngraph frontend bindings"; - register_mock_frontend_stat(m); - register_mock_setup(m); - register_mock_model_stat(m); - 
register_mock_place_stat(m); -} diff --git a/tools/mo/unit_tests/utils/__init__.py b/tools/mo/unit_tests/utils/__init__.py deleted file mode 100644 index e69de29bb2d1d6..00000000000000 diff --git a/tools/mo/unit_tests/utils/extractors.py b/tools/mo/unit_tests/utils/extractors.py deleted file mode 100644 index affc12fd35ccb2..00000000000000 --- a/tools/mo/unit_tests/utils/extractors.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import unittest -from unittest.mock import patch - -import numpy as np - - -class PB(dict): - def update_node(self): - pass - __getattr__ = dict.get - - -class BaseExtractorsTestingClass(unittest.TestCase): - expected = None - res = None - call_args = None - expected_call_args = None - - def setUp(self): - if hasattr(self, 'patcher') and self.patcher: # pylint: disable=no-member - patcher = patch(self.patcher) # pylint: disable=no-member - self.addCleanup(patcher.stop) - self.infer_mock = patcher.start() - - def compare(self): - if hasattr(self, 'infer_mock'): - self.assertTrue(self.infer_mock.called) - for key, val in self.expected.items(): - if key == "infer": - self.assertEqual(self.call_args, self.expected_call_args) - if type(val) is np.ndarray: - np.testing.assert_equal(val, self.res[key]) - elif type(val) is list: - self.assertTrue(np.all([val == self.res[key]])) - else: - self.assertAlmostEqual(val, self.res[key], 7, - "{} attribute comparison failed! Expected {} but {} given.".format(key, val, - self.res[key])) - - -class FakeParam: - def __init__(self, param_key, param_val): - setattr(self, param_key, param_val) - - -class FakeMultiParam: - def __init__(self, dict_values): - self.dict_values = dict_values - for (key, value) in dict_values.items(): - # if type(value) != dict: - setattr(self, key, value) - # else: - # setattr(self, key, FakeMultiParam(value)) - - -class FakeBlob: - def __init__(self, param_key, param_val): - setattr(self, param_key, param_val) - - -class FakeModelLayer: - def __init__(self, blobs_val): - self.blobs = [FakeBlob('data', val) for val in blobs_val] - - -class FakeValue: - def __init__(self, val): - self.shape = val diff --git a/tools/mo/unit_tests/utils/graph.py b/tools/mo/unit_tests/utils/graph.py deleted file mode 100644 index 35ee228de9ea48..00000000000000 --- a/tools/mo/unit_tests/utils/graph.py +++ /dev/null @@ -1,388 +0,0 @@ -# Copyright (C) 2018-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -from argparse import Namespace -from copy import deepcopy - -import networkx as nx - -from openvino.tools.mo.ops.parameter import Parameter -from openvino.tools.mo.front.common.partial_infer.utils import shape_array, int64_array -from openvino.tools.mo.graph.graph import Node, Graph -from openvino.tools.mo.middle.pattern_match import all_edges_in_nodes -from openvino.tools.mo.ops.const import Const -from openvino.tools.mo.utils.error import Error - - -def not_all_new(old_elements: list, new_elements: list): - """ - This function check whether at least one element from new_elements are in old_elements. 
- """ - return any([element in old_elements for element in new_elements]) - - -def check_and_update_ports(node, edges_data: list, in_port: bool = True): - key = 'in' if in_port else 'out' - key_in_edges = [key in edge_data for edge_data in edges_data] - if all(key_in_edges): - ports = [edge_data[key] for edge_data in edges_data] - if len(ports) != len(set(ports)): - raise Error("Please, provide unique {} ports for nodes".format(key)) - elif not any(key_in_edges): - if node.has_valid('kind') and node.kind == 'data': - return - for i, edge_data in enumerate(edges_data): - edge_data[key] = i - else: - raise Error("Please, provide all {} ports for nodes".format(key)) - - -def build_graph_with_attrs(nodes_with_attrs: list, edges_with_attrs: list, new_nodes_with_attrs: list = [], - new_edges_with_attrs: list = [], update_edge_attrs: dict = None, - update_nodes_attributes: list = None, nodes_with_edges_only: bool = False, - add_nodes_from_edges: bool = False): - """ - Build the Graph with specific nodes and edges. Also update of edge and node parameters is supported. - :param nodes_with_attrs: list of tuples ('node_name', {node_attrs}) - :param edges_with_attrs: list of tuples like (start node, end node, (optional) {attrs of the edge}). - :param new_nodes_with_attrs: analogically nodes_with_attrs - :param new_edges_with_attrs: analogically new_edges - :param update_edge_attrs: optional dictionary like {('from_node', 'to_node', key): {edge_attrs}}. - :param update_nodes_attributes: optional list of tuples which specifies nodes names and their attributes to be - updated. The first element is a node name to update attribute and the second element is a dictionary with attribute - name and its value. - :param nodes_with_edges_only: add nodes which has at least one incoming or outcoming edge. - :param add_nodes_from_edges: whether nodes that is not listed in all_nodes but are in all_edges is allowed. - :return: generated graph. - """ - if not_all_new([node[0] for node in nodes_with_attrs], [node[0] for node in new_nodes_with_attrs]): - raise Error('Some nodes from new_nodes_with_attrs are already in nodes.' - ' Please, add to new_nodes_with_attrs only NEW nodes.') - - if not_all_new([(edge[0], edge[1]) for edge in edges_with_attrs], - [(edge[0], edge[1]) for edge in new_edges_with_attrs]): - raise Error('Some edges from new_edges_with_attrs are already in edges.' - ' Please, add to new_edges_with_attrs only NEW edges.') - - # Check that all nodes from list of edges are in nodes - all_nodes = nodes_with_attrs + new_nodes_with_attrs - all_edges = edges_with_attrs + new_edges_with_attrs - all_nodes_names = [node[0] for node in all_nodes] - if not add_nodes_from_edges and not all_edges_in_nodes(nodes=all_nodes_names, edges=all_edges): - raise Error("Some nodes from list of edges is not in nodes. 
Please, add all necessary nodes.") - - graph = Graph() - - # Create dict for nodes with attrs - nodes_attrs = {} - for node_name, attrs in all_nodes: - nodes_attrs[node_name] = attrs - if 'name' not in attrs: - attrs['name'] = node_name - - if nodes_with_edges_only: - # filter nodes to keep only ones with edges connected - filtered_nodes = {} - for edge in all_edges: - node_1, node_2 = edge[0], edge[1] - filtered_nodes[node_1] = nodes_attrs[node_1] - filtered_nodes[node_2] = nodes_attrs[node_2] - nodes_attrs = filtered_nodes - - # Create all nodes - for node, attrs in nodes_attrs.items(): - graph.add_node(node, **deepcopy(attrs)) - - # Connect nodes with edges (also unpack edge params) - for edge in all_edges: - node_1, node_2 = edge[0], edge[1] - edge_attrs = edge[2] if len(edge) == 3 else {} - graph.add_edge(node_1, node_2, **edge_attrs) - - # Update attributes of edges - if update_edge_attrs: - # it will work in 2.x networkx only - for edge, attr in update_edge_attrs.items(): - for k, v in attr.items(): - nx.set_edge_attributes(G=graph, name=k, values={edge: v}) - - # Update attributes of nodes - if update_nodes_attributes is not None: - for node_name, new_attrs in update_nodes_attributes: - assert (node_name in graph.nodes()) - for attr, value in new_attrs.items(): - graph.node[node_name][attr] = value - - for node_id in graph.nodes(): - node = Node(graph, node_id) - check_and_update_ports(node, [graph.get_edge_data(edge[0], node_id)[0] for edge in graph.in_edges(node_id)], - True) - check_and_update_ports(node, [graph.get_edge_data(node_id, edge[1])[0] for edge in graph.out_edges(node_id)], - False) - - for node in graph.get_op_nodes(): - # Add in_ports attribute - in_edges = node.in_edges() - for i in range(len(in_edges)): - node.add_input_port(idx=i) - - # Add out_ports attribute - out_edges = node.out_edges() - for i in range(len(out_edges)): - node.add_output_port(idx=i) - return graph - - -def build_graph(nodes_attrs: dict, edges: list, update_attributes: dict = None, nodes_with_edges_only: bool = False, - cli: Namespace = None): - """ - Build the Graph with specific nodes and edges. - :param nodes_attrs: dictionary where key is the node name and the value is the dictionary with node attributes. - :param edges: list of pairs with start and end node names of the edge. - :param update_attributes: optional dictionary which specifies nodes names and their attributes to be updated. The - key is a node name to update attribute and the value is a dictionary with attribute name and its value. - :param nodes_with_edges_only: add nodes which has at least one incoming or outcoming edge. - :param cli: Namespace with cli keys to associate with the graph - :return: generated graph. 
- """ - # no mutable values must be set as default function argument - cli = Namespace(static_shape=False, data_type='FP32') if cli is None else cli - graph = Graph() - - for node_name, attrs in nodes_attrs.items(): - if 'name' not in attrs: - attrs['name'] = node_name - - if nodes_with_edges_only: - # filter nodes to keep only ones with edges connected - filtered_nodes = {} - for item in edges: - node1, node2, *_ = item - - filtered_nodes[node1] = nodes_attrs[node1] - filtered_nodes[node2] = nodes_attrs[node2] - nodes_attrs = filtered_nodes - - # create all nodes first - for node, attrs in nodes_attrs.items(): - assert node not in graph.nodes() - graph.add_node(node, **deepcopy(attrs)) - - # connect nodes with edges - for item in edges: - node_1, node_2, *edge_attrs_list = item - edge_attrs = dict(edge_attrs_list[0]) if edge_attrs_list else {} - - common_attrs = {'in': len(graph.in_edges(node_2)), - 'out': len(graph.out_edges(node_1)), - 'name': nodes_attrs[node_1]['name']} - common_attrs.update(edge_attrs) - graph.add_edge(node_1, node_2, **common_attrs) - - if update_attributes is not None: - for node_name, new_attrs in update_attributes.items(): - assert (node_name in graph.nodes()), 'Node with name "{}" is not in the graph'.format(node_name) - for attr, value in new_attrs.items(): - graph.node[node_name][attr] = value - - for node in graph.get_op_nodes(): - # Add in_ports attribute - in_edges = node.in_edges(control_flow=True) - for attr in in_edges.values(): - control_flow = True if 'control_flow_edge' in attr and attr['control_flow_edge'] is True else False - node.add_input_port(idx=attr['in'], control_flow=control_flow) - - # Add out_ports attribute - out_edges = node.out_edges(control_flow=True) - for attr in out_edges.values(): - control_flow = True if 'control_flow_edge' in attr and attr['control_flow_edge'] is True else False - node.add_output_port(idx=attr['out'], control_flow=control_flow) - - graph.graph['cmd_params'] = cli - return graph - - -def build_graph_with_edge_attrs(nodes_attrs: dict, edges: list, update_attributes: dict = None, - cli: Namespace = Namespace(static_shape=False, data_type='FP32')): - """ - Build the Graph with specific nodes and edges. - :param nodes_attrs: dictionary where key is the node name and the value is the dictionary with node attributes. - :param edges: list of pairs with start and end node names of the edge. - :param update_attributes: optional dictionary which specifies nodes names and their attributes to be updated. The - key is a node name to update attribute and the value is a dictionary with attribute name and its value. - :param cli: Namespace with cli keys to associate with the graph - :return: generated graph. 
- """ - graph = Graph() - for node_1, node_2, attr in edges: - if node_1 not in graph.nodes(): - graph.add_node(node_1, **deepcopy(nodes_attrs[node_1])) - if node_2 not in graph.nodes(): - graph.add_node(node_2, **deepcopy(nodes_attrs[node_2])) - graph.add_edge(node_1, node_2, **attr) - if update_attributes is not None: - for node_name, new_attrs in update_attributes.items(): - assert (node_name in graph.nodes()) - for attr, value in new_attrs.items(): - graph.node[node_name][attr] = value - - for node in graph.get_op_nodes(): - # Add in_ports attribute - in_edges = node.in_edges() - for attr in in_edges.values(): - node.add_input_port(idx=attr['in']) - - # Add out_ports attribute - out_edges = node.out_edges() - for attr in out_edges.values(): - node.add_output_port(idx=attr['out']) - - graph.graph['cmd_params'] = cli - return graph - - -class FakeAttr: - def __init__(self, **kwargs): - self.__dict__.update(kwargs) - - def __setitem__(self, key, value): - setattr(self, key, value) - - def __getitem__(self, item): - return getattr(self, item) - - -class FakeNode: - def __init__(self, pl, ml): - self.pb = pl - self.model_pb = ml - self.graph = FakeAttr() - self.graph.graph = {} - self.update_node = lambda: None - - def __setitem__(self, key, value): - setattr(self, key, value) - - def __getitem__(self, item): - return getattr(self, item) - - -def const(name, value, shape=None, kwargs=None): - # no mutable default arguments must be passed - kwargs = {} if kwargs is None else kwargs - if value is not None: - shape = int64_array(value.shape) - elif value is None and shape is not None: - shape = shape_array(shape) - res = {name: {'kind': 'op', 'type': 'Const', 'op': 'Const', - 'value': value, 'shape': shape, - 'infer': Const.infer, 'type_infer': Const.type_infer, **kwargs}} - return res - - -def valued_data(name, value, shape=None): - if value is not None: - shape = int64_array(value.shape) - elif value is None and shape is not None: - shape = shape_array(shape) - return {name: {'kind': 'data', 'value': value, 'shape': shape}} - - -regular_op = lambda name, kwargs: {name: {'kind': 'op', 'type': 'NoType', **kwargs}} - -shaped_data = lambda name, shape: {name: {'kind': 'data', 'value': None, - 'shape': shape_array(shape) if shape is not None else None}} -empty_data = lambda name: valued_data(name, None) - -shaped_parameter = lambda name, shape, kwargs={}: {**regular_op(name, {'op': 'Parameter', 'type': 'Parameter', - 'shape': shape, 'infer': Parameter.infer, - **kwargs}), - **shaped_data(name + '_d', shape)} - -result = lambda name='output': {name: {'kind': 'op', 'type': 'Result', 'op': 'Result', 'infer': lambda x: 0}} - -regular_op_with_shaped_data = lambda name, shape, kwargs: {**regular_op(name, kwargs), - **shaped_data(name + '_d', shape)} -regular_op_with_empty_data = lambda name, kwargs: {**regular_op(name, kwargs), **empty_data(name + '_d')} - -fake_const = lambda name, shape, kwargs={}: {name: {'kind': 'op', 'op': 'Const', 'type': 'Const', - 'value': None, 'infer': Const.infer, **kwargs, - 'shape': shape_array(shape) if shape is not None else None}} - -shaped_const_with_data = lambda name, shape, kwargs={}: {**fake_const(name, shape, kwargs), - **shaped_data(name + '_d', shape)} - -valued_const_with_data = lambda name, value, shape=None, kwargs={}: {**const(name, value, shape, kwargs), - **valued_data(name + '_d', value, shape)} - - -def extract_port_from_string(node_name: str): - """ - Extracts port and node name from string - - Raises if node name was not provided in the expected 
format: - NODE:OUT_PORT - or - IN_PORT:NODE - - :param node_name: string value provided by user - :return: node name, input port and output port - """ - parts = node_name.split(':') - if len(parts) > 2: - raise Error("Please provide only one port number for {}. Expected format is NODE:OUT_PORT or IN_PORT:NODE, " - "where IN_PORT and OUTPUT_PORT are integers".format(node_name)) - if len(parts) == 1: - return node_name, None, None - else: - in_port, out_port, name = None, None, None - try: - in_port, name = int(parts[0]), parts[1] - except ValueError: - try: - out_port, name = int(parts[1]), parts[0] - except ValueError: - raise Error("Non integer port number in {}. Expected format is NODE:OUT_PORT or IN_PORT:NODE, where " - "IN_PORT and OUTPUT_PORT are integers".format(node_name)) - return name, in_port, out_port -
- -def get_name_and_port(tensor_name): - node_name, in_port, out_port = extract_port_from_string(tensor_name) - - assert in_port is None or out_port is None - - if in_port is not None: - return node_name, in_port - elif out_port is not None: - return node_name, out_port - else: - return node_name, 0 -
- -def connect(first_tensor_name, second_tensor_name, skip_data=False, front_phase=False): - # ports could be skipped -- then zero in/out ports would be used - # first_tensor_name = first_op_name:out_port - # second_tensor_name = in_port:second_op_name - # if skip_data is True connect directly from data node with postfix '_d' to second - # if front_phase is True connect nodes directly without postfixes and data nodes - - first_op_name, out_port = get_name_and_port(first_tensor_name) - second_op_name, in_port = get_name_and_port(second_tensor_name) - - if skip_data: - return [(first_op_name + '_d', second_op_name, {'out': out_port, 'in': in_port})] - if front_phase: - return [(first_op_name, second_op_name, {'out': out_port, 'in': in_port})] - return [ - (first_op_name, first_op_name + '_d', {'out': out_port}), - (first_op_name + '_d', second_op_name, {'in': in_port}), - ] -
- -def connect_data(first_tensor_name, second_tensor_name): - return connect(first_tensor_name, second_tensor_name, skip_data=True) - - -def connect_front(first_tensor_name, second_tensor_name): - return connect(first_tensor_name, second_tensor_name, skip_data=False, front_phase=True) diff --git a/tools/mo/unit_tests/utils/test_data/mxnet_synthetic_gru_bidirectional_FP16_1_v6.bin b/tools/mo/unit_tests/utils/test_data/mxnet_synthetic_gru_bidirectional_FP16_1_v6.bin deleted file mode 100644 index 00fa110928d10b..00000000000000 Binary files a/tools/mo/unit_tests/utils/test_data/mxnet_synthetic_gru_bidirectional_FP16_1_v6.bin and /dev/null differ diff --git a/tools/mo/unit_tests/utils/test_data/mxnet_synthetic_gru_bidirectional_FP16_1_v6.xml b/tools/mo/unit_tests/utils/test_data/mxnet_synthetic_gru_bidirectional_FP16_1_v6.xml deleted file mode 100644 index 1c572cb268ddc1..00000000000000 --- a/tools/mo/unit_tests/utils/test_data/mxnet_synthetic_gru_bidirectional_FP16_1_v6.xml +++ /dev/null @@ -1,626 +0,0 @@
[626 deleted lines of OpenVINO IR XML for the bidirectional GRU test network are omitted here: the XML markup was lost during extraction and only bare dimension values survived.]
diff --git a/tools/mo/unit_tests/utils/test_data/mxnet_synthetic_gru_bidirectional_FP16_1_v6_negative.xml b/tools/mo/unit_tests/utils/test_data/mxnet_synthetic_gru_bidirectional_FP16_1_v6_negative.xml deleted file mode 100644 index 62e8422a27f772..00000000000000 --- a/tools/mo/unit_tests/utils/test_data/mxnet_synthetic_gru_bidirectional_FP16_1_v6_negative.xml +++ /dev/null @@ -1,626 +0,0 @@
[626 deleted lines of the negative variant of the same IR XML are omitted for the same reason.]
diff --git a/tools/openvino_dev/CMakeLists.txt b/tools/openvino_dev/CMakeLists.txt deleted file mode 100644 index 924c83abc9bff8..00000000000000 --- a/tools/openvino_dev/CMakeLists.txt +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (C) 2018-2024 Intel
Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -cmake_minimum_required (VERSION 3.13) - -project(OpenVINODevPython DESCRIPTION "OpenVINO Python Development tools") - -# -# Packages & settings -# - -if(NOT DEFINED OpenVINO_SOURCE_DIR) - get_filename_component(OpenVINO_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../.." REALPATH) -endif() - -if(NOT OpenVINODeveloperScripts_FOUND) - find_package(OpenVINODeveloperScripts REQUIRED - PATHS "${OpenVINO_SOURCE_DIR}/cmake/developer_package" - NO_CMAKE_FIND_ROOT_PATH - NO_DEFAULT_PATH) -endif() - -# define version (syncronize with src/bindings/python/wheel/CMakeLists.txt) - -if(DEFINED ENV{CI_BUILD_DEV_TAG} AND NOT "$ENV{CI_BUILD_DEV_TAG}" STREQUAL "") - set(WHEEL_VERSION "${OpenVINO_VERSION}.$ENV{CI_BUILD_DEV_TAG}" CACHE STRING "Version of this release" FORCE) - set(wheel_pre_release ON) -else() - set(WHEEL_VERSION ${OpenVINO_VERSION} CACHE STRING "Version of this release" FORCE) -endif() -set(WHEEL_BUILD "${OpenVINO_VERSION_BUILD}" CACHE STRING "Build number of this release" FORCE) - -# check __init__.py files alignment - -function(ov_check_init_files_alignment init_files) - # check the files in pairs - list(LENGTH init_files init_files_count) - math(EXPR file_loop_range "${init_files_count}-2") - foreach(init_file_idx RANGE 0 ${file_loop_range}) - math(EXPR init_file_idx_next "${init_file_idx}+1") - list(GET init_files ${init_file_idx} file1) - list(GET init_files ${init_file_idx_next} file2) - - execute_process(COMMAND ${CMAKE_COMMAND} -E compare_files ${file1} ${file2} - RESULT_VARIABLE compare_result - ) - if(compare_result EQUAL 1) - message(FATAL_ERROR "The tools __init__.py files are misaligned: ${file1} and ${file2}") - endif() - endforeach() -endfunction() - -set(INIT_FILES_TOOLS -"${OpenVINO_SOURCE_DIR}/tools/mo/openvino/__init__.py" -"${OpenVINO_SOURCE_DIR}/tools/openvino_dev/src/openvino/__init__.py") - -ov_check_init_files_alignment("${INIT_FILES_TOOLS}") - -# openvino_dev build - -if(NOT ENABLE_WHEEL) - return() -endif() - -set(SETUP_PY "${CMAKE_CURRENT_SOURCE_DIR}/setup.py") - -set(openvino_wheel_name "openvino_dev-${WHEEL_VERSION}-${WHEEL_BUILD}-py3-none-any.whl") -set(openvino_wheels_output_dir "${CMAKE_BINARY_DIR}/wheels") -set(openvino_wheel_path "${openvino_wheels_output_dir}/${openvino_wheel_name}") - -add_custom_command(OUTPUT ${openvino_wheel_path} - COMMAND ${CMAKE_COMMAND} -E copy "${OpenVINO_SOURCE_DIR}/thirdparty/open_model_zoo/licensing/omz-third-party-programs.txt" "${CMAKE_CURRENT_BINARY_DIR}" - COMMAND ${CMAKE_COMMAND} -E copy "${OpenVINO_SOURCE_DIR}/licensing/dev-third-party-programs.txt" "${CMAKE_CURRENT_BINARY_DIR}" - COMMAND ${CMAKE_COMMAND} -E copy "${OpenVINO_SOURCE_DIR}/LICENSE" "${CMAKE_CURRENT_BINARY_DIR}" - COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/readme.txt" "${CMAKE_CURRENT_BINARY_DIR}" - COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_SOURCE_DIR}/setup.cfg" "${CMAKE_CURRENT_BINARY_DIR}" - COMMAND ${CMAKE_COMMAND} -E env OPENVINO_VERSION=${WHEEL_VERSION} - ${Python3_EXECUTABLE} ${SETUP_PY} - --quiet - --no-user-cfg - bdist_wheel - --dist-dir ${openvino_wheels_output_dir} - --build=${WHEEL_BUILD} - COMMAND ${CMAKE_COMMAND} -E env OPENVINO_VERSION=${WHEEL_VERSION} - ${Python3_EXECUTABLE} ${SETUP_PY} clean - WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" - COMMENT "Building Python wheel ${openvino_wheel_name}" - VERBATIM) - -add_custom_target(openvino_dev_wheel ALL DEPENDS ${openvino_wheel_path}) - -if(TARGET ie_wheel) - add_dependencies(openvino_dev_wheel ie_wheel) -endif() - -# 
install - -ov_cpack_add_component(${OV_CPACK_COMP_PYTHON_WHEELS} HIDDEN) - -install(FILES ${openvino_wheel_path} - DESTINATION ${OV_CPACK_WHEELSDIR} - COMPONENT ${OV_CPACK_COMP_PYTHON_WHEELS} - ${OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL}) diff --git a/tools/openvino_dev/readme.txt b/tools/openvino_dev/readme.txt deleted file mode 100644 index a189f27d55a5b1..00000000000000 --- a/tools/openvino_dev/readme.txt +++ /dev/null @@ -1,13 +0,0 @@ -“LEGAL NOTICE: Your use of this software and any required dependent software (the “Software Package”) is subject to the terms and conditions of the software license agreements for the Software Package, which may also include notices, disclaimers, or license terms for third party or open source software included in or with the Software Package, and your use indicates your acceptance of all such terms. Please refer to the “third-party-programs.txt” or other similarly-named text file included with the Software Package for additional details. - ------------------------------------------------------------------------- -Components and their third party programs: - - * OpenVINO(TM) Development Tools (Apache 2.0): - - Model Optimizer: /dev-third-party-programs.txt - - Open Model Zoo Tools: /omz-third-party-programs.txt - ------------------------------------------------------------------------- -Licenses: - * Apache 2.0 /LICENSE - \ No newline at end of file diff --git a/tools/openvino_dev/requirements.txt b/tools/openvino_dev/requirements.txt deleted file mode 100644 index 5153bff24e3440..00000000000000 --- a/tools/openvino_dev/requirements.txt +++ /dev/null @@ -1 +0,0 @@ --c ../constraints.txt \ No newline at end of file diff --git a/tools/openvino_dev/requirements_dev.txt.in b/tools/openvino_dev/requirements_dev.txt.in deleted file mode 100644 index 03e2105dbb92b3..00000000000000 --- a/tools/openvino_dev/requirements_dev.txt.in +++ /dev/null @@ -1 +0,0 @@ -openvino-dev${EXTRAS}==${WHEEL_VERSION} # dependabot-ignore diff --git a/tools/openvino_dev/setup.cfg b/tools/openvino_dev/setup.cfg deleted file mode 100644 index 37ea36cc439a23..00000000000000 --- a/tools/openvino_dev/setup.cfg +++ /dev/null @@ -1,20 +0,0 @@ -[global] -quiet = 1 -[options] -setup_requires = - wheel - -[options.extras_require] - -[options.package_data] - * = * - -[options.entry_points] -console_scripts = - -[metadata] -license_files = - readme* - *LICENSE* - *license* - *third-party-programs* diff --git a/tools/openvino_dev/setup.py b/tools/openvino_dev/setup.py deleted file mode 100644 index cad2e7235c87cc..00000000000000 --- a/tools/openvino_dev/setup.py +++ /dev/null @@ -1,310 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (C) 2018-2024 Intel Corporation SPDX-License-Identifier: Apache-2.0 - -""" Use this script to create a openvino-dev wheel package: - $ python3 setup.py bdist_wheel -""" -# pylint: disable-msg=line-too-long - -import os -import sys -import platform -import subprocess # nosec -import shutil -import re -import logging as log -from setuptools import Command -from setuptools.command.build import build -from pathlib import Path -from fnmatch import fnmatchcase -import pkg_resources -from setuptools.command.install import install -from setuptools import setup, find_namespace_packages -from typing import Dict, List - -PYTHON_VERSION = f'python{sys.version_info.major}.{sys.version_info.minor}' -SCRIPT_DIR = Path(__file__).resolve().parents[0] -OPENVINO_DIR = Path(__file__).resolve().parents[2] -SRC_DIR = SCRIPT_DIR / 'src' - -PKG_INSTALL_CFG = { - 'openvino-mo': { - 'src_dir': 
OPENVINO_DIR / 'tools' / 'mo', - 'black_list': ['*unit_tests*'], - 'prefix': 'mo', - 'extract_entry_points': True, - 'extract_requirements': True, - 'extract_extras': True, - }, - "omz_tools": { - 'src_dir': OPENVINO_DIR / 'thirdparty' / 'open_model_zoo' / 'tools' / 'model_tools', # noqa:E501 - 'black_list': [], - 'prefix': 'omz_tools', - 'extract_requirements': True, - 'extract_entry_points': True, - 'extract_extras': True, - }, -} - - -def ignore_patterns(*patterns): - """ - Filter names by given patterns - """ - return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns) - - -class CustomBuild(build): - """Custom implementation of build""" - - def run(self): - - # pylint: disable-msg=too-many-locals - self.announce('Installing packages', level=log.INFO) - BUILD_BASE = Path.cwd() / self.build_base - for cmp, cmp_data in PKG_INSTALL_CFG.items(): - self.announce(f'Processing package: {cmp}', level=log.INFO) - subprocess.run([sys.executable, 'setup.py', - '--quiet', - '--no-user-cfg', - 'install', - '--root', str(BUILD_BASE), - '--prefix', str(cmp_data.get("prefix")), - '--no-compile'], - check=True, - cwd=str(cmp_data.get('src_dir')), - stdout=sys.stdout, - stderr=sys.stderr) - - # grab installed modules - lib_dir = 'lib/site-packages' if platform.system() == 'Windows' else f'lib/{PYTHON_VERSION}/site-packages' - src = BUILD_BASE / cmp_data.get('prefix') / lib_dir - - egg_info = list(src.glob('**/*.egg-info')) - if egg_info: - distributions = pkg_resources.find_distributions(str(Path(egg_info[0]).parent)) - for dist in distributions: - self.announce(f'Distribution: {dist.egg_name()}', level=log.INFO) - dmap = dist._build_dep_map() # pylint: disable=W0212 - - # load install_requires list - if cmp_data.get("extract_requirements"): - # install requires {None: [requirements]} - install_requires = sorted(map(str, dmap.get(None, []))) - self.announce(f'Install requires: {install_requires}', level=log.INFO) - self.distribution.install_requires.extend(install_requires) - # conditional requirements {':': [requirements]} - conditionals_req = dict(filter(lambda x: x[0] is not None and x[0].split(':')[0] == '', dmap.items())) - self.announce(f'Install requires with marker: {conditionals_req}', level=log.INFO) - for extra, req in conditionals_req.items(): - if extra not in self.distribution.extras_require: - self.distribution.extras_require[extra] = [] - self.distribution.extras_require[extra].extend(sorted(map(str, req))) - - if cmp_data.get("extract_extras"): - # extra requirements {'marker:': [requirements]} - extras = dict(filter(lambda x: x[0] is not None and x[0].split(':')[0] != '', dmap.items())) - for extra, req in extras.items(): - self.announce(f'Extras: {extra}:{req}', level=log.INFO) - if extra not in self.distribution.extras_require: - self.distribution.extras_require[extra] = [] - self.distribution.extras_require[extra].extend(sorted(map(str, req))) - - # extract console scripts - if cmp_data.get("extract_entry_points"): - for console_scripts in dist.get_entry_map('console_scripts'): - self.announce(f'Entry point: {console_scripts}', level=log.INFO) - entry = dist.get_entry_info('console_scripts', console_scripts) - self.distribution.entry_points['console_scripts'].append(str(entry)) - - # copy modules to the build directory - dst = Path(self.build_lib) - black_list = cmp_data.get('black_list') - exclude = ignore_patterns('*ez_setup*', '*__pycache__*', '*.egg-info*', *black_list) - for path in src.glob('**/*'): - if path.is_dir() or exclude(str(path)): - continue - path_rel = 
-                (dst / path_rel.parent).mkdir(exist_ok=True, parents=True)
-                shutil.copyfile(path, dst / path_rel)
-
-        # remove duplications in requirements
-        reqs_set = set(map(lambda x: x.lower(), self.distribution.install_requires))
-        self.distribution.install_requires = sorted(reqs_set)
-        for extra, req in self.distribution.extras_require.items():
-            unique_req = list(set(map(lambda x: x.lower(), req)))
-            self.distribution.extras_require[extra] = unique_req
-
-        # add dependency on runtime package
-        runtime_req = [f'openvino=={self.distribution.get_version()}']
-        self.distribution.install_requires.extend(runtime_req)
-
-        self.announce(f'{self.distribution.install_requires}', level=log.DEBUG)
-        self.announce(f'{self.distribution.extras_require}', level=log.DEBUG)
-        self.announce(f'{self.distribution.entry_points}', level=log.DEBUG)
-
-
-class CustomInstall(install):
-    """Enable build_clib during the installation"""
-
-    def run(self):
-        self.run_command('build')
-        install.run(self)
-
-
-class CustomClean(Command):
-    """Clean up staging directories"""
-
-    user_options = []
-
-    def initialize_options(self):
-        pass
-
-    def finalize_options(self):
-        pass
-
-    def clean_temp_files(self):
-        """Clean components staging directories"""
-        for pattern in './build ./dist **/*.pyc **/*.tgz **/*.egg-info'.split(' '):
-            paths = []
-            for comp, comp_data in PKG_INSTALL_CFG.items():
-                src_dir = Path(comp_data.get('src_dir'))
-                paths += src_dir.glob(pattern)
-            paths += SCRIPT_DIR.glob(pattern)
-            for path in paths:
-                if path.is_file() and path.exists():
-                    path = path.parent
-                self.announce(f'Cleaning: {path}', level=log.INFO)
-                if os.path.exists(path):
-                    shutil.rmtree(path)
-
-    def run(self):
-        self.clean_temp_files()
-
-
-def get_description(desc_file_path):
-    """read description from README.md"""
-    with open(desc_file_path, 'r', encoding='utf-8') as fstream:
-        description = fstream.read()
-    return description
-
-
-def read_constraints(path: str='../constraints.txt') -> Dict[str, List[str]]:
-    """
-    Read a constraints.txt file and return a dict
-    of {package_name: [required_version_1, required_version_2]}.
-    The dict values are a list because a package can be mentioned
-    multiple times, for example:
-        mxnet~=1.2.0; sys_platform == 'win32'
-        mxnet>=1.7.0; sys_platform != 'win32'
-    """
-    constraints = {}
-    with open(Path(__file__).resolve().parent / path) as f:
-        raw_constraints = f.readlines()
-    for line in raw_constraints:
-        # skip comments
-        if line.startswith('#'):
-            continue
-        line = line.replace('\n', '')
-        # read constraints for that package
-        package, delimiter, constraint = re.split('(~|=|<|>|;)', line, maxsplit=1)
-        # if there is no entry for that package, add it
-        if constraints.get(package) is None:
-            constraints[package] = [delimiter + constraint]
-        # else add another entry for that package
-        else:
-            constraints[package].extend([delimiter + constraint])
-    return constraints
-
-
-def read_requirements(path: str) -> List[str]:
-    """
-    Read a requirements.txt file and return a list
-    of requirements. Three cases are supported, the
-    list corresponds to priority:
-    1. version specified in requirements.txt
-    2. version specified in constraints.txt
-    3. version unbound
-
-    Putting environment markers into constraints.txt is prone to bugs.
-    They should be specified in requirements.txt files.
-    """
-    requirements = []
-    constraints = read_constraints()
-    with open(Path(__file__).resolve().parent / path) as f:
-        raw_requirements = f.readlines()
-    for line in raw_requirements:
-        # skip comments and constraints link
-        if line.startswith(('#', '-c')):
-            continue
-        # get rid of newlines
-        line = line.replace('\n', '')
-        # if version is specified (non-word chars present)
-        package_constraint = constraints.get(line.split(';')[0])
-        if re.search('(~|=|<|>)', line) and len(line.split(';'))>1:
-            if package_constraint: # both markers and versions specified
-                marker_index = line.find(";")
-                # insert package version between package name and environment markers
-                line = line[:marker_index] \
-                    + ",".join([constraint for constraint in package_constraint]) \
-                    + line[marker_index:]
-            requirements.append(line)
-        # else get version from constraints
-        else:
-            constraint = constraints.get(line)
-            # if version found in constraints.txt
-            if constraint:
-                for marker in constraint:
-                    requirements.append(line+marker)
-            # else version is unbound
-            else:
-                requirements.append(line)
-    return requirements
-
-
-def concat_files(output_file, input_files):
-    with open(output_file, 'w', encoding='utf-8') as outfile:
-        for filename in input_files:
-            with open(filename, 'r', encoding='utf-8') as infile:
-                content = infile.read()
-                outfile.write(content)
-    return output_file
-
-description_md = SCRIPT_DIR.parents[1] / 'docs' / 'dev' / "pypi_publish" / 'pypi-openvino-dev.md'
-md_files = [description_md, SCRIPT_DIR.parents[1] / 'docs' / 'dev' / "pypi_publish" / 'pre-release-note.md']
-docs_url = 'https://docs.openvino.ai/2023.0/index.html'
-
-if(os.getenv('CI_BUILD_DEV_TAG')):
-    output = Path.cwd() / 'build' / 'pypi-openvino-dev.md'
-    output.parent.mkdir(exist_ok=True)
-    description_md = concat_files(output, md_files)
-    docs_url = 'https://docs.openvino.ai/2023.0/index.html'
-
-setup(
-    name='openvino-dev',
-    version=os.getenv('OPENVINO_VERSION', '0.0.0'),
-    author=os.getenv('WHEEL_AUTHOR', 'Intel® Corporation'),
-    license=os.getenv('WHEEL_LICENCE_TYPE', 'OSI Approved :: Apache Software License'),
-    author_email=os.getenv('WHEEL_AUTHOR_EMAIL', 'openvino_pushbot@intel.com'),
-    url=os.getenv('WHEEL_URL', docs_url),
-    download_url=os.getenv('WHEEL_DOWNLOAD_URL', 'https://github.com/openvinotoolkit/openvino/tags'),
-    description=os.getenv('WHEEL_DESC', 'OpenVINO(TM) Development Tools'),
-    long_description=get_description(os.getenv('WHEEL_OVERVIEW', description_md)),
-    long_description_content_type='text/markdown',
-    classifiers=[
-        'Programming Language :: Python :: 3',
-        'Operating System :: OS Independent',
-    ],
-    cmdclass={
-        'build': CustomBuild,
-        'install': CustomInstall,
-        'clean': CustomClean,
-    },
-    entry_points = {
-        'console_scripts': [],
-    },
-    install_requires=read_requirements(SCRIPT_DIR / 'requirements.txt'),
-    packages=find_namespace_packages(where=str(SRC_DIR)),
-    package_dir={'': str(SRC_DIR)},
-)
diff --git a/tools/openvino_dev/src/openvino/__init__.py b/tools/openvino_dev/src/openvino/__init__.py
deleted file mode 100644
index b015570964c520..00000000000000
--- a/tools/openvino_dev/src/openvino/__init__.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (C) 2018-2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-__path__ = __import__("pkgutil").extend_path(__path__, __name__)
-
-# Required for Windows OS platforms
-# Note: always top-level
-try:
-    from openvino.utils import _add_openvino_libs_to_search_path
-    _add_openvino_libs_to_search_path()
-except ImportError:
-    pass
-
-# OpenVINO API
-try:
-    # Import all public modules
-    from openvino import runtime as runtime
-    from openvino import frontend as frontend
-    from openvino import helpers as helpers
-    from openvino import preprocess as preprocess
-    from openvino import utils as utils
-    from openvino import properties as properties
-
-    # Import most important classes and functions from openvino.runtime
-    from openvino.runtime import Model
-    from openvino.runtime import Core
-    from openvino.runtime import CompiledModel
-    from openvino.runtime import InferRequest
-    from openvino.runtime import AsyncInferQueue
-
-    from openvino.runtime import Symbol
-    from openvino.runtime import Dimension
-    from openvino.runtime import Strides
-    from openvino.runtime import PartialShape
-    from openvino.runtime import Shape
-    from openvino.runtime import Layout
-    from openvino.runtime import Type
-    from openvino.runtime import Tensor
-    from openvino.runtime import OVAny
-
-    from openvino.runtime import compile_model
-    from openvino.runtime import get_batch
-    from openvino.runtime import set_batch
-    from openvino.runtime import serialize
-    from openvino.runtime import shutdown
-    from openvino.runtime import tensor_from_file
-    from openvino.runtime import save_model
-    from openvino.runtime import layout_helpers
-
-    from openvino._pyopenvino import RemoteContext
-    from openvino._pyopenvino import RemoteTensor
-    from openvino._pyopenvino import Op
-
-    # libva related:
-    from openvino._pyopenvino import VAContext
-    from openvino._pyopenvino import VASurfaceTensor
-
-    # Set version for openvino package
-    from openvino.runtime import get_version
-    __version__ = get_version()
-except ImportError:
-    import warnings
-    warnings.warn("openvino package has problems with imports!", ImportWarning, stacklevel=2)
-
-# Tools
-try:
-    # Model Conversion API - ovc should reside in the main namespace
-    from openvino.tools.ovc import convert_model
-except ImportError:
-    pass
diff --git a/tools/openvino_dev/src/openvino/tools/__init__.py b/tools/openvino_dev/src/openvino/tools/__init__.py
deleted file mode 100644
index 0d0e5a44956e88..00000000000000
--- a/tools/openvino_dev/src/openvino/tools/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright (C) 2018-2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
-
-__path__ = __import__("pkgutil").extend_path(__path__, __name__)
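
The deleted CustomBuild.run() above stages each component with `setup.py install` and then copies the staged site-packages into build_lib, skipping caches, egg metadata and black-listed paths with an fnmatch-based predicate. The standalone sketch below only illustrates that filter-and-copy idea; it is not part of the patch, copy_tree_filtered is a hypothetical helper, and the directory names in the usage comment are made up.

# Illustrative sketch only, not part of the removed setup.py.
import shutil
from fnmatch import fnmatchcase
from pathlib import Path


def ignore_patterns(*patterns):
    """Return a predicate that is True for names matching any of the glob patterns."""
    return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)


def copy_tree_filtered(src: Path, dst: Path, *patterns: str) -> None:
    """Copy every file under src into dst, skipping paths that match an exclusion pattern."""
    exclude = ignore_patterns(*patterns)
    for path in src.glob('**/*'):
        if path.is_dir() or exclude(str(path)):
            continue
        rel = path.relative_to(src)
        (dst / rel.parent).mkdir(parents=True, exist_ok=True)
        shutil.copyfile(path, dst / rel)


# Example (hypothetical paths): copy a staged package while dropping caches,
# egg metadata and unit tests.
# copy_tree_filtered(Path('build/stage'), Path('build/lib'),
#                    '*__pycache__*', '*.egg-info*', '*unit_tests*')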
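
The deleted read_constraints()/read_requirements() helpers pin packages that are listed without a version in requirements.txt using specifiers collected from a shared constraints.txt. The sketch below shows that merging idea in simplified form, assuming the constraints have already been parsed into a dict; merge_with_constraints and the sample data are hypothetical and do not reproduce the removed code exactly.

# Illustrative sketch only, not part of the removed setup.py.
import re
from typing import Dict, List


def merge_with_constraints(requirements: List[str], constraints: Dict[str, List[str]]) -> List[str]:
    """Pin unversioned requirement lines with specifiers taken from a constraints map."""
    merged = []
    for line in requirements:
        line = line.strip()
        if not line or line.startswith(('#', '-c')):
            continue  # skip comments and the '-c constraints.txt' include line
        if re.search('(~|=|<|>)', line):
            merged.append(line)  # requirement already carries its own version
        else:
            for spec in constraints.get(line, ['']):
                merged.append(line + spec)  # e.g. 'networkx' + '<=3.1'
    return merged


if __name__ == '__main__':
    reqs = ['# dev tools', '-c constraints.txt', 'networkx', 'numpy>=1.20']
    cons = {'networkx': ['<=3.1']}
    print(merge_with_constraints(reqs, cons))
    # prints: ['networkx<=3.1', 'numpy>=1.20']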